repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
ZJUGuoShuai/vision | [
"a9940fe4b2b63bd82a2f853616e00fd0bd112f9a",
"a9940fe4b2b63bd82a2f853616e00fd0bd112f9a"
] | [
"torchvision/models/quantization/resnet.py",
"test/common_utils.py"
] | [
"from typing import Any, Type, Union, List\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.quantization import fuse_modules\nfrom torchvision.models.resnet import Bottleneck, BasicBlock, ResNet, model_urls\n\nfrom ..._internally_replaced_utils import load_state_dict_from_url\nfrom .utils import _replace_relu, quantize_model\n\n__all__ = [\"QuantizableResNet\", \"resnet18\", \"resnet50\", \"resnext101_32x8d\"]\n\n\nquant_model_urls = {\n \"resnet18_fbgemm\": \"https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth\",\n \"resnet50_fbgemm\": \"https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth\",\n \"resnext101_32x8d_fbgemm\": \"https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth\",\n}\n\n\nclass QuantizableBasicBlock(BasicBlock):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(QuantizableBasicBlock, self).__init__(*args, **kwargs)\n self.add_relu = torch.nn.quantized.FloatFunctional()\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out = self.add_relu.add_relu(out, identity)\n\n return out\n\n def fuse_model(self) -> None:\n torch.quantization.fuse_modules(self, [[\"conv1\", \"bn1\", \"relu\"], [\"conv2\", \"bn2\"]], inplace=True)\n if self.downsample:\n torch.quantization.fuse_modules(self.downsample, [\"0\", \"1\"], inplace=True)\n\n\nclass QuantizableBottleneck(Bottleneck):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(QuantizableBottleneck, self).__init__(*args, **kwargs)\n self.skip_add_relu = nn.quantized.FloatFunctional()\n self.relu1 = nn.ReLU(inplace=False)\n self.relu2 = nn.ReLU(inplace=False)\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu2(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n out = self.skip_add_relu.add_relu(out, identity)\n\n return out\n\n def fuse_model(self) -> None:\n fuse_modules(self, [[\"conv1\", \"bn1\", \"relu1\"], [\"conv2\", \"bn2\", \"relu2\"], [\"conv3\", \"bn3\"]], inplace=True)\n if self.downsample:\n torch.quantization.fuse_modules(self.downsample, [\"0\", \"1\"], inplace=True)\n\n\nclass QuantizableResNet(ResNet):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super(QuantizableResNet, self).__init__(*args, **kwargs)\n\n self.quant = torch.quantization.QuantStub()\n self.dequant = torch.quantization.DeQuantStub()\n\n def forward(self, x: Tensor) -> Tensor:\n x = self.quant(x)\n # Ensure scriptability\n # super(QuantizableResNet,self).forward(x)\n # is not scriptable\n x = self._forward_impl(x)\n x = self.dequant(x)\n return x\n\n def fuse_model(self) -> None:\n r\"\"\"Fuse conv/bn/relu modules in resnet models\n\n Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.\n Model is modified in place. 
Note that this operation does not change numerics\n and the model after modification is in floating point\n \"\"\"\n\n fuse_modules(self, [\"conv1\", \"bn1\", \"relu\"], inplace=True)\n for m in self.modules():\n if type(m) == QuantizableBottleneck or type(m) == QuantizableBasicBlock:\n m.fuse_model()\n\n\ndef _resnet(\n arch: str,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n pretrained: bool,\n progress: bool,\n quantize: bool,\n **kwargs: Any,\n) -> QuantizableResNet:\n\n model = QuantizableResNet(block, layers, **kwargs)\n _replace_relu(model)\n if quantize:\n # TODO use pretrained as a string to specify the backend\n backend = \"fbgemm\"\n quantize_model(model, backend)\n else:\n assert pretrained in [True, False]\n\n if pretrained:\n if quantize:\n model_url = quant_model_urls[arch + \"_\" + backend]\n else:\n model_url = model_urls[arch]\n\n state_dict = load_state_dict_from_url(model_url, progress=progress)\n\n model.load_state_dict(state_dict)\n return model\n\n\ndef resnet18(\n pretrained: bool = False,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableResNet:\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, return a quantized version of the model\n \"\"\"\n return _resnet(\"resnet18\", QuantizableBasicBlock, [2, 2, 2, 2], pretrained, progress, quantize, **kwargs)\n\n\ndef resnet50(\n pretrained: bool = False,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableResNet:\n\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, return a quantized version of the model\n \"\"\"\n return _resnet(\"resnet50\", QuantizableBottleneck, [3, 4, 6, 3], pretrained, progress, quantize, **kwargs)\n\n\ndef resnext101_32x8d(\n pretrained: bool = False,\n progress: bool = True,\n quantize: bool = False,\n **kwargs: Any,\n) -> QuantizableResNet:\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n quantize (bool): If True, return a quantized version of the model\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 8\n return _resnet(\"resnext101_32x8d\", QuantizableBottleneck, [3, 4, 23, 3], pretrained, progress, quantize, **kwargs)\n",
"import contextlib\nimport functools\nimport os\nimport random\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torchvision import io\n\nimport __main__ # noqa: 401\n\n\nIN_CIRCLE_CI = os.getenv(\"CIRCLECI\", False) == \"true\"\nIN_RE_WORKER = os.environ.get(\"INSIDE_RE_WORKER\") is not None\nIN_FBCODE = os.environ.get(\"IN_FBCODE_TORCHVISION\") == \"1\"\nCUDA_NOT_AVAILABLE_MSG = \"CUDA device not available\"\nCIRCLECI_GPU_NO_CUDA_MSG = \"We're in a CircleCI GPU machine, and this test doesn't need cuda.\"\n\n\[email protected]\ndef get_tmp_dir(src=None, **kwargs):\n tmp_dir = tempfile.mkdtemp(**kwargs)\n if src is not None:\n os.rmdir(tmp_dir)\n shutil.copytree(src, tmp_dir)\n try:\n yield tmp_dir\n finally:\n shutil.rmtree(tmp_dir)\n\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n\n\nclass MapNestedTensorObjectImpl(object):\n def __init__(self, tensor_map_fn):\n self.tensor_map_fn = tensor_map_fn\n\n def __call__(self, object):\n if isinstance(object, torch.Tensor):\n return self.tensor_map_fn(object)\n\n elif isinstance(object, dict):\n mapped_dict = {}\n for key, value in object.items():\n mapped_dict[self(key)] = self(value)\n return mapped_dict\n\n elif isinstance(object, (list, tuple)):\n mapped_iter = []\n for iter in object:\n mapped_iter.append(self(iter))\n return mapped_iter if not isinstance(object, tuple) else tuple(mapped_iter)\n\n else:\n return object\n\n\ndef map_nested_tensor_object(object, tensor_map_fn):\n impl = MapNestedTensorObjectImpl(tensor_map_fn)\n return impl(object)\n\n\ndef is_iterable(obj):\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n\[email protected]\ndef freeze_rng_state():\n rng_state = torch.get_rng_state()\n if torch.cuda.is_available():\n cuda_rng_state = torch.cuda.get_rng_state()\n yield\n if torch.cuda.is_available():\n torch.cuda.set_rng_state(cuda_rng_state)\n torch.set_rng_state(rng_state)\n\n\ndef cycle_over(objs):\n for idx, obj1 in enumerate(objs):\n for obj2 in objs[:idx] + objs[idx + 1 :]:\n yield obj1, obj2\n\n\ndef int_dtypes():\n return (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)\n\n\ndef float_dtypes():\n return (torch.float32, torch.float64)\n\n\[email protected]\ndef disable_console_output():\n with contextlib.ExitStack() as stack, open(os.devnull, \"w\") as devnull:\n stack.enter_context(contextlib.redirect_stdout(devnull))\n stack.enter_context(contextlib.redirect_stderr(devnull))\n yield\n\n\ndef cpu_and_gpu():\n import pytest # noqa\n\n return (\"cpu\", pytest.param(\"cuda\", marks=pytest.mark.needs_cuda))\n\n\ndef needs_cuda(test_func):\n import pytest # noqa\n\n return pytest.mark.needs_cuda(test_func)\n\n\ndef _create_data(height=3, width=3, channels=3, device=\"cpu\"):\n # TODO: When all relevant tests are ported to pytest, turn this into a module-level fixture\n tensor = torch.randint(0, 256, (channels, height, width), dtype=torch.uint8, device=device)\n data = tensor.permute(1, 2, 0).contiguous().cpu().numpy()\n mode = \"RGB\"\n if channels == 1:\n mode = \"L\"\n data = data[..., 0]\n pil_img = Image.fromarray(data, mode=mode)\n return tensor, pil_img\n\n\ndef _create_data_batch(height=3, width=3, channels=3, num_samples=4, device=\"cpu\"):\n # TODO: When all relevant tests are ported to pytest, turn this into a module-level fixture\n batch_tensor = torch.randint(0, 256, (num_samples, channels, height, width), dtype=torch.uint8, device=device)\n return batch_tensor\n\n\nassert_equal = 
functools.partial(torch.testing.assert_close, rtol=0, atol=1e-6)\n\n\ndef get_list_of_videos(tmpdir, num_videos=5, sizes=None, fps=None):\n names = []\n for i in range(num_videos):\n if sizes is None:\n size = 5 * (i + 1)\n else:\n size = sizes[i]\n if fps is None:\n f = 5\n else:\n f = fps[i]\n data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8)\n name = os.path.join(tmpdir, \"{}.mp4\".format(i))\n names.append(name)\n io.write_video(name, data, fps=f)\n\n return names\n\n\ndef _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):\n np_pil_image = np.array(pil_image)\n if np_pil_image.ndim == 2:\n np_pil_image = np_pil_image[:, :, None]\n pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1)))\n if msg is None:\n msg = \"tensor:\\n{} \\ndid not equal PIL tensor:\\n{}\".format(tensor, pil_tensor)\n assert_equal(tensor.cpu(), pil_tensor, msg=msg)\n\n\ndef _assert_approx_equal_tensor_to_pil(\n tensor, pil_image, tol=1e-5, msg=None, agg_method=\"mean\", allowed_percentage_diff=None\n):\n # TODO: we could just merge this into _assert_equal_tensor_to_pil\n np_pil_image = np.array(pil_image)\n if np_pil_image.ndim == 2:\n np_pil_image = np_pil_image[:, :, None]\n pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))).to(tensor)\n\n if allowed_percentage_diff is not None:\n # Assert that less than a given %age of pixels are different\n assert (tensor != pil_tensor).to(torch.float).mean() <= allowed_percentage_diff\n\n # error value can be mean absolute error, max abs error\n # Convert to float to avoid underflow when computing absolute difference\n tensor = tensor.to(torch.float)\n pil_tensor = pil_tensor.to(torch.float)\n err = getattr(torch, agg_method)(torch.abs(tensor - pil_tensor)).item()\n assert err < tol\n\n\ndef _test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwargs):\n transformed_batch = fn(batch_tensors, **fn_kwargs)\n for i in range(len(batch_tensors)):\n img_tensor = batch_tensors[i, ...]\n transformed_img = fn(img_tensor, **fn_kwargs)\n assert_equal(transformed_img, transformed_batch[i, ...])\n\n if scripted_fn_atol >= 0:\n scripted_fn = torch.jit.script(fn)\n # scriptable function test\n s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs)\n torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol)\n"
] | [
[
"torch.quantization.DeQuantStub",
"torch.nn.quantized.FloatFunctional",
"torch.nn.ReLU",
"torch.quantization.fuse_modules",
"torch.quantization.QuantStub"
],
[
"torch.jit.script",
"torch.get_rng_state",
"torch.randint",
"torch.cuda.set_rng_state",
"torch.cuda.get_rng_state",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.abs",
"torch.set_rng_state",
"numpy.array",
"torch.testing.assert_close"
]
] |
jonathanyepez/Master-C3 | [
"37a3a13ef2bc8f51d3da3ca1b3704569ef83ef3d"
] | [
"CasoPractico01.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 6 18:03:09 2020\r\n\r\n@author: Jonathan A. Yepez M.\r\n\"\"\"\r\n\r\n#Task Description\r\n\"\"\"\r\nEn el archivo auto.csv se encuentran los siguientes datos de diferentes automoviles:\r\n * Cilindros\r\n * Cilindrada\r\n * Potencia\r\n * Peso\r\n * Aceleracion\r\n * Año del coche\r\n * Origen\r\n * Consumo (mpg)\r\n \r\nLas unidades de las características no se encuentran en el sistema internacional.\r\nLa variable 'origen' es un código que identifica el país de orígen.\r\n\r\nTASK: Crear un modelo para que se pueda estimar el consumo de un vehículo a partir del resto de las variables\r\n\"\"\"\r\n#Import the libraries that will be used in this case\r\nimport pandas as pd\r\nfrom pandas.plotting import scatter_matrix #for a specific stage in EDA\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import mean_squared_error\r\nimport seaborn as sns #visualisation\r\nimport matplotlib.pyplot as plt #visualisation\r\n\r\n#Read the data and create a dataframe\r\ndf = pd.read_csv(\"auto.csv\")\r\nprint(\"--------------------\")\r\nprint(df.info()) #a quick view of the dataframe structure\r\nprint(\"--------------------\")\r\nprint(df.describe()) #a more in-depth description of the information contained in the df\r\nprint(\"--------------------\")\r\nprint(df.head()) #show the first 5 entries of the dataframe\r\nprint(\"the columns for this dataframe are:\")\r\nprint(df.columns)\r\n\r\n#we check for missing values (NAs)\r\n#print(df.isnull().sum())\r\nif(df.isnull().sum().sum()== 0):\r\n print(\"\\nThere are NO missing values.\\n\")\r\nelse:\r\n print(\"\\nThere are\",df.isnull().sum().sum(),\"missing values in the dataframe!\\n\")\r\n\r\n#EDA => Exploratory Data Analysis\r\ndf = df.drop_duplicates() #remove duplicates (if applicable)\r\n\r\n#Scatter matrix for the whole data\r\nscatter_matrix(df, figsize = (12, 12), diagonal = 'kde');\r\n\r\nplt.figure(figsize=(10, 6)) #size configuration for plotting\r\nsns.distplot(df['mpg'], color='b', hist_kws={'alpha': 0.4}); #we first generate a distribution plot for 'mpg'\r\n#Se nota una tendencia a un consumo entre 20 y 30 mpgs dentro del dataset.\r\n\r\ndf_cont = df.select_dtypes(include = ['float64']) #we select the continuous variables \r\nprint(\"The continuous variables for this case are: \\n\")\r\nprint(df_cont.head())\r\n\r\n#Analysing the continuous variables -> scatter plots\r\nfor i in range(len(df_cont.columns)-1):\r\n sns.pairplot(data=df_cont, x_vars=df_cont.columns[i], y_vars=['mpg'], height = 5, aspect=2) #scatter plot vars vs 'mpg'\r\n\r\n\"\"\"\r\nEn este caso, notamos una relación inversamente proporcional entre el consumo (mpg) y las\r\nvariables displacement, horsepower, y weight. Esto nos indica que a mayor potencia y \r\ndesplazamiento (en términos del motor), el consumo de gasolina será mayor y por ende se\r\ntendrá un menor 'rendimiento' de mpg.\r\nEn el caso de la variable acceleration, se nota un grafico de dispersión sin una tendencia\r\nclara. 
Esto puede deberse a que esta característica varía entre modelos y tipos de carro\r\ncasi intependientemente del consumo de gasolina.\r\n\"\"\"\r\n \r\ndf_cat = df.select_dtypes(include = ['int64']) #we select the 'categorical' variables.\r\nprint(\"\\nThe categorical variables for this case are: \\n\")\r\nprint(df_cat.head())\r\n\r\nfor i in range(len(df_cat.columns)):\r\n sns.catplot(x=df_cat.columns[i], y=\"mpg\", data=df, alpha=0.5) #gnerate a catplot\r\n ax = sns.boxplot(x=df_cat.columns[i], y=\"mpg\", data=df) #add a boxplot on top of the catplot\r\n plt.setp(ax.artists, alpha=0.6, edgecolor=\"k\")\r\n\r\n\"\"\"\r\nTras haber presentado las gráficas para las variables categóricas, se nota que el número \r\nde cilindros muestra cierta tendencia en términos generales. A grosso modo, se puede asumir\r\nque cuando un vehículo tiene 8 cilindros, el rendimiento en mpg tiende a ser notablemente \r\nmenor a uno que tenga 4 cilindros.\r\nAsi mismo, el origen del vehículo indica que, si bien es cierto no hay una variación extrema, \r\nse puede asumir que aquellos provenientes del país '3', tienen un mejor consumo de \r\ngasolina.\r\nEn el caso del año de fabricación (model_year), se observa una tendencia general a la mejora\r\nde mpg conforme se avanza en el tiempo. Esto puede deberse a los avances en el área de la\r\nmecánica automotriz y la implementación de mejores diseños a nivel de componentes mecánicos\r\ncomo aerodinámicos.\r\n\"\"\"\r\n\r\n#Implementing the Regression Model\r\nindep_vars = df.iloc[:,:-1] #selecting 'independent variables'\r\ndep_var = df[[\"mpg\"]] #selecting the 'dependent variable'\r\n\r\n#separating the data into train and test sets\r\nx_train, x_test, y_train, y_test = train_test_split(indep_vars,dep_var)\r\n\r\nmodel = LinearRegression() #constructor that initializes a LinearRegression object\r\n\r\nmodel.fit(x_train, y_train) #fit the model using the training set\r\n\r\npred_train = model.predict(x_train) #prediction based on the training set\r\npred_test = model.predict(x_test) #prediction based on the test set\r\n\r\n#Checking results using R^2 and MSE\r\nprint(\"====================\\n\")\r\nprint('\\nR2 en entrenamiento es: ', round(model.score(x_train, y_train),4))\r\nprint('MSE en entrenamiento: ', round(mean_squared_error(y_train, pred_train),2)) \r\nprint(\"-----------------------\")\r\nprint('R2 en validación es: ', round(model.score(x_test, y_test),4))\r\nprint('MSE en validación es: ', round(mean_squared_error(y_test, pred_test),2))\r\n\r\n\"\"\"\r\nSe ha obtenido resultados aceptables en terminos de precisión. Dado que los valores de MSE\r\ny R^2 en los test y train sets son similares se puede determinar que no se presenta\r\noverfitting. \r\nLos parametros de la regresión se muestran a continuación \r\n\"\"\"\r\n\r\nprint(\"====================\\n\")\r\nprint(\"Los parametros de la regresion son: \")\r\nprint(model.coef_)\r\nprint(\"El termino independiente de la regresión es: \", model.intercept_)"
] | [
[
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LinearRegression",
"pandas.plotting.scatter_matrix",
"matplotlib.pyplot.setp",
"sklearn.model_selection.train_test_split"
]
] |
meltmedia/the-ark | [
"d559897494e02a2e2048fdc44014f17af89691bb"
] | [
"the_ark/screen_capture.py"
] | [
"import math\nimport numpy\nfrom PIL import Image\nfrom the_ark.selenium_helpers import SeleniumHelperExceptions, ElementNotVisibleError, ElementError\nfrom StringIO import StringIO\nimport time\nimport traceback\n\nDEFAULT_SCROLL_PADDING = 100\nSCREENSHOT_FILE_EXTENSION = \"png\"\nDEFAULT_PIXEL_MATCH_OFFSET = 100\nFIREFOX_HEAD_HEIGHT = 75\nMAX_IMAGE_HEIGHT = 32768.0\n\n\nclass Screenshot:\n \"\"\"\n A helper class for taking screenshots using a Selenium Helper instance\n \"\"\"\n def __init__(self, selenium_helper, paginated=False, header_ids=None, footer_ids=None,\n scroll_padding=DEFAULT_SCROLL_PADDING, pixel_match_offset=DEFAULT_PIXEL_MATCH_OFFSET,\n file_extenson=SCREENSHOT_FILE_EXTENSION, resize_delay=0, content_container_selector=\"html\"):\n \"\"\"\n Initializes the Screenshot class. These variable will be used throughout to help determine how to capture pages\n for this website.\n :param\n - selenium_helper: SeleniumHelper() - The Selenium Helper object whose browser you are capturing\n - paginated: bool - if True, all full page screenshots captured by this class will be a sequence of\n viewport sized images\n - header_ids: list - A list of css_selectors for elements that \"stick\" to the top of the screen when\n scrolling. These hidden and shown while capturing the screen so that they display\n only at the top of the page, and do not cover any content\n - footer_ids: list - A list of css_selectors for elements that \"stick\" to the bottom of the screen\n when scrolling. These hidden and shown while capturing the screen so that they\n display only at the bottom of the page, and do not cover any content\n - scroll_padding: int - The height, in pixels, of the overlap between paginated captures. This is also\n used when scrolling elements. the element is scrolled its height minus the padding\n to create an overlapping of content shown on both images to not cut any text in half\n - file_extenson: string - If provided, this extension will be used while creating the image. This must\n be an extension that is usable with PIL\n \"\"\"\n # Set parameters as class variables\n self.sh = selenium_helper\n self.paginated = paginated\n self.headers = header_ids\n self.footers = footer_ids\n self.content_container_selector = content_container_selector\n self.scroll_padding = scroll_padding\n self.pixel_match_offset = pixel_match_offset\n self.file_extenson = \"png\"\n\n self.headless = self.sh.desired_capabilities.get(\"headless\", False)\n self.head_padding = FIREFOX_HEAD_HEIGHT if self.sh.desired_capabilities [\"browserName\"] == \"firefox\" else 0\n self.scale_factor = self.sh.desired_capabilities.get(\"scale_factor\", 1)\n self.max_height = MAX_IMAGE_HEIGHT / self.scale_factor\n self.resize_delay = resize_delay\n\n def capture_page(self, viewport_only=False, padding=None):\n \"\"\"\n Entry point for a screenshot of the whole page. 
This will send the screenshot off to the correct methods\n depending on whether you need paginated screenshots, just the current viewport area, or the whole page in\n one large shot.\n :param\n - viewport_only: bool - Whether to capture just the viewport's visible area or not\n :return\n - StringIO: A StingIO object containing the captured image(s)\n \"\"\"\n try:\n if self.headless:\n return self._capture_headless_page(viewport_only)\n elif viewport_only:\n return self._capture_single_viewport()\n elif self.paginated:\n return self._capture_paginated_page(padding)\n else:\n return self._capture_full_page()\n\n except SeleniumHelperExceptions as selenium_error:\n message = \"A selenium issue arose while taking the screenshot\".format()\n error = SeleniumError(message, selenium_error)\n raise error\n except Exception as e:\n message = \"Unhandled exception while taking the screenshot | {0}\".format(e)\n raise ScreenshotException(message, stacktrace=traceback.format_exc())\n\n def capture_scrolling_element(self, css_selector, viewport_only=True, scroll_padding=None):\n \"\"\"\n This method will scroll an element one height (with padding) and take a screenshot each scroll until the element\n has been scrolled to the bottom. You can choose to capture the whole page (helpful when the scrollable element\n is taller than the viewport) or just the viewport area\n :param\n - css_selector: string - The css selector for the element that you plan to scroll\n - viewport_only: bool - Whether to capture just the viewport's visible area or not (each screenshot\n after scrolling)\n - scroll_padding: int - Overwrites the default scroll padding for the class. This can be used when the\n element, or site, have greatly different scroll padding numbers\n :return\n - StringIO: list - A list containing multiple StringIO image objects\n \"\"\"\n padding = scroll_padding if scroll_padding else self.scroll_padding\n\n try:\n image_list = []\n # Scroll the element to the top\n self.sh.scroll_an_element(css_selector, scroll_top=True)\n\n while True:\n if self.headless:\n image_list.append(self._capture_headless_page(viewport_only))\n elif viewport_only:\n image_list.append(self._capture_single_viewport())\n else:\n image_list.append(self._capture_full_page())\n\n if self.sh.get_is_element_scroll_position_at_bottom(css_selector):\n # Stop capturing once you're at the bottom\n break\n else:\n # Scroll down for the next one!\n self.sh.scroll_an_element(css_selector, scroll_padding=padding)\n\n return image_list\n\n except SeleniumHelperExceptions as selenium_error:\n message = \"A selenium issue arose while trying to capture the scrolling element\"\n error = SeleniumError(message, selenium_error)\n raise error\n except Exception as e:\n message = \"Unhandled exception while taking the scrolling screenshot \" \\\n \"of the element '{0}' | {1}\".format(css_selector, e)\n raise ScreenshotException(message,\n stacktrace=traceback.format_exc(),\n details={\"css_selector\": css_selector})\n\n def capture_horizontal_scrolling_element(self, css_selector, viewport_only=True, scroll_padding=None):\n \"\"\"\n This method will scroll an element horizontally one width (with padding) and take a screenshot each scroll until\n the element has been scrolled to the right. 
You can choose to capture the whole page (helpful when the\n scrollable element is taller than the viewport) or just the viewport area.\n\n :param\n - css_selector: string - The css selector for the element that you plan to scroll\n - viewport_only: bool - Whether to capture just the viewport's visible area or not (each screenshot\n after scrolling)\n - scroll_padding: int - Overwrites the default scroll padding for the class. This can be used when the\n element, or site, have greatly different scroll padding numbers\n :return\n - StringIO: list - A list containing multiple StringIO image objects\n \"\"\"\n padding = scroll_padding if scroll_padding else self.scroll_padding\n\n try:\n image_list = []\n # Scroll the element to the top\n self.sh.scroll_an_element(css_selector, scroll_left=True)\n\n while True:\n # - Capture the image\n if viewport_only:\n image_list.append(self._capture_single_viewport())\n else:\n image_list.append(self._capture_full_page())\n\n if self.sh.get_is_element_scroll_position_at_most_right(css_selector):\n # - Stop capturing once you're at the most right\n break\n else:\n # - Scroll right for the next one!\n self.sh.scroll_an_element(css_selector, scroll_padding=padding, scroll_horizontal=True)\n\n return image_list\n\n except SeleniumHelperExceptions as selenium_error:\n message = \"A selenium issue arose while trying to capture the scrolling element\"\n error = SeleniumError(message, selenium_error)\n raise error\n except Exception as e:\n message = \"Unhandled exception while taking the scrolling screenshot \" \\\n \"of the element '{0}' | {1}\".format(css_selector, e)\n raise ScreenshotException(message,\n stacktrace=traceback.format_exc(),\n details={\"css_selector\": css_selector})\n\n def _capture_single_viewport(self):\n \"\"\"\n Grabs an image of the page and then craps it to just the visible / viewport area\n :return\n - StringIO: A StingIO object containing the captured image\n \"\"\"\n cropped_image = self._get_image_data(viewport_only=True)\n return self._create_image_file(cropped_image)\n\n def _capture_full_page(self):\n \"\"\"\n Captures an image of the whole page. If there are sitcky elements, as specified by the footers and headers\n class variables the code will, the code will capture them only where appropriate ie. headers on top, footers on\n bottom. 
Otherwise the whole screen is sent back as it is currently set up.\n :return\n - StringIO: A StingIO object containing the captured image\n \"\"\"\n if self.headers and self.footers:\n # Capture viewport size window of the headers\n self.sh.scroll_window_to_position(0)\n self._hide_elements(self.footers)\n header_image = self._get_image_data(True)\n\n # - Capture the page from the bottom without headers\n self._show_elements(self.footers)\n #TODO: Update when scroll position updates to have a scroll to bottom option\n self.sh.scroll_window_to_position(40000)\n self._hide_elements(self.headers)\n footer_image = self._get_image_data()\n\n # Show all header elements again\n self._show_elements(self.headers)\n\n # Send the two images off to get merged into one\n image_data = self._crop_and_stitch_image(header_image, footer_image)\n elif self.headers:\n # Scroll to the top so that the headers are not covering content\n self.sh.scroll_window_to_position(0)\n time.sleep(0.5)\n image_data = self._get_image_data()\n elif self.footers:\n # Scroll to the bottom so that the footer items are not covering content\n self.sh.scroll_window_to_position(40000)\n time.sleep(0.5)\n image_data = self._get_image_data()\n else:\n image_data = self._get_image_data()\n\n return self._create_image_file(image_data)\n\n def _hide_elements(self, css_selectors):\n \"\"\"\n Hides all elements in the given list\n :param\n - css_selectors: list - A list of the elements you would like to hide\n \"\"\"\n for selector in css_selectors:\n try:\n self.sh.hide_element(selector)\n # Continue to the next item is this one did not exist or was already not visible\n except ElementNotVisibleError:\n pass\n except ElementError:\n pass\n\n def _show_elements(self, css_selectors):\n \"\"\"\n Shows all elements in the given list\n :param\n - css_selectors: list - A list of the elements you would like to make visible\n \"\"\"\n # Show footer items again\n for selector in css_selectors:\n try:\n self.sh.show_element(selector)\n # Continue to the next item is this one did not exist\n except ElementError:\n pass\n\n def _capture_headless_page(self, viewport_only):\n if self.paginated and not viewport_only:\n return self._capture_headless_paginated_page()\n\n # Store the current size and scroll position of the browser\n width, height = self.sh.get_window_size()\n current_scroll_position = self.sh.get_window_current_scroll_position()\n\n if not viewport_only:\n content_height = self.sh.get_content_height(self.content_container_selector)\n if content_height > self.max_height:\n self.sh.resize_browser(width, self.max_height + self.head_padding)\n time.sleep(self.resize_delay)\n elif height < content_height:\n self.sh.resize_browser(width, content_height + self.head_padding)\n time.sleep(self.resize_delay)\n self.sh.scroll_window_to_position(scroll_bottom=True)\n time.sleep(self.resize_delay)\n\n if content_height > self.max_height:\n images_list = []\n number_of_loops = int(math.ceil(content_height / self.max_height))\n\n # Loop through, starting at one for multiplication purposes\n for i in range(1, number_of_loops + 1):\n image_data = self.sh.get_screenshot_base64()\n image = Image.open(StringIO(image_data.decode('base64')))\n images_list.append(image)\n self.sh.scroll_window_to_position(self.max_height * i)\n\n # Combine al of the images into one capture\n image = self._combine_vertical_images(images_list, content_height)\n else:\n # Gather image byte data\n image_data = self.sh.get_screenshot_base64()\n # Create an image canvas and write the 
byte data to it\n image = Image.open(StringIO(image_data.decode('base64')))\n\n else:\n # Gather image byte data\n image_data = self.sh.get_screenshot_base64()\n # Create an image canvas and write the byte data to it\n image = Image.open(StringIO(image_data.decode('base64')))\n\n # - Return the browser to its previous size and scroll position\n if not viewport_only:\n self.sh.resize_browser(width, height)\n self.sh.scroll_window_to_position(current_scroll_position)\n time.sleep(self.resize_delay)\n\n return self._create_image_file(image)\n\n def _combine_vertical_images(self, images_list, content_height):\n height_of_full_images = 0\n total_height = 0\n total_width = 0\n\n # Make the last image the height of the remaining content\n for image in images_list[:-1]:\n height_of_full_images += image.size[1]\n remaining_height = (content_height * self.scale_factor) - height_of_full_images\n\n images_list[-1] = images_list[-1].crop((0,\n images_list[-1].size[1] - remaining_height,\n images_list[-1].size[0],\n images_list[-1].size[1]))\n\n for image in images_list:\n total_width = image.size[0] if image.size[0] > total_width else total_width\n total_height += image.size[1]\n\n resulting_image = Image.new('RGB', (total_width, total_height))\n current_height = 0\n for i, image in enumerate(images_list):\n resulting_image.paste(im=image, box=(0, current_height))\n current_height += image.size[1]\n\n return resulting_image\n\n def _capture_paginated_page(self, padding=None):\n \"\"\"\n Captures the page viewport by viewport, leaving an overlap of pixels the height of the self.padding variable\n between each image\n \"\"\"\n image_list = []\n scroll_padding = padding if padding else self.scroll_padding\n\n # Scroll page to the top\n self.sh.scroll_window_to_position(0)\n\n current_scroll_position = 0\n viewport_height = self.sh.driver.execute_script(\"return document.documentElement.clientHeight\")\n\n while True:\n # Capture the image\n image_list.append(self._capture_single_viewport())\n\n # Scroll for the next one!\n self.sh.scroll_window_to_position(current_scroll_position + viewport_height - scroll_padding)\n time.sleep(0.25)\n new_scroll_position = self.sh.get_window_current_scroll_position()\n\n # Break if the scroll position did not change (because it was at the bottom)\n if new_scroll_position == current_scroll_position:\n break\n else:\n current_scroll_position = new_scroll_position\n\n return image_list\n\n def _capture_headless_paginated_page(self, padding=None):\n \"\"\"\n Captures the page viewport by viewport, leaving an overlap of pixels the height of the self.padding variable\n between each image\n \"\"\"\n image_list = []\n scroll_padding = padding if padding else self.scroll_padding\n\n # Scroll page to the top\n self.sh.scroll_window_to_position(0)\n\n current_scroll_position = 0\n viewport_height = self.sh.driver.execute_script(\"return document.documentElement.clientHeight\")\n\n while True:\n # Capture the image\n image_data = self.sh.get_screenshot_base64()\n image_file = self._create_image_file(Image.open(StringIO(image_data.decode('base64'))))\n image_list.append(image_file)\n\n # Scroll for the next one!\n self.sh.scroll_window_to_position(current_scroll_position + viewport_height - scroll_padding)\n time.sleep(0.25)\n new_scroll_position = self.sh.get_window_current_scroll_position()\n\n # Break if the scroll position did not change (because it was at the bottom)\n if new_scroll_position == current_scroll_position:\n break\n else:\n current_scroll_position = 
new_scroll_position\n\n return image_list\n\n def _get_image_data(self, viewport_only=False):\n \"\"\"\n Creates an Image() canvas of the page. The image is cropped to be only the viewport area if specified.\n :param\n - viewport_only: bool - Captures only the visible /viewport area if true\n\n :return\n - image: Image() - The image canvas of the captured data\n \"\"\"\n # - Capture the image\n # Gather image byte data\n image_data = self.sh.get_screenshot_base64()\n # Create an image canvas and write the byte data to it\n image = Image.open(StringIO(image_data.decode('base64')))\n\n # - Crop the image to just the visible area\n # Top of the viewport\n current_scroll_position = self.sh.get_window_current_scroll_position()\n\n # Viewport Dimensions\n viewport_width, viewport_height = self.sh.get_viewport_size()\n\n # Image size of data returned by Selenium\n image_height, image_width = image.size\n\n if viewport_only:\n # Calculate the visible area\n crop_box = (0, current_scroll_position, viewport_width, current_scroll_position + viewport_height)\n\n # Crop everything of the image but the visible area\n cropped_image = image.crop(crop_box)\n return cropped_image\n else:\n # Calculate the visible area\n crop_box = (0, 0, viewport_width, image_width)\n\n # Crop everything of the image but the visible area\n cropped_image = image.crop(crop_box)\n return cropped_image\n\n def _crop_and_stitch_image(self, header_image, footer_image):\n \"\"\"\n This object takes in a header and footer image. It then searched for a block of 100 mixles that matches between\n the two images. Once it finds this point the footer image is cropped above the \"match\" point. A new canvas is\n then created that is the total height of both images. The two images are then copied onto a new canvas to create\n the final image, headers on top, footers on the bottom.\n :param\n - header_image: Image() - The top of the page, usually displays all of the headers elements\n - footer_image: Image() - The bottom of the page, usually displays all of the footer elements\n :return\n - stitched_image: Image() - The resulting image of the crop and stitching of the header and footer images\n \"\"\"\n try:\n # Create Pixel Row arrays from each image\n header_array = numpy.asarray(header_image)\n footer_array = numpy.asarray(footer_image)\n\n # - Find a place in both images that match then crop and stitch them at that location\n crop_row = 0\n header_image_height = header_image.height\n # Set the offset to the height of the image if the height is less than the offset\n if self.pixel_match_offset > header_image_height:\n self.pixel_match_offset = header_image_height\n\n # - Find the pixel row in the footer image that matches the bottom row in the header image\n # Grab the last 100 rows of header_image\n header_last_hundred_rows = header_array[header_image_height - self.pixel_match_offset: header_image_height]\n\n # Iterates throughout the check, will match the height of the row being checked in the image.\n for i, footer_row in enumerate(footer_array):\n # Jump out if the crop row has been set\n if crop_row != 0:\n break\n\n # Check if the current row being inspected matches the header row 100 pixels above the bottom\n if numpy.array_equal(footer_row, header_last_hundred_rows[0]):\n # It is a match!\n for y, row in enumerate(header_last_hundred_rows):\n # Check that the 100 footer rows above the matching row also match the bottom 100 of\n # the header image we grabbed at the start of this check\n if numpy.array_equal(footer_array[i + y], 
header_last_hundred_rows[y]):\n # Check whether we've found 100 matching rows or not\n if y == self.pixel_match_offset - 1:\n # Yes! All 100 matched. Set the crop row to this row\n crop_row = i + self.pixel_match_offset\n break\n\n # If no rows matched, crop at height of header image\n if crop_row == 0:\n crop_row = header_image_height\n\n # - Crop the top of the footer image off above the line that matches the header image's bottom row\n # Create the crop box that outlines what to remove from the footer image\n footer_image_width = footer_image.size[0]\n footer_image_height = footer_image.size[1]\n crop_box = (0, crop_row, footer_image_width, footer_image_height)\n # Perform the crop\n cropped_footer_image = footer_image.crop(crop_box)\n\n # Grab the new height of the footer image\n cropped_footer_image_height = cropped_footer_image.size[1]\n\n # Create a blank image canvas that is as tall the footer and header images combined\n total_height = header_image_height + cropped_footer_image_height\n stitched_image = Image.new(\"RGB\", (footer_image_width, total_height))\n\n # - Paste the header and footer images onto the canvas\n # Paste the header image at the top\n stitched_image.paste(header_image, (0, 0))\n # Paste the footer image directly below the header image\n stitched_image.paste(cropped_footer_image, (0, header_image_height))\n\n return stitched_image\n\n except Exception as e:\n message = \"Error while cropping and stitching a full page screenshot | {0}\".format(e)\n raise ScreenshotException(message, stacktrace=traceback.format_exc())\n\n def _create_image_file(self, image):\n \"\"\"\n This method takes an Image() variable and saves it into a StringIO \"file\".\n :param\n - image_data: Image() - The image to be saved into the StringIO object\n\n :return\n - image_file: StingIO() - The stringIO object containing the saved image\n \"\"\"\n # Instantiate the file object\n image_file = StringIO()\n # Save the image canvas to the file as the given file type\n image.save(image_file, self.file_extenson.upper())\n # Set the file marker back to the beginning\n image_file.seek(0)\n\n return image_file\n\n\nclass ScreenshotException(Exception):\n def __init__(self, msg, stacktrace=None, details=None):\n self.msg = msg\n self.details = {} if details is None else details\n self.details[\"stracktrace\"] = stacktrace\n super(ScreenshotException, self).__init__()\n\n def __str__(self):\n exception_msg = \"Screenshot Exception: \\n\"\n detail_string = \"Exception Details:\\n\"\n for key, value in self.details.items():\n detail_string += \"{0}: {1}\\n\".format(key, value)\n exception_msg += detail_string\n exception_msg += \"Message: {0}\".format(self.msg)\n\n return exception_msg\n\n\nclass SeleniumError(ScreenshotException):\n def __init__(self, message, selenium_helper_exception):\n new_message = \"{0} | {1}\".format(message, selenium_helper_exception.msg)\n super(SeleniumError, self).__init__(msg=new_message,\n stacktrace=selenium_helper_exception.stacktrace,\n details=selenium_helper_exception.details)\n"
] | [
[
"numpy.asarray",
"numpy.array_equal"
]
] |
feng-y16/Hamiltonian-Generative-Networks | [
"702d3ff3aec40eba20e17c5a1612b5b0b1e2f831"
] | [
"train.py"
] | [
"\"\"\"Script to train the Hamiltonian Generative Network\n\"\"\"\nimport ast\nimport argparse\nimport copy\nimport pprint\nimport os\nimport warnings\nimport yaml\n\nimport numpy as np\nimport torch\nimport tqdm\n\nfrom utilities.integrator import Integrator\nfrom utilities.training_logger import TrainingLogger\nfrom utilities import loader\nfrom utilities.loader import load_hgn, get_online_dataloaders, get_offline_dataloaders\nfrom utilities.losses import reconstruction_loss, kld_loss, geco_constraint\nfrom utilities.statistics import mean_confidence_interval\n\ndef _avoid_overwriting(experiment_id):\n # This function throws an error if the given experiment data already exists in runs/\n logdir = os.path.join('runs', experiment_id)\n if os.path.exists(logdir):\n assert len(os.listdir(logdir)) == 0,\\\n f'Experiment id {experiment_id} already exists in runs/. Remove it, change the name ' \\\n f'in the yaml file.'\n\n\nclass HgnTrainer:\n\n def __init__(self, params, resume=False):\n \"\"\"Instantiate and train the Hamiltonian Generative Network.\n\n Args:\n params (dict): Experiment parameters (see experiment_params folder).\n \"\"\"\n\n self.params = params\n self.resume = resume\n\n if not resume: # Fail if experiment_id already exist in runs/\n _avoid_overwriting(params[\"experiment_id\"])\n\n # Set device\n self.device = params[\"device\"]\n if \"cuda\" in self.device and not torch.cuda.is_available():\n warnings.warn(\n \"Warning! Set to train in GPU but cuda is not available. Device is set to CPU.\")\n self.device = \"cpu\"\n\n # Get dtype, will raise a 'module 'torch' has no attribute' if there is a typo\n self.dtype = torch.__getattribute__(params[\"networks\"][\"dtype\"])\n\n # Load hgn from parameters to deice\n self.hgn = load_hgn(params=self.params,\n device=self.device,\n dtype=self.dtype)\n if 'load_path' in self.params:\n self.load_and_reset(self.params, self.device, self.dtype)\n\n # Either generate data on-the-fly or load the data from disk\n if \"train_data\" in self.params[\"dataset\"]:\n print(\"Training with OFFLINE data...\")\n self.train_data_loader, self.test_data_loader = get_offline_dataloaders(self.params)\n else:\n print(\"Training with ONLINE data...\")\n self.train_data_loader, self.test_data_loader = get_online_dataloaders(self.params)\n\n # Initialize training logger\n self.training_logger = TrainingLogger(\n hyper_params=self.params,\n loss_freq=100,\n rollout_freq=1000,\n model_freq=10000\n )\n\n # Initialize tensorboard writer\n self.model_save_file = os.path.join(\n self.params[\"model_save_dir\"],\n self.params[\"experiment_id\"]\n )\n\n # Define optimization modules\n optim_params = [\n {\n 'params': self.hgn.encoder.parameters(),\n 'lr': params[\"optimization\"][\"encoder_lr\"]\n },\n {\n 'params': self.hgn.transformer.parameters(),\n 'lr': params[\"optimization\"][\"transformer_lr\"]\n },\n {\n 'params': self.hgn.hnn.parameters(),\n 'lr': params[\"optimization\"][\"hnn_lr\"]\n },\n {\n 'params': self.hgn.decoder.parameters(),\n 'lr': params[\"optimization\"][\"decoder_lr\"]\n },\n ]\n self.optimizer = torch.optim.Adam(optim_params)\n\n def load_and_reset(self, params, device, dtype):\n \"\"\"Load the HGN from the path specified in params['load_path'] and reset the networks in\n params['reset'].\n\n Args:\n params (dict): Dictionary with all the necessary parameters to load the networks.\n device (str): 'gpu:N' or 'cpu'\n dtype (torch.dtype): Data type to be used in computations.\n \"\"\"\n self.hgn.load(params['load_path'])\n if 'reset' in params:\n 
if isinstance(params['reset'], list):\n for net in params['reset']:\n assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']\n else:\n assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']\n if 'encoder' in params['reset']:\n self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)\n if 'decoder' in params['reset']:\n self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)\n if 'transformer' in params['reset']:\n self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)\n if 'hamiltonian' in params['reset']:\n self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)\n\n def training_step(self, rollouts):\n \"\"\"Perform a training step with the given rollouts batch.\n\n Args:\n rollouts (torch.Tensor): Tensor of shape (batch_size, seq_len, channels, height, width)\n corresponding to a batch of sampled rollouts.\n\n Returns:\n A dictionary of losses and the model's prediction of the rollout. The reconstruction loss and\n KL divergence are floats and prediction is the HGNResult object with data of the forward pass.\n \"\"\"\n self.optimizer.zero_grad()\n\n rollout_len = rollouts.shape[1]\n input_frames = self.params['optimization']['input_frames']\n assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length\n roll = rollouts[:, :input_frames]\n\n hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)\n target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)\n prediction = hgn_output.reconstructed_rollout\n\n if self.params[\"networks\"][\"variational\"]:\n tol = self.params[\"geco\"][\"tol\"]\n alpha = self.params[\"geco\"][\"alpha\"]\n lagrange_mult_param = self.params[\"geco\"][\"lagrange_multiplier_param\"]\n\n C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient\n\n # Compute moving average of constraint C (without gradient)\n if self.C_ma is None:\n self.C_ma = C.detach()\n else:\n self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()\n C_curr = C.detach().item() # keep track for logging\n C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient\n\n # Compute KL divergence\n mu = hgn_output.z_mean\n logvar = hgn_output.z_logvar\n kld = kld_loss(mu=mu, logvar=logvar)\n\n # normalize by number of frames, channels and pixels per frame\n kld_normalizer = prediction.flatten(1).size(1)\n kld = kld / kld_normalizer\n\n # Compute losses\n train_loss = kld + self.langrange_multiplier * C\n\n # clamping the langrange multiplier to avoid inf values\n self.langrange_multiplier = self.langrange_multiplier * torch.exp(\n lagrange_mult_param * C.detach())\n self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)\n\n losses = {\n 'loss/train': train_loss.item(),\n 'loss/kld': kld.item(),\n 'loss/C': C_curr,\n 'loss/C_ma': self.C_ma.item(),\n 'loss/rec': rec_loss.item(),\n 'other/langrange_mult': self.langrange_multiplier.item()\n }\n\n else: # not variational\n # Compute frame reconstruction error\n train_loss = reconstruction_loss(\n target=target,\n prediction=prediction)\n losses = {'loss/train': train_loss.item()}\n\n train_loss.backward()\n self.optimizer.step()\n\n return losses, hgn_output\n\n def fit(self):\n \"\"\"The trainer fits an HGN.\n\n Returns:\n (HGN) An HGN model that has been fitted to the data\n \"\"\"\n\n # Initial values for geco algorithm\n if 
self.params[\"networks\"][\"variational\"]:\n self.langrange_multiplier = self.params[\"geco\"][\"initial_lagrange_multiplier\"]\n self.C_ma = None\n\n # TRAIN\n for ep in range(self.params[\"optimization\"][\"epochs\"]):\n print(\"Epoch %s / %s\" % (str(ep + 1), str(self.params[\"optimization\"][\"epochs\"])))\n pbar = tqdm.tqdm(self.train_data_loader)\n for batch_idx, rollout_batch in enumerate(pbar):\n # Move to device and change dtype\n rollout_batch = rollout_batch.to(self.device).type(self.dtype)\n\n # Do an optimization step\n losses, prediction = self.training_step(rollouts=rollout_batch)\n\n # Log progress\n self.training_logger.step(losses=losses,\n rollout_batch=rollout_batch,\n prediction=prediction,\n model=self.hgn)\n\n # Progress-bar msg\n msg = \", \".join([\n f\"{k}: {v:.2e}\" for k, v in losses.items() if v is not None\n ])\n pbar.set_description(msg)\n # Save model\n self.hgn.save(self.model_save_file)\n\n self.test()\n return self.hgn\n \n def compute_reconst_kld_errors(self, dataloader):\n \"\"\"Computes reconstruction error and KL divergence.\n\n Args:\n dataloader (torch.utils.data.DataLoader): DataLoader to retrieve errors from.\n\n Returns:\n (reconst_error_mean, reconst_error_h), (kld_mean, kld_h): Tuples where the mean and 95%\n conficence interval is shown.\n \"\"\"\n first = True\n pbar = tqdm.tqdm(dataloader)\n \n for _, rollout_batch in enumerate(pbar):\n # Move to device and change dtype\n rollout_batch = rollout_batch.to(self.device).type(self.dtype)\n rollout_len = rollout_batch.shape[1]\n input_frames = self.params['optimization']['input_frames']\n assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length\n roll = rollout_batch[:, :input_frames]\n hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)\n target = rollout_batch[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)\n prediction = hgn_output.reconstructed_rollout\n error = reconstruction_loss(\n target=target,\n prediction=prediction, mean_reduction=False).detach().cpu(\n ).numpy()\n if self.params[\"networks\"][\"variational\"]:\n kld = kld_loss(mu=hgn_output.z_mean, logvar=hgn_output.z_logvar, mean_reduction=False).detach().cpu(\n ).numpy()\n # normalize by number of frames, channels and pixels per frame\n kld_normalizer = prediction.flatten(1).size(1)\n kld = kld / kld_normalizer\n if first:\n first = False\n set_errors = error\n if self.params[\"networks\"][\"variational\"]:\n set_klds = kld\n else:\n set_errors = np.concatenate((set_errors, error))\n if self.params[\"networks\"][\"variational\"]:\n set_klds = np.concatenate((set_klds, kld))\n err_mean, err_h = mean_confidence_interval(set_errors)\n if self.params[\"networks\"][\"variational\"]:\n kld_mean, kld_h = mean_confidence_interval(set_klds)\n return (err_mean, err_h), (kld_mean, kld_h)\n else:\n return (err_mean, err_h), None\n \n def test(self):\n \"\"\"Test after the training is finished and logs result to tensorboard.\n \"\"\"\n print(\"Calculating final training error...\")\n (err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)\n self.training_logger.log_error(\"Train reconstruction error\", err_mean, err_h)\n if kld is not None:\n kld_mean, kld_h = kld\n self.training_logger.log_error(\"Train KL divergence\", kld_mean, kld_h)\n\n print(\"Calculating final test error...\")\n (err_mean, err_h), kld = 
self.compute_reconst_kld_errors(self.test_data_loader)\n self.training_logger.log_error(\"Test reconstruction error\", err_mean, err_h)\n if kld is not None:\n kld_mean, kld_h = kld\n self.training_logger.log_error(\"Test KL divergence\", kld_mean, kld_h)\n\ndef _overwrite_config_with_cmd_arguments(config, args):\n if args.name is not None:\n config['experiment_id'] = args.name[0]\n if args.epochs is not None:\n config['optimization']['epochs'] = args.epochs[0]\n if args.dataset_path is not None:\n # Read the parameters.yaml file in the given dataset path\n dataset_config = _read_config(os.path.join(_args.dataset_path[0], 'parameters.yaml'))\n for key, value in dataset_config.items():\n config[key] = value\n if args.env is not None:\n if 'train_data' in config['dataset']:\n raise ValueError(\n f'--env was given but configuration is set for offline training: '\n f'train_data={config[\"dataset\"][\"train_data\"]}'\n )\n env_params = _read_config(DEFAULT_ENVIRONMENTS_PATH + args.env[0] + '.yaml')\n config['environment'] = env_params['environment']\n if args.params is not None:\n for p in args.params:\n key, value = p.split('=')\n ptr = config\n keys = key.split('.')\n for i, k in enumerate(keys):\n if i == len(keys) - 1:\n ptr[k] = ast.literal_eval(value)\n else:\n ptr = ptr[k]\n if args.load is not None:\n config['load_path'] = args.load[0]\n if args.reset is not None:\n config['reset'] = args.reset\n\n\ndef _read_config(config_file):\n with open(config_file, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config\n\n\ndef _merge_configs(train_config, dataset_config):\n config = copy.deepcopy(train_config)\n for key, value in dataset_config.items():\n config[key] = value\n # If the config specifies a dataset path, we take the rollout from the configuration file\n # in the given dataset\n if 'dataset' in config and 'train_data' in config['dataset']:\n dataset_config = _read_config( # Read parameters.yaml in root of given dataset\n os.path.join(os.path.dirname(config['dataset']['train_data']), 'parameters.yaml'))\n config['dataset']['rollout'] = dataset_config['dataset']['rollout']\n return config\n\n\ndef _ask_confirmation(config):\n printer = pprint.PrettyPrinter(indent=4)\n print(f'The training will be run with the following configuration:')\n printed_config = copy.deepcopy(_config)\n printed_config.pop('networks')\n printer.pprint(printed_config)\n print('Proceed? (y/n):')\n if input() != 'y':\n print('Abort.')\n exit()\n\n\nif __name__ == \"__main__\":\n\n DEFAULT_TRAIN_CONFIG_FILE = \"experiment_params/train_config_default.yaml\"\n DEFAULT_DATASET_CONFIG_FILE = \"experiment_params/dataset_online_default.yaml\"\n DEFAULT_ENVIRONMENTS_PATH = \"experiment_params/default_environments/\"\n DEFAULT_SAVE_MODELS_DIR = \"saved_models/\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--train-config', action='store', nargs=1, type=str, required=True,\n help=f'Path to the training configuration yaml file.'\n )\n parser.add_argument(\n '--dataset-config', action='store', nargs=1, type=str, required=False,\n help=f'Path to the dataset configuration yaml file.'\n )\n parser.add_argument(\n '--name', action='store', nargs=1, required=False,\n help='If specified, this name will be used instead of experiment_id of the yaml file.'\n )\n parser.add_argument(\n '--epochs', action='store', nargs=1, type=int, required=False,\n help='The number of training epochs. 
If not specified, optimization.epochs of the '\n 'training configuration will be used.'\n )\n parser.add_argument(\n '--env', action='store', nargs=1, type=str, required=False,\n help='The environment to use (for online training only). Possible values are '\n '\\'pendulum\\', \\'spring\\', \\'two_bodies\\', \\'three_bodies\\', corresponding to '\n 'environment configurations in experiment_params/default_environments/. If not '\n 'specified, the environment specified in the given --dataset-config will be used.'\n )\n parser.add_argument(\n '--dataset-path', action='store', nargs=1, type=str, required=False,\n help='Path to a stored dataset to use for training. For offline training only. In this '\n 'case no dataset configuration file will be loaded.'\n )\n parser.add_argument(\n '--params', action='store', nargs='+', required=False,\n help='Override one or more parameters in the config. The format of an argument is '\n 'param_name=param_value. Nested parameters are accessible by using a dot, '\n 'i.e. --param dataset.img_size=32. IMPORTANT: lists must be enclosed in double '\n 'quotes, i.e. --param environment.mass:\"[0.5, 0.5]\".'\n )\n parser.add_argument(\n '-y', '-y', action='store_true', default=False, required=False,\n help='Whether to skip asking for user confirmation before starting the training.'\n )\n parser.add_argument(\n '--resume', action='store', required=False, nargs='?', default=None,\n help='NOT IMPLEMENTED YET. Resume the training from a saved model. If a path is provided, '\n 'the training will be resumed from the given checkpoint. Otherwise, the last '\n 'checkpoint will be taken from saved_models/<experiment_id>.'\n )\n parser.add_argument(\n '--load', action='store', type=str, required=False, nargs=1,\n help='Path from which to load the HGN.'\n )\n parser.add_argument(\n '--reset', action='store', nargs='+', required=False,\n help='Use only in combimation with --load, tells the trainer to reinstantiate the given '\n 'networks. Values: \\'encoder\\', \\'transformer\\', \\'decoder\\', \\'hamiltonian\\'.'\n )\n _args = parser.parse_args()\n\n # Read configurations\n _train_config = _read_config(_args.train_config[0])\n if _args.dataset_path is None: # Will use the dataset config file (or default if not given)\n _dataset_config_file = DEFAULT_DATASET_CONFIG_FILE if _args.dataset_config is None else \\\n _args.dataset_config[0]\n _dataset_config = _read_config(_dataset_config_file)\n _config = _merge_configs(_train_config, _dataset_config)\n else: # Will use the dataset given in the command line arguments\n assert _args.dataset_config is None, 'Both --dataset-path and --dataset-config were given.'\n _config = _train_config\n\n # Overwrite configuration with command line arguments\n _overwrite_config_with_cmd_arguments(_config, _args)\n\n # Show configuration and ask user for confirmation\n if not _args.y:\n _ask_confirmation(_config)\n\n # Train HGN network\n trainer = HgnTrainer(_config)\n hgn = trainer.fit()\n"
] | [
[
"torch.__getattribute__",
"torch.optim.Adam",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.clamp"
]
] |
puririshi98/benchmark | [
"2440b3b6e177a9b38011eff3ec25f3b6052acfd0"
] | [
"DeepLearningExamples/PyTorch/Recommendation/DLRM/preproc/parquet_to_binary.py"
] | [
"# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom joblib import Parallel, delayed\nimport glob\nimport argparse\nimport tqdm\nimport subprocess\n\ndef process_file(f, dst):\n label = '_c0'\n dense_columns = [f'_c{i}' for i in range(1, 14)]\n categorical_columns = [f'_c{i}' for i in range(14, 40)]\n all_columns_sorted = [f'_c{i}' for i in range(0, 40)]\n data = pd.read_parquet(f)\n data = data[all_columns_sorted]\n\n data[label] = data[label].astype(np.int32)\n data[dense_columns] = data[dense_columns].astype(np.float32)\n data[categorical_columns] = data[categorical_columns].astype(np.int32)\n\n data = data.to_records(index=False)\n data = data.tobytes()\n\n dst_file = dst + '/' + f.split('/')[-1] + '.bin'\n with open(dst_file, 'wb') as dst_fd:\n dst_fd.write(data)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_dir', type=str)\n parser.add_argument('--intermediate_dir', type=str)\n parser.add_argument('--dst_dir', type=str)\n parser.add_argument('--parallel_jobs', default=40, type=int)\n args = parser.parse_args()\n\n print('Processing train files...')\n train_src_files = glob.glob(args.src_dir + '/train/*.parquet')\n train_intermediate_dir = os.path.join(args.intermediate_dir, 'train')\n os.makedirs(train_intermediate_dir, exist_ok=True)\n\n Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, train_intermediate_dir) for f in tqdm.tqdm(train_src_files))\n\n print('Train files conversion done')\n\n print('Processing test files...')\n test_src_files = glob.glob(args.src_dir + '/test/*.parquet')\n test_intermediate_dir = os.path.join(args.intermediate_dir, 'test')\n os.makedirs(test_intermediate_dir, exist_ok=True)\n\n Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, test_intermediate_dir) for f in tqdm.tqdm(test_src_files))\n print('Test files conversion done')\n\n print('Processing validation files...')\n valid_src_files = glob.glob(args.src_dir + '/validation/*.parquet')\n valid_intermediate_dir = os.path.join(args.intermediate_dir, 'validation')\n os.makedirs(valid_intermediate_dir, exist_ok=True)\n\n Parallel(n_jobs=args.parallel_jobs)(delayed(process_file)(f, valid_intermediate_dir) for f in tqdm.tqdm(valid_src_files))\n print('Validation files conversion done')\n\n os.makedirs(args.dst_dir, exist_ok=True)\n\n print('Concatenating train files')\n os.system(f'cat {train_intermediate_dir}/*.bin > {args.dst_dir}/train_data.bin')\n\n print('Concatenating test files')\n os.system(f'cat {test_intermediate_dir}/*.bin > {args.dst_dir}/test_data.bin')\n\n print('Concatenating validation files')\n os.system(f'cat {valid_intermediate_dir}/*.bin > {args.dst_dir}/validation_data.bin')\n print('Done')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.read_parquet"
]
] |
jklymak/dolfyn | [
"eea98fe0021886cf654e25293c385c5c3707ff8d"
] | [
"dolfyn/io/rdi.py"
] | [
"import numpy as np\nimport xarray as xr\nfrom .. import time as tmlib\nimport warnings\nfrom os.path import getsize\nfrom ._read_bin import bin_reader\nfrom .base import _find_userdata, _create_dataset, _abspath\nfrom ..rotate.rdi import _calc_beam_orientmat, _calc_orientmat\nfrom ..rotate.base import _set_coords\nfrom ..rotate.api import set_declination\n\n\ndef read_rdi(fname, userdata=None, nens=None, debug=0):\n \"\"\"Read a TRDI binary data file.\n\n Parameters\n ----------\n filename : string\n Filename of TRDI file to read.\n userdata : True, False, or string of userdata.json filename (default ``True``) \n Whether to read the '<base-filename>.userdata.json' file.\n nens : None (default: read entire file), int, or 2-element tuple (start, stop)\n Number of pings to read from the file\n\n Returns\n -------\n ds : xarray.Dataset\n An xarray dataset from the binary instrument data\n\n \"\"\"\n # Reads into a dictionary of dictionaries using netcdf naming conventions\n # Should be easier to debug\n with _RdiReader(fname, debug_level=debug) as ldr:\n dat = ldr.load_data(nens=nens)\n\n # Read in userdata\n userdata = _find_userdata(fname, userdata)\n for nm in userdata:\n dat['attrs'][nm] = userdata[nm]\n\n if 'time_gps' in dat['coords']:\n # GPS data not necessarily sampling at the same rate as ADCP DAQ.\n dat = _remove_gps_duplicates(dat)\n\n # Create xarray dataset from upper level dictionary\n ds = _create_dataset(dat)\n ds = _set_coords(ds, ref_frame=ds.coord_sys)\n\n # Create orientation matrices\n if 'beam2inst_orientmat' not in ds:\n ds['beam2inst_orientmat'] = xr.DataArray(_calc_beam_orientmat(\n ds.beam_angle,\n ds.beam_pattern == 'convex'),\n coords={'x': [1, 2, 3, 4],\n 'x*': [1, 2, 3, 4]},\n dims=['x', 'x*'])\n\n if 'orientmat' not in ds:\n ds['orientmat'] = xr.DataArray(_calc_orientmat(ds),\n coords={'earth': ['E', 'N', 'U'],\n 'inst': ['X', 'Y', 'Z'],\n 'time': ds['time']},\n dims=['earth', 'inst', 'time'])\n\n # Check magnetic declination if provided via software and/or userdata\n _set_rdi_declination(ds, fname, inplace=True)\n\n # VMDAS applies gps correction on velocity in .ENX files only\n if fname.rsplit('.')[-1] == 'ENX':\n ds.attrs['vel_gps_corrected'] = 1\n else: # (not ENR or ENS) or WinRiver files\n ds.attrs['vel_gps_corrected'] = 0\n\n # Convert time coords to dt64\n t_coords = [t for t in ds.coords if 'time' in t]\n for ky in t_coords:\n dt = tmlib.epoch2dt64(ds[ky])\n ds = ds.assign_coords({ky: dt})\n\n # Convert time vars to dt64\n t_data = [t for t in ds.data_vars if 'time' in t]\n for ky in t_data:\n dt = tmlib.epoch2dt64(ds[ky])\n ds[ky].data = dt\n\n return ds\n\n\ndef _remove_gps_duplicates(dat):\n \"\"\"\n Removes duplicate and nan timestamp values in 'time_gps' coordinate, and \n ads hardware (ADCP DAQ) timestamp corresponding to GPS acquisition\n (in addition to the GPS unit's timestamp).\n \"\"\"\n\n dat['data_vars']['hdwtime_gps'] = dat['coords']['time']\n dat['units']['hdwtime'] = 'seconds since 1970-01-01 00:00:00'\n\n # Remove duplicate timestamp values, if applicable\n dat['coords']['time_gps'], idx = np.unique(dat['coords']['time_gps'],\n return_index=True)\n # Remove nan values, if applicable\n nan = np.zeros(dat['coords']['time'].shape, dtype=bool)\n if any(np.isnan(dat['coords']['time_gps'])):\n nan = np.isnan(dat['coords']['time_gps'])\n dat['coords']['time_gps'] = dat['coords']['time_gps'][~nan]\n\n for key in dat['data_vars']:\n if 'gps' in key:\n dat['data_vars'][key] = dat['data_vars'][key][idx]\n if sum(nan) > 0:\n dat['data_vars'][key] 
= dat['data_vars'][key][~nan]\n\n return dat\n\n\ndef _set_rdi_declination(dat, fname, inplace):\n # If magnetic_var_deg is set, this means that the declination is already\n # included in the heading and in the velocity data.\n\n declin = dat.attrs.pop('declination', None) # userdata declination\n\n if dat.attrs['magnetic_var_deg'] != 0: # from TRDI software if set\n dat.attrs['declination'] = dat.attrs['magnetic_var_deg']\n dat.attrs['declination_in_orientmat'] = 1 # logical\n\n if dat.attrs['magnetic_var_deg'] != 0 and declin is not None:\n warnings.warn(\n \"'magnetic_var_deg' is set to {:.2f} degrees in the binary \"\n \"file '{}', AND 'declination' is set in the 'userdata.json' \"\n \"file. DOLfYN WILL USE THE VALUE of {:.2f} degrees in \"\n \"userdata.json. If you want to use the value in \"\n \"'magnetic_var_deg', delete the value from userdata.json and \"\n \"re-read the file.\"\n .format(dat.attrs['magnetic_var_deg'], fname, declin))\n dat.attrs['declination'] = declin\n\n if declin is not None:\n set_declination(dat, declin, inplace)\n\n\ncentury = 2000\ndata_defs = {'number': ([], 'data_vars', 'uint32', ''),\n 'rtc': ([7], 'sys', 'uint16', ''),\n 'builtin_test_fail': ([], 'data_vars', 'bool', ''),\n 'c_sound': ([], 'data_vars', 'float32', 'm/s'),\n 'depth': ([], 'data_vars', 'float32', 'm'),\n 'pitch': ([], 'data_vars', 'float32', 'deg'),\n 'roll': ([], 'data_vars', 'float32', 'deg'),\n 'heading': ([], 'data_vars', 'float32', 'deg'),\n 'temp': ([], 'data_vars', 'float32', 'C'),\n 'salinity': ([], 'data_vars', 'float32', 'psu'),\n 'min_preping_wait': ([], 'data_vars', 'float32', 's'),\n 'heading_std': ([], 'data_vars', 'float32', 'deg'),\n 'pitch_std': ([], 'data_vars', 'float32', 'deg'),\n 'roll_std': ([], 'data_vars', 'float32', 'deg'),\n 'adc': ([8], 'sys', 'uint8', ''),\n 'error_status_wd': ([], 'attrs', 'float32', ''),\n 'pressure': ([], 'data_vars', 'float32', 'dbar'),\n 'pressure_std': ([], 'data_vars', 'float32', 'dbar'),\n 'vel': (['nc', 4], 'data_vars', 'float32', 'm/s'),\n 'amp': (['nc', 4], 'data_vars', 'uint8', 'counts'),\n 'corr': (['nc', 4], 'data_vars', 'uint8', 'counts'),\n 'prcnt_gd': (['nc', 4], 'data_vars', 'uint8', '%'),\n 'status': (['nc', 4], 'data_vars', 'float32', ''),\n 'dist_bt': ([4], 'data_vars', 'float32', 'm'),\n 'vel_bt': ([4], 'data_vars', 'float32', 'm/s'),\n 'corr_bt': ([4], 'data_vars', 'uint8', 'counts'),\n 'amp_bt': ([4], 'data_vars', 'uint8', 'counts'),\n 'prcnt_gd_bt': ([4], 'data_vars', 'uint8', '%'),\n 'time': ([], 'coords', 'float64', ''),\n 'etime_gps': ([], 'coords', 'float64', ''),\n 'elatitude_gps': ([], 'data_vars', 'float64', 'deg'),\n 'elongitude_gps': ([], 'data_vars', 'float64', 'deg'),\n 'time_gps': ([], 'coords', 'float64', ''),\n 'latitude_gps': ([], 'data_vars', 'float64', 'deg'),\n 'longitude_gps': ([], 'data_vars', 'float64', 'deg'),\n 'ntime': ([], 'coords', 'float64', ''),\n 'flags': ([], 'data_vars', 'float32', ''),\n }\n\n\ndef _get(dat, nm):\n grp = data_defs[nm][1]\n if grp is None:\n return dat[nm]\n else:\n return dat[grp][nm]\n\n\ndef _in_group(dat, nm):\n grp = data_defs[nm][1]\n if grp is None:\n return nm in dat\n else:\n return nm in dat[grp]\n\n\ndef _pop(dat, nm):\n grp = data_defs[nm][1]\n if grp is None:\n dat.pop(nm)\n else:\n dat[grp].pop(nm)\n\n\ndef _setd(dat, nm, val):\n grp = data_defs[nm][1]\n if grp is None:\n dat[nm] = val\n else:\n dat[grp][nm] = val\n\n\ndef _idata(dat, nm, sz):\n group = data_defs[nm][1]\n dtype = data_defs[nm][2]\n units = data_defs[nm][3]\n arr = np.empty(sz, dtype=dtype)\n 
if dtype.startswith('float'):\n arr[:] = np.NaN\n dat[group][nm] = arr\n dat['units'][nm] = units\n return dat\n\n\ndef _get_size(name, n=None, ncell=0):\n sz = list(data_defs[name][0]) # create a copy!\n if 'nc' in sz:\n sz.insert(sz.index('nc'), ncell)\n sz.remove('nc')\n if n is None:\n return tuple(sz)\n return tuple(sz + [n])\n\n\nclass _variable_setlist(set):\n def __iadd__(self, vals):\n if vals[0] not in self:\n self |= set(vals)\n return self\n\n\nclass _ensemble():\n n_avg = 1\n k = -1 # This is the counter for filling the ensemble object\n\n def __getitem__(self, nm):\n return getattr(self, nm)\n\n def __init__(self, navg, n_cells):\n if navg is None or navg == 0:\n navg = 1\n self.n_avg = navg\n for nm in data_defs:\n setattr(self, nm,\n np.zeros(_get_size(nm, n=navg, ncell=n_cells),\n dtype=data_defs[nm][2]))\n\n def clean_data(self,):\n self['vel'][self['vel'] == -32.768] = np.NaN\n\n\nclass _RdiReader():\n _n_beams = 4 # Placeholder for 5-beam adcp, not currently used.\n _pos = 0\n progress = 0\n _cfgnames = dict.fromkeys([4, 5], 'bb-adcp')\n _cfgnames.update(dict.fromkeys([8, 9, 16], 'wh-adcp'))\n _cfgnames.update(dict.fromkeys([14, 23], 'os-adcp'))\n _cfac = 180 / 2 ** 31\n _source = 0\n _fixoffset = 0\n _nbyte = 0\n _winrivprob = False\n _search_num = 30000 # Maximum distance? to search\n _debug7f79 = None\n extrabytes = 0\n\n def __init__(self, fname, navg=1, debug_level=0):\n self.fname = _abspath(fname)\n print('\\nReading file {} ...'.format(fname))\n self._debug_level = debug_level\n self.cfg = {}\n self.cfg['name'] = 'wh-adcp'\n self.cfg['sourceprog'] = 'instrument'\n self.cfg['prog_ver'] = 0\n self.hdr = {}\n self.f = bin_reader(self.fname)\n self.read_hdr()\n self.read_cfg()\n self.f.seek(self._pos, 0)\n self.n_avg = navg\n self.ensemble = _ensemble(self.n_avg, self.cfg['n_cells'])\n self._filesize = getsize(self.fname)\n self._npings = int(self._filesize / (self.hdr['nbyte'] + 2 +\n self.extrabytes))\n self.vars_read = _variable_setlist(['time'])\n\n if self._debug_level > 0:\n print(' %d pings estimated in this file' % self._npings)\n\n def read_hdr(self,):\n fd = self.f\n cfgid = list(fd.read_ui8(2))\n nread = 0\n if self._debug_level > 2:\n print(self.f.pos)\n print(' cfgid0: [{:x}, {:x}]'.format(*cfgid))\n while (cfgid[0] != 127 or cfgid[1] != 127) or not self.checkheader():\n nextbyte = fd.read_ui8(1)\n pos = fd.tell()\n nread += 1\n cfgid[1] = cfgid[0]\n cfgid[0] = nextbyte\n if not pos % 1000:\n print(' Still looking for valid cfgid at file '\n 'position %d ...' 
% pos)\n self._pos = self.f.tell() - 2\n if self._debug_level > 0:\n print(fd.tell())\n self.read_hdrseg()\n\n def read_cfg(self,):\n cfgid = self.f.read_ui16(1)\n self.read_cfgseg()\n\n def init_data(self,):\n outd = {'data_vars': {}, 'coords': {},\n 'attrs': {}, 'units': {}, 'sys': {}}\n outd['attrs']['inst_make'] = 'TRDI'\n outd['attrs']['inst_model'] = 'Workhorse'\n outd['attrs']['inst_type'] = 'ADCP'\n outd['attrs']['rotate_vars'] = ['vel', ]\n # Currently RDI doesn't use IMUs\n outd['attrs']['has_imu'] = 0\n for nm in data_defs:\n outd = _idata(outd, nm,\n sz=_get_size(nm, self._nens, self.cfg['n_cells']))\n self.outd = outd\n\n def mean(self, dat):\n if self.n_avg == 1:\n return dat[..., 0]\n return np.nanmean(dat, axis=-1)\n\n def load_data(self, nens=None):\n if nens is None:\n self._nens = int(self._npings / self.n_avg)\n self._ens_range = (0, self._nens)\n elif (nens.__class__ is tuple or nens.__class__ is list) and \\\n len(nens) == 2:\n nens = list(nens)\n if nens[1] == -1:\n nens[1] = self._npings\n self._nens = int((nens[1] - nens[0]) / self.n_avg)\n self._ens_range = nens\n self.f.seek((self.hdr['nbyte'] + 2 + self.extrabytes) *\n self._ens_range[0], 1)\n else:\n self._nens = nens\n self._ens_range = (0, nens)\n if self._debug_level > 0:\n print(' taking data from pings %d - %d' % tuple(self._ens_range))\n print(' %d ensembles will be produced.' % self._nens)\n self.init_data()\n dat = self.outd\n dat['coords']['range'] = (self.cfg['bin1_dist_m'] +\n np.arange(self.cfg['n_cells']) *\n self.cfg['cell_size'])\n for nm in self.cfg:\n dat['attrs'][nm] = self.cfg[nm]\n for iens in range(self._nens):\n try:\n self.read_buffer()\n except:\n self.remove_end(iens)\n break\n self.ensemble.clean_data()\n # Fix the 'real-time-clock' century\n clock = self.ensemble.rtc[:, :]\n if clock[0, 0] < 100:\n clock[0, :] += century\n # Copy the ensemble to the dataset.\n for nm in self.vars_read:\n _get(dat, nm)[..., iens] = self.mean(self.ensemble[nm])\n try:\n dats = tmlib.date2epoch(\n tmlib.datetime(*clock[:6, 0],\n microsecond=clock[6, 0] * 10000))[0]\n except ValueError:\n warnings.warn(\"Invalid time stamp in ping {}.\".format(\n int(self.ensemble.number[0])))\n dat['coords']['time'][iens] = np.NaN\n else:\n dat['coords']['time'][iens] = np.median(dats)\n self.finalize()\n if 'vel_bt' in dat['data_vars']:\n dat['attrs']['rotate_vars'].append('vel_bt')\n return dat\n\n def read_buffer(self,):\n fd = self.f\n self.ensemble.k = -1 # so that k+=1 gives 0 on the first loop.\n self.print_progress()\n hdr = self.hdr\n while self.ensemble.k < self.ensemble.n_avg - 1:\n self.search_buffer()\n startpos = fd.tell() - 2\n self.read_hdrseg()\n byte_offset = self._nbyte + 2\n for n in range(len(hdr['dat_offsets'])):\n id = fd.read_ui16(1)\n self._winrivprob = False\n self.print_pos()\n retval = self.read_dat(id)\n if retval == 'FAIL':\n break\n byte_offset += self._nbyte\n if n < (len(hdr['dat_offsets']) - 1):\n oset = hdr['dat_offsets'][n + 1] - byte_offset\n if oset != 0:\n if self._debug_level > 0:\n print(' %s: Adjust location by %d\\n' % (id, oset))\n fd.seek(oset, 1)\n byte_offset = hdr['dat_offsets'][n + 1]\n else:\n if hdr['nbyte'] - 2 != byte_offset:\n if not self._winrivprob:\n if self._debug_level > 0:\n print(' {:d}: Adjust location by {:d}\\n'\n .format(id, hdr['nbyte'] - 2 - byte_offset))\n self.f.seek(hdr['nbyte'] - 2 - byte_offset, 1)\n byte_offset = hdr['nbyte'] - 2\n readbytes = fd.tell() - startpos\n offset = hdr['nbyte'] + 2 - byte_offset\n self.check_offset(offset, readbytes)\n 
self.print_pos(byte_offset=byte_offset)\n\n def search_buffer(self):\n \"\"\"\n Check to see if the next bytes indicate the beginning of a\n data block. If not, search for the next data block, up to\n _search_num times.\n \"\"\"\n id1 = list(self.f.read_ui8(2))\n search_cnt = 0\n fd = self.f\n if self._debug_level > 3:\n print(' -->In search_buffer...')\n while (search_cnt < self._search_num and\n ((id1[0] != 127 or id1[1] != 127) or\n not self.checkheader())):\n search_cnt += 1\n nextbyte = fd.read_ui8(1)\n id1[1] = id1[0]\n id1[0] = nextbyte\n if search_cnt == self._search_num:\n raise Exception(\n 'Searched {} entries... Bad data encountered. -> {}'\n .format(search_cnt, id1))\n elif search_cnt > 0:\n if self._debug_level > 0:\n print(' WARNING: Searched {} bytes to find next '\n 'valid ensemble start [{:x}, {:x}]'.format(search_cnt,\n *id1))\n\n def checkheader(self,):\n if self._debug_level > 1:\n print(\" ###In checkheader.\")\n fd = self.f\n valid = 0\n # print(self.f.pos)\n numbytes = fd.read_i16(1)\n if numbytes > 0:\n fd.seek(numbytes - 2, 1)\n cfgid = fd.read_ui8(2)\n if len(cfgid) == 2:\n fd.seek(-numbytes - 2, 1)\n if cfgid[0] == 127 and cfgid[1] in [127, 121]:\n if cfgid[1] == 121 and self._debug7f79 is None:\n self._debug7f79 = True\n valid = 1\n else:\n fd.seek(-2, 1)\n if self._debug_level > 1:\n print(\" ###Leaving checkheader.\")\n return valid\n\n def read_hdrseg(self,):\n fd = self.f\n hdr = self.hdr\n hdr['nbyte'] = fd.read_i16(1)\n if self._debug_level > 2:\n print(fd.tell())\n fd.seek(1, 1)\n ndat = fd.read_i8(1)\n hdr['dat_offsets'] = fd.read_i16(ndat)\n self._nbyte = 4 + ndat * 2\n\n def print_progress(self,):\n self.progress = self.f.tell()\n if self._debug_level > 1:\n print(' pos %0.0fmb/%0.0fmb\\n' %\n (self.f.tell() / 1048576., self._filesize / 1048576.))\n if (self.f.tell() - self.progress) < 1048576:\n return\n\n def print_pos(self, byte_offset=-1):\n \"\"\"Print the position in the file, used for debugging.\n \"\"\"\n if self._debug_level > 3:\n if hasattr(self, 'ensemble'):\n k = self.ensemble.k\n else:\n k = 0\n print(' pos: %d, pos_: %d, nbyte: %d, k: %d, byte_offset: %d' %\n (self.f.tell(), self._pos, self._nbyte, k, byte_offset))\n\n def check_offset(self, offset, readbytes):\n fd = self.f\n if offset != 4 and self._fixoffset == 0:\n if self._debug_level >= 1:\n print('\\n ********************************************\\n')\n if fd.tell() == self._filesize:\n print(' EOF reached unexpectedly - discarding this last ensemble\\n')\n else:\n print(\" Adjust location by {:d} (readbytes={:d},hdr['nbyte']={:d}\\n\"\n .format(offset, readbytes, self.hdr['nbyte']))\n print(\"\"\"\n NOTE - If this appears at the beginning of the file, it may be\n a dolfyn problem. 
Please report this message, with details here:\n https://github.com/lkilcher/dolfyn/issues/8\n\n - If this appears at the end of the file it means\n The file is corrupted and only a partial record\n has been read\\n\n \"\"\")\n print('\\n ********************************************\\n')\n self._fixoffset = offset - 4\n fd.seek(4 + self._fixoffset, 1)\n\n def read_dat(self, id):\n function_map = {0: (self.read_fixed, []), # 0000\n 128: (self.read_var, []), # 0080\n 256: (self.read_vel, []), # 0100\n 512: (self.read_corr, []), # 0200\n 768: (self.read_amp, []), # 0300\n 1024: (self.read_prcnt_gd, []), # 0400\n 1280: (self.read_status, []), # 0500\n 1536: (self.read_bottom, []), # 0600\n 8192: (self.read_vmdas, []), # 2000\n 8226: (self.read_winriver2, []), # 2022\n 8448: (self.read_winriver, [38]), # 2100\n 8449: (self.read_winriver, [97]), # 2101\n 8450: (self.read_winriver, [45]), # 2102\n 8451: (self.read_winriver, [60]), # 2103\n 8452: (self.read_winriver, [38]), # 2104\n # Loading of these data is currently not implemented:\n 1793: (self.skip_Ncol, [4]), # 0701 number of pings\n 1794: (self.skip_Ncol, [4]), # 0702 sum of squared vel\n 1795: (self.skip_Ncol, [4]), # 0703 sum of velocities\n 2560: (self.skip_Ncol, []), # 0A00 Beam 5 velocity\n # 0301 Beam 5 Number of good pings\n 769: (self.skip_Ncol, []),\n # 0302 Beam 5 Sum of squared velocities\n 770: (self.skip_Ncol, []),\n # 0303 Beam 5 Sum of velocities\n 771: (self.skip_Ncol, []),\n # 020C Ambient sound profile\n 524: (self.skip_Nbyte, [4]),\n 12288: (self.skip_Nbyte, [32]),\n # 3000 Fixed attitude data format for OS-ADCPs\n }\n # Call the correct function:\n if id in function_map:\n if self._debug_level >= 2:\n print(' Reading code {}...'.format(hex(id)), end='')\n retval = function_map.get(id)[0](*function_map[id][1])\n if retval:\n return retval\n if self._debug_level >= 2:\n print(' success!')\n else:\n self.read_nocode(id)\n\n def read_fixed(self,):\n if hasattr(self, 'configsize'):\n self.f.seek(self.configsize, 1)\n self._nbyte = self.configsize\n else:\n self.read_cfgseg()\n if self._debug_level >= 1:\n print(self._pos)\n self._nbyte += 2\n\n def read_cfgseg(self,):\n cfgstart = self.f.tell()\n cfg = self.cfg\n fd = self.f\n tmp = fd.read_ui8(5)\n prog_ver0 = tmp[0]\n cfg['prog_ver'] = tmp[0] + tmp[1] / 100.\n cfg['name'] = self._cfgnames.get(tmp[0],\n 'unrecognized firmware version')\n config = tmp[2:4]\n cfg['beam_angle'] = [15, 20, 30][(config[1] & 3)]\n #cfg['numbeams'] = [4, 5][int((config[1] & 16) == 16)]\n cfg['freq'] = ([75, 150, 300, 600, 1200, 2400, 38][(config[0] & 7)])\n cfg['beam_pattern'] = (['concave',\n 'convex'][int((config[0] & 8) == 8)])\n cfg['orientation'] = ['down', 'up'][int((config[0] & 128) == 128)]\n #cfg['simflag'] = ['real', 'simulated'][tmp[4]]\n fd.seek(1, 1)\n cfg['n_beams'] = fd.read_ui8(1)\n cfg['n_cells'] = fd.read_ui8(1)\n cfg['pings_per_ensemble'] = fd.read_ui16(1)\n cfg['cell_size'] = fd.read_ui16(1) * .01\n cfg['blank'] = fd.read_ui16(1) * .01\n cfg['prof_mode'] = fd.read_ui8(1)\n cfg['corr_threshold'] = fd.read_ui8(1)\n cfg['prof_codereps'] = fd.read_ui8(1)\n cfg['min_pgood'] = fd.read_ui8(1)\n cfg['evel_threshold'] = fd.read_ui16(1)\n cfg['sec_between_ping_groups'] = (\n np.sum(np.array(fd.read_ui8(3)) *\n np.array([60., 1., .01])))\n coord_sys = fd.read_ui8(1)\n cfg['coord_sys'] = (['beam', 'inst',\n 'ship', 'earth'][((coord_sys >> 3) & 3)])\n cfg['use_pitchroll'] = ['no', 'yes'][(coord_sys & 4) == 4]\n cfg['use_3beam'] = ['no', 'yes'][(coord_sys & 2) == 2]\n cfg['bin_mapping'] = 
['no', 'yes'][(coord_sys & 1) == 1]\n cfg['xducer_misalign_deg'] = fd.read_i16(1) * .01\n cfg['magnetic_var_deg'] = fd.read_i16(1) * .01\n cfg['sensors_src'] = np.binary_repr(fd.read_ui8(1), 8)\n cfg['sensors_avail'] = np.binary_repr(fd.read_ui8(1), 8)\n cfg['bin1_dist_m'] = fd.read_ui16(1) * .01\n cfg['xmit_pulse'] = fd.read_ui16(1) * .01\n cfg['water_ref_cells'] = list(fd.read_ui8(2)) # list for attrs\n cfg['fls_target_threshold'] = fd.read_ui8(1)\n fd.seek(1, 1)\n cfg['xmit_lag_m'] = fd.read_ui16(1) * .01\n self._nbyte = 40\n self.configsize = self.f.tell() - cfgstart\n\n def read_var(self,):\n \"\"\" Read variable leader \"\"\"\n fd = self.f\n self.ensemble.k += 1\n ens = self.ensemble\n k = ens.k\n self.vars_read += ['number',\n 'rtc',\n 'number',\n 'builtin_test_fail',\n 'c_sound',\n 'depth',\n 'heading',\n 'pitch',\n 'roll',\n 'salinity',\n 'temp',\n 'min_preping_wait',\n 'heading_std',\n 'pitch_std',\n 'roll_std',\n 'adc']\n ens.number[k] = fd.read_ui16(1)\n ens.rtc[:, k] = fd.read_ui8(7)\n ens.number[k] += 65535 * fd.read_ui8(1)\n ens.builtin_test_fail[k] = fd.read_ui16(1)\n ens.c_sound[k] = fd.read_ui16(1)\n ens.depth[k] = fd.read_ui16(1) * 0.1\n ens.heading[k] = fd.read_ui16(1) * 0.01\n ens.pitch[k] = fd.read_i16(1) * 0.01\n ens.roll[k] = fd.read_i16(1) * 0.01\n ens.salinity[k] = fd.read_i16(1)\n ens.temp[k] = fd.read_i16(1) * 0.01\n ens.min_preping_wait[k] = (fd.read_ui8(\n 3) * np.array([60, 1, .01])).sum()\n ens.heading_std[k] = fd.read_ui8(1)\n ens.pitch_std[k] = fd.read_ui8(1) * 0.1\n ens.roll_std[k] = fd.read_ui8(1) * 0.1\n ens.adc[:, k] = fd.read_i8(8)\n self._nbyte = 2 + 40\n\n def read_vel(self,):\n ens = self.ensemble\n self.vars_read += ['vel']\n k = ens.k\n ens['vel'][:, :, k] = np.array(\n self.f.read_i16(4 * self.cfg['n_cells'])\n ).reshape((self.cfg['n_cells'], 4)) * .001\n self._nbyte = 2 + 4 * self.cfg['n_cells'] * 2\n\n def read_corr(self,):\n k = self.ensemble.k\n self.vars_read += ['corr']\n self.ensemble.corr[:, :, k] = np.array(\n self.f.read_ui8(4 * self.cfg['n_cells'])\n ).reshape((self.cfg['n_cells'], 4))\n self._nbyte = 2 + 4 * self.cfg['n_cells']\n\n def read_amp(self,):\n k = self.ensemble.k\n self.vars_read += ['amp']\n self.ensemble.amp[:, :, k] = np.array(\n self.f.read_ui8(4 * self.cfg['n_cells'])\n ).reshape((self.cfg['n_cells'], 4))\n self._nbyte = 2 + 4 * self.cfg['n_cells']\n\n def read_prcnt_gd(self,):\n self.vars_read += ['prcnt_gd']\n self.ensemble.prcnt_gd[:, :, self.ensemble.k] = np.array(\n self.f.read_ui8(4 * self.cfg['n_cells'])\n ).reshape((self.cfg['n_cells'], 4))\n self._nbyte = 2 + 4 * self.cfg['n_cells']\n\n def read_status(self,):\n self.vars_read += ['status']\n self.ensemble.status[:, :, self.ensemble.k] = np.array(\n self.f.read_ui8(4 * self.cfg['n_cells'])\n ).reshape((self.cfg['n_cells'], 4))\n self._nbyte = 2 + 4 * self.cfg['n_cells']\n\n def read_bottom(self,):\n self.vars_read += ['dist_bt', 'vel_bt', 'corr_bt', 'amp_bt',\n 'prcnt_gd_bt']\n fd = self.f\n ens = self.ensemble\n k = ens.k\n cfg = self.cfg\n if self._source == 2:\n self.vars_read += ['latitude_gps', 'longitude_gps']\n fd.seek(2, 1)\n long1 = fd.read_ui16(1)\n fd.seek(6, 1)\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n if ens.latitude_gps[k] == 0:\n ens.latitude_gps[k] = np.NaN\n else:\n fd.seek(14, 1)\n ens.dist_bt[:, k] = fd.read_ui16(4) * 0.01\n ens.vel_bt[:, k] = fd.read_i16(4) * 0.001\n ens.corr_bt[:, k] = fd.read_ui8(4)\n ens.amp_bt[:, k] = fd.read_ui8(4)\n ens.prcnt_gd_bt[:, k] = fd.read_ui8(4)\n if self._source == 2:\n fd.seek(2, 1)\n 
ens.longitude_gps[k] = (\n long1 + 65536 * fd.read_ui16(1)) * self._cfac\n if ens.longitude_gps[k] > 180:\n ens.longitude_gps[k] = ens.longitude_gps[k] - 360\n if ens.longitude_gps[k] == 0:\n ens.longitude_gps[k] = np.NaN\n fd.seek(16, 1)\n qual = fd.read_ui8(1)\n if qual == 0:\n print(' qual==%d,%f %f' % (qual,\n ens.latitude_gps[k],\n ens.longitude_gps[k]))\n ens.latitude_gps[k] = np.NaN\n ens.longitude_gps[k] = np.NaN\n fd.seek(71 - 45 - 16 - 17, 1)\n self._nbyte = 2 + 68\n else:\n fd.seek(71 - 45, 1)\n self._nbyte = 2 + 68\n if cfg['prog_ver'] >= 5.3:\n fd.seek(78 - 71, 1)\n ens.dist_bt[:, k] = ens.dist_bt[:, k] + fd.read_ui8(4) * 655.36\n self._nbyte += 11\n if cfg['name'] == 'wh-adcp':\n if cfg['prog_ver'] >= 16.20:\n fd.seek(4, 1)\n self._nbyte += 4\n\n def read_vmdas(self,):\n \"\"\" Read something from VMDAS \"\"\"\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76\n\n def read_winriver2(self, ):\n startpos = self.f.tell()\n self._winrivprob = True\n self.cfg['sourceprog'] = 'WINRIVER'\n ens = self.ensemble\n k = ens.k\n if self._source != 3 and self._debug_level >= 1:\n warnings.warn(' \\n***** Apparently a WINRIVER2 file\\n'\n '***** WARNING: Raw NMEA data '\n 'handler not yet fully implemented\\n\\n')\n self._source = 3\n spid = self.f.read_ui16(1)\n if spid == 104:\n sz = self.f.read_ui16(1)\n dtime = self.f.read_f64(1)\n start_string = self.f.reads(6)\n _ = self.f.reads(1)\n if start_string != '$GPGGA':\n if self._debug_level > 1:\n warnings.warn(f'Invalid GPGGA string found in ensemble {k},'\n ' skipping...')\n return 'FAIL'\n gga_time = str(self.f.reads(9))\n time = tmlib.timedelta(hours=int(gga_time[0:2]),\n minutes=int(gga_time[2:4]),\n seconds=int(gga_time[4:6]),\n milliseconds=int(gga_time[7:])*100)\n clock = self.ensemble.rtc[:, :]\n if clock[0, 0] < 100:\n clock[0, :] += century\n ens.time_gps[k] = tmlib.date2epoch(tmlib.datetime(\n *clock[:3, 0]) + time)[0]\n self.f.seek(1, 1)\n ens.latitude_gps[k] = self.f.read_f64(1)\n tcNS = self.f.reads(1)\n if tcNS == 'S':\n ens.latitude_gps[k] *= -1\n elif tcNS != 'N':\n if self._debug_level > 1:\n warnings.warn(f'Invalid GPGGA string found in ensemble {k},'\n ' skipping...')\n return 'FAIL'\n 
ens.longitude_gps[k] = self.f.read_f64(1)\n tcEW = self.f.reads(1)\n if tcEW == 'W':\n ens.longitude_gps[k] *= -1\n elif tcEW != 'E':\n if self._debug_level > 1:\n warnings.warn(f'Invalid GPGGA string found in ensemble {k},'\n ' skipping...')\n return 'FAIL'\n ucqual, n_sat = self.f.read_ui8(2)\n tmp = self.f.read_float(2)\n ens.hdop, ens.altitude = tmp\n if self.f.reads(1) != 'M':\n if self._debug_level > 1:\n warnings.warn(f'Invalid GPGGA string found in ensemble {k},'\n ' skipping...')\n return 'FAIL'\n ggeoid_sep = self.f.read_float(1)\n if self.f.reads(1) != 'M':\n if self._debug_level > 1:\n warnings.warn(f'Invalid GPGGA string found in ensemble {k},'\n ' skipping...')\n return 'FAIL'\n gage = self.f.read_float(1)\n gstation_id = self.f.read_ui16(1)\n # 4 unknown bytes (2 reserved+2 checksum?)\n # 78 bytes for GPGGA string (including \\r\\n)\n # 2 reserved + 2 checksum\n self.vars_read += ['longitude_gps', 'latitude_gps', 'time_gps']\n self._nbyte = self.f.tell() - startpos + 2\n if self._debug_level >= 5:\n print('')\n print(sz, ens.longitude_gps[k])\n\n def read_winriver(self, nbt):\n self._winrivprob = True\n self.cfg['sourceprog'] = 'WINRIVER'\n if self._source not in [2, 3]:\n if self._debug_level >= 1:\n warnings.warn('\\n ***** Apparently a WINRIVER file - '\n 'Raw NMEA data handler not yet implemented\\n\\n')\n self._source = 2\n startpos = self.f.tell()\n sz = self.f.read_ui16(1)\n tmp = self.f.reads(sz)\n self._nbyte = self.f.tell() - startpos + 2\n\n def skip_Ncol(self, n_skip=1):\n self.f.seek(n_skip * self.cfg['n_cells'], 1)\n self._nbyte = 2 + n_skip * self.cfg['n_cells']\n\n def skip_Nbyte(self, n_skip):\n self.f.seek(n_skip, 1)\n self._nbyte = self._nbyte = 2 + n_skip\n\n def read_nocode(self, id):\n # Skipping bytes from codes 0340-30FC, commented if needed\n # hxid = hex(id)\n # if hxid[2:4] == '30':\n # raise Exception(\"\")\n # # I want to count the number of 1s in the middle 4 bits\n # # of the 2nd two bytes.\n # # 60 is a 0b00111100 mask\n # nflds = (bin(int(hxid[3]) & 60).count('1') +\n # bin(int(hxid[4]) & 60).count('1'))\n # # I want to count the number of 1s in the highest\n # # 2 bits of byte 3\n # # 3 is a 0b00000011 mask:\n # dfac = bin(int(hxid[3], 0) & 3).count('1')\n # self.skip_Nbyte(12 * nflds * dfac)\n # else:\n print(' Unrecognized ID code: %0.4X\\n' % id)\n\n def remove_end(self, iens):\n dat = self.outd\n print(' Encountered end of file. Cleaning up data.')\n for nm in self.vars_read:\n _setd(dat, nm, _get(dat, nm)[..., :iens])\n\n def finalize(self, ):\n \"\"\"Remove the attributes from the data that were never loaded.\n \"\"\"\n dat = self.outd\n for nm in set(data_defs.keys()) - self.vars_read:\n _pop(dat, nm)\n for nm in self.cfg:\n dat['attrs'][nm] = self.cfg[nm]\n dat['attrs']['fs'] = (dat['attrs']['sec_between_ping_groups'] *\n dat['attrs']['pings_per_ensemble']) ** (-1)\n for nm in data_defs:\n shp = data_defs[nm][0]\n if len(shp) and shp[0] == 'nc' and _in_group(dat, nm):\n _setd(dat, nm, np.swapaxes(_get(dat, nm), 0, 1))\n\n def __exit__(self, type, value, traceback):\n self.f.close()\n\n def __enter__(self,):\n return self\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"numpy.nanmean",
"numpy.median",
"numpy.arange",
"numpy.isnan",
"numpy.array",
"numpy.unique"
]
] |
Prepaire/MolGNN_fewshot | [
"c7c17afdeae7f2ef0c8e3ca2da033091ec7537ca"
] | [
"chem/util.py"
] | [
"import torch\nimport random\nimport collections\n\nimport networkx as nx\nfrom rdkit.Chem import AllChem\nimport numpy as np\n\nfrom loader import graph_data_obj_to_nx_simple, nx_to_graph_data_obj_simple\n\nfrom loader import MoleculeDataset\n\n\ndef get_filtered_fingerprint(smiles):\n \"\"\" Get filtered PubChem fingerprint. The digits related to elements other than C,\n H, O, N, S, F, Cl, and Br are discarded.\n\n Args:\n smiles (str): SMILES string.\n\n Return:\n fp (np.ndarray): The filtered PubChem fingerprint as a vector.\n length (int): length of the filtered vector.\n \"\"\"\n from PyFingerprint.All_Fingerprint import get_fingerprint\n\n fp = get_fingerprint(smiles, fp_type=\"pubchem\", output=\"vector\")\n del_pos = (\n [\n 26,\n 27,\n 28,\n 29,\n 30,\n 31,\n 32,\n 41,\n 42,\n 46,\n 47,\n 48,\n 295,\n 296,\n 298,\n 303,\n 304,\n 348,\n 354,\n 369,\n 407,\n 411,\n 415,\n 456,\n 525,\n 627,\n ]\n + list(range(49, 115))\n + list(range(263, 283))\n + list(range(288, 293))\n + list(range(310, 317))\n + list(range(318, 327))\n + list(range(327, 332))\n + list(range(424, 427))\n )\n fp = np.delete(fp, del_pos)\n return fp\n\n\ndef check_same_molecules(s1, s2):\n mol1 = AllChem.MolFromSmiles(s1)\n mol2 = AllChem.MolFromSmiles(s2)\n return AllChem.MolToInchi(mol1) == AllChem.MolToInchi(mol2)\n\n\nclass NegativeEdge:\n def __init__(self):\n \"\"\"\n Randomly sample negative edges\n \"\"\"\n pass\n\n def __call__(self, data):\n num_nodes = data.num_nodes\n num_edges = data.num_edges\n\n edge_set = set(\n [\n str(data.edge_index[0, i].cpu().item())\n + \",\"\n + str(data.edge_index[1, i].cpu().item())\n for i in range(data.edge_index.shape[1])\n ]\n )\n\n redandunt_sample = torch.randint(0, num_nodes, (2, 5 * num_edges))\n sampled_ind = []\n sampled_edge_set = set([])\n for i in range(5 * num_edges):\n node1 = redandunt_sample[0, i].cpu().item()\n node2 = redandunt_sample[1, i].cpu().item()\n edge_str = str(node1) + \",\" + str(node2)\n if not any(\n [edge_str in edge_set, edge_str in sampled_edge_set, node1 == node2]\n ):\n sampled_edge_set.add(edge_str)\n sampled_ind.append(i)\n if len(sampled_ind) == num_edges / 2:\n break\n\n data.negative_edge_index = redandunt_sample[:, sampled_ind]\n\n return data\n\n\nclass ExtractSubstructureContextPair:\n def __init__(self, k, l1, l2):\n \"\"\"\n Randomly selects a node from the data object, and adds attributes\n that contain the substructure that corresponds to k hop neighbours\n rooted at the node, and the context substructures that corresponds to\n the subgraph that is between l1 and l2 hops away from the\n root node.\n :param k:\n :param l1:\n :param l2:\n \"\"\"\n self.k = k\n self.l1 = l1\n self.l2 = l2\n\n # for the special case of 0, addresses the quirk with\n # single_source_shortest_path_length\n if self.k == 0:\n self.k = -1\n if self.l1 == 0:\n self.l1 = -1\n if self.l2 == 0:\n self.l2 = -1\n\n def __call__(self, data, root_idx=None):\n \"\"\"\n\n :param data: pytorch geometric data object\n :param root_idx: If None, then randomly samples an atom idx.\n Otherwise sets atom idx of root (for debugging only)\n :return: None. 
Creates new attributes in original data object:\n data.center_substruct_idx\n data.x_substruct\n data.edge_attr_substruct\n data.edge_index_substruct\n data.x_context\n data.edge_attr_context\n data.edge_index_context\n data.overlap_context_substruct_idx\n \"\"\"\n num_atoms = data.x.size(0)\n if root_idx is None:\n root_idx = random.sample(range(num_atoms), 1)[0]\n\n G = graph_data_obj_to_nx_simple(data) # same ordering as input data obj\n\n # Get k-hop subgraph rooted at specified atom idx\n substruct_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.k\n ).keys()\n if len(substruct_node_idxes) > 0:\n substruct_G = G.subgraph(substruct_node_idxes)\n substruct_G, substruct_node_map = reset_idxes(substruct_G) # need\n # to reset node idx to 0 -> num_nodes - 1, otherwise data obj does not\n # make sense, since the node indices in data obj must start at 0\n substruct_data = nx_to_graph_data_obj_simple(substruct_G)\n data.x_substruct = substruct_data.x\n data.edge_attr_substruct = substruct_data.edge_attr\n data.edge_index_substruct = substruct_data.edge_index\n data.center_substruct_idx = torch.tensor(\n [substruct_node_map[root_idx]]\n ) # need\n # to convert center idx from original graph node ordering to the\n # new substruct node ordering\n\n # Get subgraphs that is between l1 and l2 hops away from the root node\n l1_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.l1\n ).keys()\n l2_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.l2\n ).keys()\n context_node_idxes = set(l1_node_idxes).symmetric_difference(set(l2_node_idxes))\n if len(context_node_idxes) > 0:\n context_G = G.subgraph(context_node_idxes)\n context_G, context_node_map = reset_idxes(context_G) # need to\n # reset node idx to 0 -> num_nodes - 1, otherwise data obj does not\n # make sense, since the node indices in data obj must start at 0\n context_data = nx_to_graph_data_obj_simple(context_G)\n data.x_context = context_data.x\n data.edge_attr_context = context_data.edge_attr\n data.edge_index_context = context_data.edge_index\n\n # Get indices of overlapping nodes between substruct and context,\n # WRT context ordering\n context_substruct_overlap_idxes = list(\n set(context_node_idxes).intersection(set(substruct_node_idxes))\n )\n if len(context_substruct_overlap_idxes) > 0:\n context_substruct_overlap_idxes_reorder = [\n context_node_map[old_idx] for old_idx in context_substruct_overlap_idxes\n ]\n # need to convert the overlap node idxes, which is from the\n # original graph node ordering to the new context node ordering\n data.overlap_context_substruct_idx = torch.tensor(\n context_substruct_overlap_idxes_reorder\n )\n\n return data\n\n # ### For debugging ###\n # if len(substruct_node_idxes) > 0:\n # substruct_mol = graph_data_obj_to_mol_simple(data.x_substruct,\n # data.edge_index_substruct,\n # data.edge_attr_substruct)\n # print(AllChem.MolToSmiles(substruct_mol))\n # if len(context_node_idxes) > 0:\n # context_mol = graph_data_obj_to_mol_simple(data.x_context,\n # data.edge_index_context,\n # data.edge_attr_context)\n # print(AllChem.MolToSmiles(context_mol))\n #\n # print(list(context_node_idxes))\n # print(list(substruct_node_idxes))\n # print(context_substruct_overlap_idxes)\n # ### End debugging ###\n\n def __repr__(self):\n return \"{}(k={},l1={}, l2={})\".format(\n self.__class__.__name__, self.k, self.l1, self.l2\n )\n\n\ndef reset_idxes(G):\n \"\"\"\n Resets node indices such that they are numbered from 0 to num_nodes - 1\n :param G:\n 
:return: copy of G with relabelled node indices, mapping\n \"\"\"\n mapping = {}\n for new_idx, old_idx in enumerate(G.nodes()):\n mapping[old_idx] = new_idx\n new_G = nx.relabel_nodes(G, mapping, copy=True)\n return new_G, mapping\n\n\n# TODO(Bowen): more unittests\nclass MaskAtom:\n def __init__(self, num_atom_features, num_edge_type, mask_rate, mask_edge=True):\n \"\"\"\n Randomly masks an atom, and optionally masks edges connecting to it.\n The mask atom type index is num_possible_atom_type\n The mask edge type index in num_possible_edge_type\n :param num_atom_type:\n :param num_edge_type:\n :param mask_rate: % of atoms to be masked\n :param mask_edge: If True, also mask the edges that connect to the\n masked atoms\n \"\"\"\n self.num_atom_features = num_atom_features\n self.num_edge_type = num_edge_type\n self.mask_rate = mask_rate\n self.mask_edge = mask_edge\n\n def __call__(self, data, masked_atom_indices=None):\n \"\"\"\n\n :param data: pytorch geometric data object. Assume that the edge\n ordering is the default pytorch geometric ordering, where the two\n directions of a single edge occur in pairs.\n Eg. data.edge_index = tensor([[0, 1, 1, 2, 2, 3],\n [1, 0, 2, 1, 3, 2]])\n :param masked_atom_indices: If None, then randomly samples num_atoms\n * mask rate number of atom indices\n Otherwise a list of atom idx that sets the atoms to be masked (for\n debugging only)\n :return: None, Creates new attributes in original data object:\n data.mask_node_idx\n data.mask_node_label\n data.mask_edge_idx\n data.mask_edge_label\n \"\"\"\n\n if masked_atom_indices is None:\n # sample x distinct atoms to be masked, based on mask rate. But\n # will sample at least 1 atom\n num_atoms = data.x.size()[0]\n sample_size = int(round(num_atoms * self.mask_rate))\n if sample_size == 0:\n sample_size = 1\n masked_atom_indices = random.sample(range(num_atoms), sample_size)\n\n # create mask node label by copying atom feature of mask atom\n mask_node_labels_list = []\n for atom_idx in masked_atom_indices:\n mask_node_labels_list.append(data.x[atom_idx].view(1, -1))\n data.mask_node_label = torch.cat(mask_node_labels_list, dim=0)\n data.masked_atom_indices = torch.tensor(masked_atom_indices)\n\n # modify the original node feature of the masked node\n for atom_idx in masked_atom_indices:\n data.x[atom_idx] = torch.tensor([0] * self.num_atom_features)\n\n if self.mask_edge:\n # create mask edge labels by copying edge features of edges that are bonded\n # to mask atoms\n connected_edge_indices = []\n for bond_idx, (u, v) in enumerate(data.edge_index.cpu().numpy().T):\n for atom_idx in masked_atom_indices:\n if (\n atom_idx in set((u, v))\n and bond_idx not in connected_edge_indices\n ):\n connected_edge_indices.append(bond_idx)\n\n if len(connected_edge_indices) > 0:\n # create mask edge labels by copying bond features of the bonds\n # connected to the mask atoms\n mask_edge_labels_list = []\n for bond_idx in connected_edge_indices[::2]: # because the\n # edge ordering is such that two directions of a single\n # edge occur in pairs, so to get the unique undirected\n # edge indices, we take every 2nd edge index from list\n mask_edge_labels_list.append(data.edge_attr[bond_idx].view(1, -1))\n\n data.mask_edge_label = torch.cat(mask_edge_labels_list, dim=0)\n # modify the original bond features of the bonds connected to the mask\n # atoms\n for bond_idx in connected_edge_indices:\n data.edge_attr[bond_idx] = torch.tensor([self.num_edge_type, 0])\n\n data.connected_edge_indices = 
torch.tensor(connected_edge_indices[::2])\n else:\n data.mask_edge_label = torch.empty((0, 2)).to(torch.int64)\n data.connected_edge_indices = torch.tensor(connected_edge_indices).to(\n torch.int64\n )\n\n # data.x = data.x[2:]\n\n return data\n\n def __repr__(self):\n reprs = \"{}(num_atom_features={}, num_edge_type={}, mask_rate={}, mask_edge={})\"\n return reprs.format(\n self.__class__.__name__,\n self.num_atom_features,\n self.num_edge_type,\n self.mask_rate,\n self.mask_edge,\n )\n\n\nclass ONEHOT_ContextPair(object):\n\n ONEHOTENCODING_CODEBOOKS = {\n \"atom_type\": list(range(119)),\n \"degree\": list(range(11)),\n \"formal_charge\": list(range(11)),\n \"hybridization_type\": list(range(7)),\n \"aromatic\": [0, 1],\n \"chirality_type\": [0, 1, 2, 3],\n }\n\n def __init__(self, dataset, k, l1, l2):\n\n self.dataset = dataset\n self.k = k\n self.l1 = l1\n self.l2 = l2\n\n # for the special case of 0, addresses the quirk with\n # single_source_shortest_path_length\n if self.k == 0:\n self.k = -1\n if self.l1 == 0:\n self.l1 = -1\n if self.l2 == 0:\n self.l2 = -1\n\n self.FEATURE_NAMES = [\n \"atom_type\",\n \"degree\",\n \"formal_charge\",\n \"hybridization_type\",\n \"aromatic\",\n \"chirality_type\",\n ]\n self.ONEHOTENCODING = [0, 1, 2, 3, 4, 5]\n\n def get_CODEBOOKS(self):\n if self.ONEHOTENCODING_CODEBOOKS:\n # print(\"ONEHOTENCODING_CODEBOOKS is available already, do not need to\n # regenerate ONEHOTENCODING_CODEBOOKS\")\n # print(ONEHOTENCODING_CODEBOOKS)\n return\n\n # print(f\"generating ONEHOTENCODING_CODEBOOKS......\")\n features_all = [data.x.numpy() for data in self.dataset]\n features = np.vstack(features_all)\n node_attributes_cnt = {}\n for j, col in enumerate(zip(*features)):\n node_attributes_cnt[self.FEATURE_NAMES[j]] = collections.Counter(col)\n\n self.ONEHOTENCODING_CODEBOOKS.update(\n {\n feature_name: sorted(node_attributes_cnt[feature_name].keys())\n for feature_name in self.FEATURE_NAMES\n }\n )\n\n def get_onehot_features(self, features):\n feature_one_hot = []\n # print(f'input features{features}')\n for row in features.tolist():\n this_row = []\n for j, feature_val_before_onehot in enumerate(row):\n onehot_code = self.ONEHOTENCODING_CODEBOOKS[self.FEATURE_NAMES[j]]\n onehot_val = [0.0] * len(onehot_code)\n assert feature_val_before_onehot in onehot_code\n onehot_val[onehot_code.index(feature_val_before_onehot)] = 1.0\n this_row += onehot_val\n feature_one_hot.append(this_row)\n return torch.Tensor(feature_one_hot)\n\n def __call__(self, data, root_idx=None):\n\n self.get_CODEBOOKS()\n # print(f'before onehot data {data.x.numpy()}')\n\n num_atoms = data.x.size(0)\n if root_idx is None:\n root_idx = random.sample(range(num_atoms), 1)[0]\n\n G = graph_data_obj_to_nx_simple(data) # same ordering as input data obj\n\n # Get k-hop subgraph rooted at specified atom idx\n substruct_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.k\n ).keys()\n if len(substruct_node_idxes) > 0:\n substruct_G = G.subgraph(substruct_node_idxes)\n substruct_G, substruct_node_map = reset_idxes(substruct_G) # need\n # to reset node idx to 0 -> num_nodes - 1, otherwise data obj does not\n # make sense, since the node indices in data obj must start at 0\n substruct_data = nx_to_graph_data_obj_simple(substruct_G)\n data.x_substruct = substruct_data.x\n data.edge_attr_substruct = substruct_data.edge_attr\n data.edge_index_substruct = substruct_data.edge_index\n data.center_substruct_idx = torch.tensor(\n [substruct_node_map[root_idx]]\n ) # need\n # to convert center 
idx from original graph node ordering to the\n # new substruct node ordering\n\n data.x_substruct = self.get_onehot_features(data.x_substruct.numpy())\n # Get subgraphs that is between l1 and l2 hops away from the root node\n l1_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.l1\n ).keys()\n l2_node_idxes = nx.single_source_shortest_path_length(\n G, root_idx, self.l2\n ).keys()\n context_node_idxes = set(l1_node_idxes).symmetric_difference(set(l2_node_idxes))\n if len(context_node_idxes) > 0:\n context_G = G.subgraph(context_node_idxes)\n context_G, context_node_map = reset_idxes(context_G) # need to\n # reset node idx to 0 -> num_nodes - 1, otherwise data obj does not\n # make sense, since the node indices in data obj must start at 0\n context_data = nx_to_graph_data_obj_simple(context_G)\n data.x_context = context_data.x\n data.edge_attr_context = context_data.edge_attr\n data.edge_index_context = context_data.edge_index\n data.x_context = self.get_onehot_features(data.x_context.numpy())\n\n # Get indices of overlapping nodes between substruct and context,\n # WRT context ordering\n context_substruct_overlap_idxes = list(\n set(context_node_idxes).intersection(set(substruct_node_idxes))\n )\n if len(context_substruct_overlap_idxes) > 0:\n context_substruct_overlap_idxes_reorder = [\n context_node_map[old_idx] for old_idx in context_substruct_overlap_idxes\n ]\n # need to convert the overlap node idxes, which is from the\n # original graph node ordering to the new context node ordering\n data.overlap_context_substruct_idx = torch.tensor(\n context_substruct_overlap_idxes_reorder\n )\n\n # print(f'after onehot data{onehot_features.size()}')\n\n # print()\n # print ( data )\n return data\n\n def __repr__(self):\n return \"{}(k={},l1={}, l2={})\".format(\n self.__class__.__name__, self.k, self.l1, self.l2\n )\n\n # def __repr__(self):\n # return f'{self.__class__.__name__}'\n\n \n \n \n \n \n \nclass ONEHOT_ENCODING(object):\n \n \n ONEHOTENCODING_CODEBOOKS = {\n \"atom_type\": list(range(119)),\n \"degree\": list(range(11)),\n \"formal_charge\": list(range(11)),\n \"hybridization_type\": list(range(7)),\n \"aromatic\": [0, 1],\n \"chirality_type\": [0, 1, 2, 3],\n}\n\n\n\n def __init__(self, dataset):\n\n self.dataset = dataset \n\n \n self.FEATURE_NAMES = [\n \"atom_type\",\n \"degree\",\n \"formal_charge\",\n \"hybridization_type\",\n \"aromatic\",\n \"chirality_type\",\n ]\n self.ONEHOTENCODING = [0, 1, 2, 3, 4, 5]\n \n \n def get_CODEBOOKS(self):\n \n \n if self.ONEHOTENCODING_CODEBOOKS:\n # print(\"ONEHOTENCODING_CODEBOOKS is available already, do not need to\n # regenerate ONEHOTENCODING_CODEBOOKS\")\n # print(ONEHOTENCODING_CODEBOOKS)\n return\n\n \n features_all = [data.x.numpy() for data in self.dataset]\n features = np.vstack(features_all)\n node_attributes_cnt = {}\n for j, col in enumerate(zip(*features)):\n node_attributes_cnt[self.FEATURE_NAMES[j]] = collections.Counter(col)\n\n ONEHOTENCODING_CODEBOOKS.update({\n feature_name: sorted(node_attributes_cnt[feature_name].keys())\n for feature_name in self.FEATURE_NAMES} )\n \n #print(f\"generating ONEHOTENCODING_CODEBOOKS......\")\n\n \n def get_onehot_features(self,features):\n feature_one_hot = []\n #print(f'input features{features}')\n for row in features.tolist():\n this_row = []\n for j, feature_val_before_onehot in enumerate(row):\n onehot_code = self.ONEHOTENCODING_CODEBOOKS[self.FEATURE_NAMES[j]]\n onehot_val = [0.0] * len(onehot_code)\n assert feature_val_before_onehot in onehot_code\n 
onehot_val[onehot_code.index(feature_val_before_onehot)] = 1.0 \n this_row += onehot_val\n feature_one_hot.append(this_row)\n return torch.Tensor(feature_one_hot)\n\n\n def __call__(self, data):\n\n self.get_CODEBOOKS()\n #print(f'before onehot data {data.x.numpy()}')\n onehot_features = self.get_onehot_features(data.x.numpy()) \n #print(f'after onehot data{onehot_features.size()}')\n data.x = onehot_features\n #print()\n #print ( data )\n return data\n \n\n def __repr__(self):\n return f'{self.__class__.__name__}'\n\nif __name__ == \"__main__\":\n transform = NegativeEdge()\n dataset = MoleculeDataset(\"dataset/tox21\", dataset=\"tox21\")\n transform(dataset[0])\n\n \"\"\"\n # TODO(Bowen): more unit tests\n # test ExtractSubstructureContextPair\n\n smiles = 'C#Cc1c(O)c(Cl)cc(/C=C/N)c1S'\n m = AllChem.MolFromSmiles(smiles)\n data = mol_to_graph_data_obj_simple(m)\n root_idx = 13\n\n # 0 hops: no substructure or context. We just test the absence of x attr\n transform = ExtractSubstructureContextPair(0, 0, 0)\n transform(data, root_idx)\n assert not hasattr(data, 'x_substruct')\n assert not hasattr(data, 'x_context')\n\n # k > n_nodes, l1 = 0 and l2 > n_nodes: substructure and context same as\n # molecule\n data = mol_to_graph_data_obj_simple(m)\n transform = ExtractSubstructureContextPair(100000, 0, 100000)\n transform(data, root_idx)\n substruct_mol = graph_data_obj_to_mol_simple(data.x_substruct,\n data.edge_index_substruct,\n data.edge_attr_substruct)\n context_mol = graph_data_obj_to_mol_simple(data.x_context,\n data.edge_index_context,\n data.edge_attr_context)\n assert check_same_molecules(AllChem.MolToSmiles(substruct_mol),\n AllChem.MolToSmiles(context_mol))\n\n transform = ExtractSubstructureContextPair(1, 1, 10000)\n transform(data, root_idx)\n\n # increase k from 0, and increase l1 from 1 while keeping l2 > n_nodes: the\n # total number of atoms should be n_atoms\n for i in range(len(m.GetAtoms())):\n data = mol_to_graph_data_obj_simple(m)\n print('i: {}'.format(i))\n transform = ExtractSubstructureContextPair(i, i, 100000)\n transform(data, root_idx)\n if hasattr(data, 'x_substruct'):\n n_substruct_atoms = data.x_substruct.size()[0]\n else:\n n_substruct_atoms = 0\n print('n_substruct_atoms: {}'.format(n_substruct_atoms))\n if hasattr(data, 'x_context'):\n n_context_atoms = data.x_context.size()[0]\n else:\n n_context_atoms = 0\n print('n_context_atoms: {}'.format(n_context_atoms))\n assert n_substruct_atoms + n_context_atoms == len(m.GetAtoms())\n\n # l1 < k and l2 >= k, so an overlap exists between context and substruct\n data = mol_to_graph_data_obj_simple(m)\n transform = ExtractSubstructureContextPair(2, 1, 3)\n transform(data, root_idx)\n assert hasattr(data, 'center_substruct_idx')\n\n # check correct overlap atoms between context and substruct\n\n\n # m = AllChem.MolFromSmiles('COC1=CC2=C(NC(=N2)[S@@](=O)CC2=NC=C(C)C(OC)=C2C)C=C1')\n # data = mol_to_graph_data_obj_simple(m)\n # root_idx = 9\n # k = 1\n # l1 = 1\n # l2 = 2\n # transform = ExtractSubstructureContextPaidata =\n # mol_to_graph_data_obj_simple(m)r(k, l1, l2)\n # transform(data, root_idx)\n pass\n\n # TODO(Bowen): more unit tests\n # test MaskAtom\n from loader import mol_to_graph_data_obj_simple, \\\n graph_data_obj_to_mol_simple\n\n smiles = 'C#Cc1c(O)c(Cl)cc(/C=C/N)c1S'\n m = AllChem.MolFromSmiles(smiles)\n original_data = mol_to_graph_data_obj_simple(m)\n num_atom_type = 118\n num_edge_type = 5\n\n # manually specify masked atom indices, don't mask edge\n masked_atom_indices = [13, 12]\n data = 
mol_to_graph_data_obj_simple(m)\n transform = MaskAtom(num_atom_type, num_edge_type, 0.1, mask_edge=False)\n transform(data, masked_atom_indices)\n assert data.mask_node_label.size() == torch.Size(\n (len(masked_atom_indices), 2))\n assert not hasattr(data, 'mask_edge_label')\n # check that the correct rows in x have been modified to be mask atom type\n assert (data.x[masked_atom_indices] == torch.tensor(([num_atom_type,\n 0]))).all()\n assert (data.mask_node_label == original_data.x[masked_atom_indices]).all()\n\n # manually specify masked atom indices, mask edge\n masked_atom_indices = [13, 12]\n data = mol_to_graph_data_obj_simple(m)\n transform = MaskAtom(num_atom_type, num_edge_type, 0.1, mask_edge=True)\n transform(data, masked_atom_indices)\n assert data.mask_node_label.size() == torch.Size(\n (len(masked_atom_indices), 2))\n # check that the correct rows in x have been modified to be mask atom type\n assert (data.x[masked_atom_indices] == torch.tensor(([num_atom_type,\n 0]))).all()\n assert (data.mask_node_label == original_data.x[masked_atom_indices]).all()\n # check that the correct rows in edge_attr have been modified to be mask edge\n # type, and the mask_edge_label are correct\n rdkit_bonds = []\n for atom_idx in masked_atom_indices:\n bond_indices = list(AllChem.FindAtomEnvironmentOfRadiusN(m, radius=1,\n rootedAtAtom=atom_idx))\n for bond_idx in bond_indices:\n rdkit_bonds.append(\n (m.GetBonds()[bond_idx].GetBeginAtomIdx(), m.GetBonds()[\n bond_idx].GetEndAtomIdx()))\n rdkit_bonds.append(\n (m.GetBonds()[bond_idx].GetEndAtomIdx(), m.GetBonds()[\n bond_idx].GetBeginAtomIdx()))\n rdkit_bonds = set(rdkit_bonds)\n connected_edge_indices = []\n for i in range(data.edge_index.size()[1]):\n if tuple(data.edge_index.numpy().T[i].tolist()) in rdkit_bonds:\n connected_edge_indices.append(i)\n assert (data.edge_attr[connected_edge_indices] ==\n torch.tensor(([num_edge_type, 0]))).all()\n assert (data.mask_edge_label == original_data.edge_attr[\n connected_edge_indices[::2]]).all() # data.mask_edge_label contains\n # the unique edges (ignoring direction). The data obj has edge ordering\n # such that two directions of a single edge occur in pairs, so to get the\n # unique undirected edge indices, we take every 2nd edge index from list\n \"\"\"\n"
] | [
[
"numpy.vstack",
"torch.empty",
"torch.randint",
"torch.tensor",
"numpy.delete",
"torch.cat",
"torch.Tensor"
]
] |
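The transform code in the row above expands each integer node feature into a one-hot block before returning the matrix as a torch.Tensor. A minimal sketch of that encoding step, assuming per-column codebooks are passed in explicitly (the real transform reads its CODEBOOKS from the dataset loader, which is not part of this row); the toy values below are illustrative only:

import torch

def one_hot_features(x, codebooks):
    """One-hot encode each column of an integer feature matrix.

    x         : LongTensor of shape [num_nodes, num_features]
    codebooks : list of lists; codebooks[j] holds the allowed values of column j
    """
    columns = []
    for j, codes in enumerate(codebooks):
        # map each raw value to its position inside the column's codebook
        index = torch.tensor([codes.index(int(v)) for v in x[:, j]])
        onehot = torch.zeros(x.size(0), len(codes))
        onehot[torch.arange(x.size(0)), index] = 1.0
        columns.append(onehot)
    # concatenate the per-column blocks into the final feature matrix
    return torch.cat(columns, dim=1)

# toy usage: two nodes, two feature columns (e.g. atom type, chirality tag)
x = torch.tensor([[6, 0], [8, 1]])
codebooks = [[1, 6, 7, 8], [0, 1, 2]]
print(one_hot_features(x, codebooks).shape)  # torch.Size([2, 7])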
eunice-chan/train-procgen | [
"3f7cc3e54c535ed41aa9cb510f408e87d74c87aa"
] | [
"train_procgen/evaluate.py"
] | [
"import tensorflow as tf\nfrom baselines.ppo2 import ppo2\nfrom baselines.common.models import build_impala_cnn\nfrom baselines.common.mpi_util import setup_mpi_gpus\nfrom procgen import ProcgenEnv\nfrom baselines.common.vec_env import (\n VecExtractDictObs,\n VecMonitor,\n VecFrameStack,\n VecNormalize\n)\nfrom baselines import logger\nfrom mpi4py import MPI\nimport argparse\nfrom .alternate_ppo2 import alt_ppo2\nimport os\nfrom baselines.common import set_global_seeds\nfrom baselines.common.policies import build_policy\n\ndef eval_fn(load_path, args, env_name='fruitbot', distribution_mode='easy', num_levels=500, start_level=500, log_dir='./tmp/procgen', comm=None, num_trials=3, gui=False):\n\n learning_rate = 5e-4\n ent_coef = .01\n gamma = .999\n lam = .95\n nsteps = 256\n nminibatches = 8\n ppo_epochs = 3\n clip_range = .2\n use_vf_clipping = True\n vf_coef = 0.5\n max_grad_norm = 0.5\n\n mpi_rank_weight = 1\n log_interval = 1\n seed=None\n\n log_comm = comm.Split(0, 0)\n format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []\n logger.configure(comm=log_comm, dir=log_dir, format_strs=format_strs)\n\n logger.info(\"creating environment\")\n venv = ProcgenEnv(num_envs=1, env_name=env_name, num_levels=num_levels, start_level=start_level, distribution_mode=distribution_mode)\n venv = VecExtractDictObs(venv, \"rgb\")\n\n venv = VecMonitor(\n venv=venv, filename=None, keep_buf=100,\n )\n\n venv = VecNormalize(venv=venv, ob=False)\n\n logger.info(\"creating tf session\")\n setup_mpi_gpus()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True #pylint: disable=E1101\n sess = tf.Session(config=config)\n sess.__enter__()\n\n conv_fn = lambda x: build_impala_cnn(x, depths=[16,32,32], emb_size=256)\n\n logger.info(f\"evaluating\")\n\n set_global_seeds(seed)\n\n policy = build_policy(venv, conv_fn)\n\n # Get the nb of env\n nenvs = venv.num_envs\n # Get state_space and action_space\n ob_space = venv.observation_space\n ac_space = venv.action_space\n\n # Calculate the batch_size\n nbatch = nenvs * nsteps\n nbatch_train = nbatch // nminibatches\n\n # Instantiate the model object (that creates act_model and train_model)\n from .alternate_ppo2.model import Model\n model_fn = Model\n\n model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,\n nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,\n max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)\n\n if os.path.isfile(load_path):\n alt_ppo2.eval(\n network=conv_fn,\n nsteps=nsteps,\n ent_coef=ent_coef,\n vf_coef=vf_coef,\n max_grad_norm=max_grad_norm,\n gamma=gamma,\n lam=lam,\n log_interval=log_interval,\n nminibatches=nminibatches,\n noptepochs=ppo_epochs,\n load_path=load_path,\n mpi_rank_weight=mpi_rank_weight,\n comm=comm,\n clip_vf=use_vf_clipping,\n lr=learning_rate,\n cliprange=clip_range,\n policy=policy,\n nenvs=nenvs,\n ob_space=ob_space,\n ac_space=ac_space,\n nbatch=nbatch,\n nbatch_train=nbatch_train,\n model_fn=model_fn,\n model=model,\n num_trials=num_trials,\n num_levels=num_levels,\n start_level=start_level,\n gui=gui,\n args=args\n )\n elif os.path.isdir(load_path):\n for file in os.listdir(load_path):\n log_comm = comm.Split(0, 0)\n format_strs = ['csv', 'stdout'] if log_comm.Get_rank() == 0 else []\n logger.configure(comm=log_comm, dir=log_dir+'/'+file, format_strs=format_strs)\n alt_ppo2.eval(\n network=conv_fn,\n nsteps=nsteps,\n ent_coef=ent_coef,\n vf_coef=vf_coef,\n max_grad_norm=max_grad_norm,\n gamma=gamma,\n lam=lam,\n 
log_interval=log_interval,\n nminibatches=nminibatches,\n noptepochs=ppo_epochs,\n load_path=load_path+'/'+file,\n mpi_rank_weight=mpi_rank_weight,\n comm=comm,\n clip_vf=use_vf_clipping,\n lr=learning_rate,\n cliprange=clip_range,\n policy=policy,\n nenvs=nenvs,\n ob_space=ob_space,\n ac_space=ac_space,\n nbatch=nbatch,\n nbatch_train=nbatch_train,\n model_fn=model_fn,\n model=model,\n num_trials=num_trials,\n num_levels=num_levels,\n start_level=start_level,\n gui=gui,\n args=args\n )\n else:\n print('Model path does not exist.')\n return\n\ndef main():\n parser = argparse.ArgumentParser(description='Process procgen evaluation arguments.')\n parser.add_argument('--load_model', type=str, required=True)\n parser.add_argument('--log_dir', type=str, default='./logs/eval')\n parser.add_argument('--env_name', type=str, default='fruitbot')\n parser.add_argument('--distribution_mode', type=str, default='easy', choices=[\"easy\", \"hard\", \"exploration\", \"memory\", \"extreme\"])\n parser.add_argument('--num_levels', type=int, default=500)\n parser.add_argument('--start_level', type=int, default=0)\n parser.add_argument('--num_trials', type=int, default=3)\n parser.add_argument('--gui', action='store_true')\n\n args = parser.parse_args()\n\n comm = MPI.COMM_WORLD\n\n eval_fn(args.load_model,\n log_dir=args.log_dir,\n env_name=args.env_name,\n distribution_mode=args.distribution_mode,\n num_levels=args.num_levels,\n start_level=args.start_level,\n num_trials=args.num_trials,\n comm=comm,\n gui=args.gui,\n args=args\n )\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session"
]
] |
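The evaluation script in the row above only touches TensorFlow directly to create a session with GPU memory growth enabled before baselines builds the policy. That session setup in isolation, as a sketch that assumes TF 1.x (or tf.compat.v1 on TF 2.x) and leaves out the ProcGen/baselines pieces:

import tensorflow.compat.v1 as tf  # plain `import tensorflow as tf` on TF 1.x

tf.disable_eager_execution()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand, not all at once
sess = tf.Session(config=config)

with sess.as_default():
    x = tf.constant([1.0, 2.0, 3.0])
    print(sess.run(tf.reduce_mean(x)))  # 2.0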
thomasjpfan/pyamg | [
"b0904d31c8da0c29affcd7d0fcd2bb8cb910b42a"
] | [
"pyamg/aggregation/aggregation.py"
] | [
"\"\"\"Support for aggregation-based AMG.\"\"\"\nfrom __future__ import absolute_import\n\n\nfrom warnings import warn\nimport numpy as np\nfrom scipy.sparse import csr_matrix, isspmatrix_csr, isspmatrix_bsr,\\\n SparseEfficiencyWarning\n\nfrom pyamg.multilevel import multilevel_solver\nfrom pyamg.relaxation.smoothing import change_smoothers\nfrom pyamg.util.utils import relaxation_as_linear_operator,\\\n eliminate_diag_dom_nodes, blocksize,\\\n levelize_strength_or_aggregation, levelize_smooth_or_improve_candidates\nfrom pyamg.strength import classical_strength_of_connection,\\\n symmetric_strength_of_connection, evolution_strength_of_connection,\\\n energy_based_strength_of_connection, distance_strength_of_connection,\\\n algebraic_distance, affinity_distance\nfrom .aggregate import standard_aggregation, naive_aggregation,\\\n lloyd_aggregation\nfrom .tentative import fit_candidates\nfrom .smooth import jacobi_prolongation_smoother,\\\n richardson_prolongation_smoother, energy_prolongation_smoother\n\n__all__ = ['smoothed_aggregation_solver']\n\n\ndef smoothed_aggregation_solver(A, B=None, BH=None,\n symmetry='hermitian', strength='symmetric',\n aggregate='standard',\n smooth=('jacobi', {'omega': 4.0/3.0}),\n presmoother=('block_gauss_seidel',\n {'sweep': 'symmetric'}),\n postsmoother=('block_gauss_seidel',\n {'sweep': 'symmetric'}),\n improve_candidates=[('block_gauss_seidel',\n {'sweep': 'symmetric',\n 'iterations': 4}),\n None],\n max_levels=10, max_coarse=10,\n diagonal_dominance=False,\n keep=False, **kwargs):\n \"\"\"Create a multilevel solver using classical-style Smoothed Aggregation (SA).\n\n Parameters\n ----------\n A : csr_matrix, bsr_matrix\n Sparse NxN matrix in CSR or BSR format\n\n B : None, array_like\n Right near-nullspace candidates stored in the columns of an NxK array.\n The default value B=None is equivalent to B=ones((N,1))\n\n BH : None, array_like\n Left near-nullspace candidates stored in the columns of an NxK array.\n BH is only used if symmetry='nonsymmetric'.\n The default value B=None is equivalent to BH=B.copy()\n\n symmetry : string\n 'symmetric' refers to both real and complex symmetric\n 'hermitian' refers to both complex Hermitian and real Hermitian\n 'nonsymmetric' i.e. nonsymmetric in a hermitian sense\n Note, in the strictly real case, symmetric and hermitian are the same.\n Note, this flag does not denote definiteness of the operator.\n\n strength : string or list\n Method used to determine the strength of connection between unknowns of\n the linear system. Method-specific parameters may be passed in using a\n tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,\n all nonzero entries of the matrix are considered strong.\n Choose from 'symmetric', 'classical', 'evolution', 'algebraic_distance',\n 'affinity', ('predefined', {'C' : csr_matrix}), None\n\n aggregate : string or list\n Method used to aggregate nodes.\n Choose from 'standard', 'lloyd', 'naive',\n ('predefined', {'AggOp' : csr_matrix})\n\n smooth : list\n Method used to smooth the tentative prolongator. Method-specific\n parameters may be passed in using a tuple, e.g. smooth=\n ('jacobi',{'filter' : True }).\n Choose from 'jacobi', 'richardson', 'energy', None\n\n presmoother : tuple, string, list\n Defines the presmoother for the multilevel cycling. 
The default block\n Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix\n is CSR or is a BSR matrix with blocksize of 1.\n\n postsmoother : tuple, string, list\n Same as presmoother, except defines the postsmoother.\n\n improve_candidates : tuple, string, list\n The ith entry defines the method used to improve the candidates B on\n level i. If the list is shorter than max_levels, then the last entry\n will define the method for all levels lower. If tuple or string, then\n this single relaxation descriptor defines improve_candidates on all\n levels.\n The list elements are relaxation descriptors of the form used for\n presmoother and postsmoother. A value of None implies no action on B.\n\n max_levels : integer\n Maximum number of levels to be used in the multilevel solver.\n\n max_coarse : integer\n Maximum number of variables permitted on the coarse grid.\n\n diagonal_dominance : bool, tuple\n If True (or the first tuple entry is True), then avoid coarsening\n diagonally dominant rows. The second tuple entry requires a\n dictionary, where the key value 'theta' is used to tune the diagonal\n dominance threshold.\n\n keep : bool\n Flag to indicate keeping extra operators in the hierarchy for\n diagnostics. For example, if True, then strength of connection (C),\n tentative prolongation (T), and aggregation (AggOp) are kept.\n\n Other Parameters\n ----------------\n cycle_type : ['V','W','F']\n Structrure of multigrid cycle\n\n coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]\n Solver used at the coarsest level of the MG hierarchy.\n Optionally, may be a tuple (fn, args), where fn is a string such as\n ['splu', 'lu', ...] or a callable function, and args is a dictionary of\n arguments to be passed to fn.\n\n Returns\n -------\n ml : multilevel_solver\n Multigrid hierarchy of matrices and prolongation operators\n\n See Also\n --------\n multilevel_solver, classical.ruge_stuben_solver,\n aggregation.smoothed_aggregation_solver\n\n Notes\n -----\n - This method implements classical-style SA, not root-node style SA\n (see aggregation.rootnode_solver).\n\n - The additional parameters are passed through as arguments to\n multilevel_solver. Refer to pyamg.multilevel_solver for additional\n documentation.\n\n - At each level, four steps are executed in order to define the coarser\n level operator.\n\n 1. Matrix A is given and used to derive a strength matrix, C.\n\n 2. Based on the strength matrix, indices are grouped or aggregated.\n\n 3. The aggregates define coarse nodes and a tentative prolongation\n operator T is defined by injection\n\n 4. The tentative prolongation operator is smoothed by a relaxation\n scheme to improve the quality and extent of interpolation from the\n aggregates to fine nodes.\n\n - The parameters smooth, strength, aggregate, presmoother, postsmoother\n can be varied on a per level basis. For different methods on\n different levels, use a list as input so that the i-th entry defines\n the method at the i-th level. If there are more levels in the\n hierarchy than list entries, the last entry will define the method\n for all levels lower.\n\n Examples are:\n smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']\n presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']\n aggregate=['standard', 'naive']\n strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]\n\n - Predefined strength of connection and aggregation schemes can be\n specified. 
These options are best used together, but aggregation can\n be predefined while strength of connection is not.\n\n For predefined strength of connection, use a list consisting of\n tuples of the form ('predefined', {'C' : C0}), where C0 is a\n csr_matrix and each degree-of-freedom in C0 represents a supernode.\n For instance to predefine a three-level hierarchy, use\n [('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].\n\n Similarly for predefined aggregation, use a list of tuples. For\n instance to predefine a three-level hierarchy, use [('predefined',\n {'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the\n dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==\n A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a\n csr_matrix.\n\n Examples\n --------\n >>> from pyamg import smoothed_aggregation_solver\n >>> from pyamg.gallery import poisson\n >>> from scipy.sparse.linalg import cg\n >>> import numpy as np\n >>> A = poisson((100,100), format='csr') # matrix\n >>> b = np.ones((A.shape[0])) # RHS\n >>> ml = smoothed_aggregation_solver(A) # AMG solver\n >>> M = ml.aspreconditioner(cycle='V') # preconditioner\n >>> x,info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG\n\n References\n ----------\n .. [1996VaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,\n \"Algebraic Multigrid by Smoothed Aggregation for\n Second and Fourth Order Elliptic Problems\",\n Computing, vol. 56, no. 3, pp. 179--196, 1996.\n http://citeseer.ist.psu.edu/vanek96algebraic.html\n\n \"\"\"\n if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):\n try:\n A = csr_matrix(A)\n warn(\"Implicit conversion of A to CSR\", SparseEfficiencyWarning)\n except BaseException:\n raise TypeError('Argument A must have type csr_matrix or bsr_matrix, or be convertible to csr_matrix')\n\n A = A.asfptype()\n\n if (symmetry != 'symmetric') and (symmetry != 'hermitian') and\\\n (symmetry != 'nonsymmetric'):\n raise ValueError('expected \\'symmetric\\', \\'nonsymmetric\\' or \\'hermitian\\' for the symmetry parameter ')\n A.symmetry = symmetry\n\n if A.shape[0] != A.shape[1]:\n raise ValueError('expected square matrix')\n\n # Right near nullspace candidates use constant for each variable as default\n if B is None:\n B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),\n np.eye(blocksize(A), dtype=A.dtype))\n else:\n B = np.asarray(B, dtype=A.dtype)\n if len(B.shape) == 1:\n B = B.reshape(-1, 1)\n if B.shape[0] != A.shape[0]:\n raise ValueError('The near null-space modes B have incorrect dimensions for matrix A')\n if B.shape[1] < blocksize(A):\n warn('Having less target vectors, B.shape[1], than blocksize of A can degrade convergence factors.')\n\n # Left near nullspace candidates\n if A.symmetry == 'nonsymmetric':\n if BH is None:\n BH = B.copy()\n else:\n BH = np.asarray(BH, dtype=A.dtype)\n if len(BH.shape) == 1:\n BH = BH.reshape(-1, 1)\n if BH.shape[1] != B.shape[1]:\n raise ValueError('The number of left and right near null-space modes B and BH, must be equal')\n if BH.shape[0] != A.shape[0]:\n raise ValueError('The near null-space modes BH have incorrect dimensions for matrix A')\n\n # Levelize the user parameters, so that they become lists describing the\n # desired user option on each level.\n max_levels, max_coarse, strength =\\\n levelize_strength_or_aggregation(strength, max_levels, max_coarse)\n max_levels, max_coarse, aggregate =\\\n levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)\n improve_candidates =\\\n 
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)\n smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)\n\n # Construct multilevel structure\n levels = []\n levels.append(multilevel_solver.level())\n levels[-1].A = A # matrix\n\n # Append near nullspace candidates\n levels[-1].B = B # right candidates\n if A.symmetry == 'nonsymmetric':\n levels[-1].BH = BH # left candidates\n\n while len(levels) < max_levels and\\\n int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:\n extend_hierarchy(levels, strength, aggregate, smooth,\n improve_candidates, diagonal_dominance, keep)\n\n ml = multilevel_solver(levels, **kwargs)\n change_smoothers(ml, presmoother, postsmoother)\n return ml\n\n\ndef extend_hierarchy(levels, strength, aggregate, smooth, improve_candidates,\n diagonal_dominance=False, keep=True):\n \"\"\"Extend the multigrid hierarchy.\n\n Service routine to implement the strength of connection, aggregation,\n tentative prolongation construction, and prolongation smoothing. Called by\n smoothed_aggregation_solver.\n\n \"\"\"\n def unpack_arg(v):\n if isinstance(v, tuple):\n return v[0], v[1]\n else:\n return v, {}\n\n A = levels[-1].A\n B = levels[-1].B\n if A.symmetry == \"nonsymmetric\":\n AH = A.H.asformat(A.format)\n BH = levels[-1].BH\n\n # Compute the strength-of-connection matrix C, where larger\n # C[i,j] denote stronger couplings between i and j.\n fn, kwargs = unpack_arg(strength[len(levels)-1])\n if fn == 'symmetric':\n C = symmetric_strength_of_connection(A, **kwargs)\n elif fn == 'classical':\n C = classical_strength_of_connection(A, **kwargs)\n elif fn == 'distance':\n C = distance_strength_of_connection(A, **kwargs)\n elif (fn == 'ode') or (fn == 'evolution'):\n if 'B' in kwargs:\n C = evolution_strength_of_connection(A, **kwargs)\n else:\n C = evolution_strength_of_connection(A, B, **kwargs)\n elif fn == 'energy_based':\n C = energy_based_strength_of_connection(A, **kwargs)\n elif fn == 'predefined':\n C = kwargs['C'].tocsr()\n elif fn == 'algebraic_distance':\n C = algebraic_distance(A, **kwargs)\n elif fn == 'affinity':\n C = affinity_distance(A, **kwargs)\n elif fn is None:\n C = A.tocsr()\n else:\n raise ValueError('unrecognized strength of connection method: %s' %\n str(fn))\n\n # Avoid coarsening diagonally dominant rows\n flag, kwargs = unpack_arg(diagonal_dominance)\n if flag:\n C = eliminate_diag_dom_nodes(A, C, **kwargs)\n\n # Compute the aggregation matrix AggOp (i.e., the nodal coarsening of A).\n # AggOp is a boolean matrix, where the sparsity pattern for the k-th column\n # denotes the fine-grid nodes agglomerated into k-th coarse-grid node.\n fn, kwargs = unpack_arg(aggregate[len(levels)-1])\n if fn == 'standard':\n AggOp = standard_aggregation(C, **kwargs)[0]\n elif fn == 'naive':\n AggOp = naive_aggregation(C, **kwargs)[0]\n elif fn == 'lloyd':\n AggOp = lloyd_aggregation(C, **kwargs)[0]\n elif fn == 'predefined':\n AggOp = kwargs['AggOp'].tocsr()\n else:\n raise ValueError('unrecognized aggregation method %s' % str(fn))\n\n # Improve near nullspace candidates by relaxing on A B = 0\n fn, kwargs = unpack_arg(improve_candidates[len(levels)-1])\n if fn is not None:\n b = np.zeros((A.shape[0], 1), dtype=A.dtype)\n B = relaxation_as_linear_operator((fn, kwargs), A, b) * B\n levels[-1].B = B\n if A.symmetry == \"nonsymmetric\":\n BH = relaxation_as_linear_operator((fn, kwargs), AH, b) * BH\n levels[-1].BH = BH\n\n # Compute the tentative prolongator, T, which is a tentative interpolation\n # matrix from the 
coarse-grid to the fine-grid. T exactly interpolates\n # B_fine = T B_coarse.\n T, B = fit_candidates(AggOp, B)\n if A.symmetry == \"nonsymmetric\":\n TH, BH = fit_candidates(AggOp, BH)\n\n # Smooth the tentative prolongator, so that it's accuracy is greatly\n # improved for algebraically smooth error.\n fn, kwargs = unpack_arg(smooth[len(levels)-1])\n if fn == 'jacobi':\n P = jacobi_prolongation_smoother(A, T, C, B, **kwargs)\n elif fn == 'richardson':\n P = richardson_prolongation_smoother(A, T, **kwargs)\n elif fn == 'energy':\n P = energy_prolongation_smoother(A, T, C, B, None, (False, {}),\n **kwargs)\n elif fn is None:\n P = T\n else:\n raise ValueError('unrecognized prolongation smoother method %s' %\n str(fn))\n\n # Compute the restriction matrix, R, which interpolates from the fine-grid\n # to the coarse-grid. If A is nonsymmetric, then R must be constructed\n # based on A.H. Otherwise R = P.H or P.T.\n symmetry = A.symmetry\n if symmetry == 'hermitian':\n R = P.H\n elif symmetry == 'symmetric':\n R = P.T\n elif symmetry == 'nonsymmetric':\n fn, kwargs = unpack_arg(smooth[len(levels)-1])\n if fn == 'jacobi':\n R = jacobi_prolongation_smoother(AH, TH, C, BH, **kwargs).H\n elif fn == 'richardson':\n R = richardson_prolongation_smoother(AH, TH, **kwargs).H\n elif fn == 'energy':\n R = energy_prolongation_smoother(AH, TH, C, BH, None, (False, {}),\n **kwargs)\n R = R.H\n elif fn is None:\n R = T.H\n else:\n raise ValueError('unrecognized prolongation smoother method %s' %\n str(fn))\n\n if keep:\n levels[-1].C = C # strength of connection matrix\n levels[-1].AggOp = AggOp # aggregation operator\n levels[-1].T = T # tentative prolongator\n\n levels[-1].P = P # smoothed prolongator\n levels[-1].R = R # restriction operator\n\n levels.append(multilevel_solver.level())\n A = R * A * P # Galerkin operator\n A.symmetry = symmetry\n levels[-1].A = A\n levels[-1].B = B # right near nullspace candidates\n\n if A.symmetry == \"nonsymmetric\":\n levels[-1].BH = BH # left near nullspace candidates\n"
] | [
[
"scipy.sparse.isspmatrix_bsr",
"numpy.zeros",
"scipy.sparse.csr_matrix",
"numpy.asarray",
"scipy.sparse.isspmatrix_csr"
]
] |
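The smoothed_aggregation_solver docstring in the row above already carries a usage example; restated here as a runnable script, with nothing added beyond the docstring's own Poisson problem and CG settings:

import numpy as np
from scipy.sparse.linalg import cg
from pyamg import smoothed_aggregation_solver
from pyamg.gallery import poisson

A = poisson((100, 100), format='csr')      # 2D Poisson matrix
b = np.ones(A.shape[0])                    # right-hand side
ml = smoothed_aggregation_solver(A)        # build the SA hierarchy
M = ml.aspreconditioner(cycle='V')         # expose it as a preconditioner
x, info = cg(A, b, tol=1e-8, maxiter=30, M=M)  # note: `tol` became `rtol` in newer SciPy
print(info, np.linalg.norm(b - A @ x))     # info == 0 means CG converged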
MuellerSeb/jams_python | [
"1bca04557da79d8f8a4c447f5ccc517c40ab7dfc"
] | [
"jams/errormeasures.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\nimport numpy as np\nfrom scipy.stats import t\n\n\"\"\"\n Defines common error measures.\n\n \n Definition\n ----------\n def bias(y_obs,y_mod): bias\n def mae(y_obs,y_mod): mean absolute error\n def mse(y_obs,y_mod): mean squared error\n def rmse(y_obs,y_mod): root mean squared error\n def nse(y_obs,y_mod): Nash-Sutcliffe-Efficiency\n def kge(y_obs,y_mod): Kling-Gupta-Efficiency\n def pear2(y_obs,y_mod): Squared Pearson correlation coefficient\n def confint(y_obs, p=0.95): Confidence interval of samples\n \n\n Input\n -----\n y_obs np.array(N) or np.ma.array(N)\n y_mod np.array(N) or np.ma.array(N)\n\n\n Output\n ------\n measure float: error measure of respective error function\n (see definitions for details)\n\n Restrictions\n ------------\n Deals with masked and unmasked arrays. When nan is found in an unmasked\n array, it will be masked. All measures are applied only on values where\n both, y_obs and y_mod, have valid entries (not masked and not nan)\n \n \n Examples\n --------\n -> see respective function\n\n\n License\n -------\n This file is part of the JAMS Python package, distributed under the MIT\n License. The JAMS Python package originates from the former UFZ Python library,\n Department of Computational Hydrosystems, Helmholtz Centre for Environmental\n Research - UFZ, Leipzig, Germany.\n\n Copyright (c) 2014-2017 Arndt Piayda, Stephan Thober, Matthias Cuntz - mc (at) macu (dot) de\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n\n History\n -------\n Written, AP, Jul 2014\n Modified MC, Dec 2014 - use simple formulas that work with normal and masked arrays but do not deal with NaN\n Modified AP, Sep 2015 - add confidence interval\n Modified ST, Nov 2015 - added KGE\n Modified ST, Jan 2017 - added components for KGE\n\"\"\"\n\ndef bias(y_obs,y_mod):\n \"\"\"\n calculates bias = mean(y_obs) - mean(y_mod)\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate bias\n >>> print(np.round(bias(y_obs, y_mod),2))\n -0.43\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('bias: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('bias: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n # return np.ma.mean(y_obsr) - np.ma.mean(y_modr)\n return y_obs.mean() - y_mod.mean()\n\ndef mae(y_obs,y_mod):\n \"\"\"\n calculates mean absolute error = mean(abs(y_obs - y_mod))\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate mean absolute error\n >>> print(np.round(mae(y_obs, y_mod),2))\n 0.55\n\n \"\"\"\n # check\n if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n raise ValueError('mae: input must be 1D')\n elif y_obs.size!=y_mod.size:\n raise ValueError('mae: input must be of same size')\n # calc\n else:\n # check if masked or not\n try:\n temp = y_obs.mask\n temp = y_mod.mask\n except AttributeError:\n y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n return np.ma.mean(np.ma.abs(y_obsr-y_modr))\n\ndef mse(y_obs,y_mod):\n \"\"\"\n calculates mean squared error = mean((y_obs - y_mod)**2)\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate mean squared error\n >>> print(np.round(mse(y_obs, y_mod),2))\n 0.54\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('mse: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('mse: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n # return np.ma.mean((y_obsr-y_modr)**2)\n return ((y_obs-y_mod)**2).mean()\n\ndef rmse(y_obs,y_mod):\n \"\"\"\n 
calculates root mean squared error = sqrt(mean((y_obs - y_mod)**2))\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate root mean squared error\n >>> print(np.round(rmse(y_obs, y_mod),2))\n 0.73\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('rmse: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('rmse: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask)\n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n # return np.ma.sqrt(np.ma.mean((y_obsr-y_modr)**2))\n return ((y_obs-y_mod)**2).mean()**0.5\n\ndef nse(y_obs,y_mod):\n \"\"\"\n calculates Nash-Sutcliffe-Efficiency = 1 - (sum((y_obs - y_mod)**2) / sum((y_obs - mean(y_obs))**2))\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate Nash-Sutcliffe-Efficiency\n >>> print(np.round(nse(y_obs, y_mod),2))\n 0.71\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('r2: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('r2: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n # a = np.ma.sum((y_obsr - y_modr)**2)\n # b = np.ma.sum((y_obsr - np.ma.mean(y_obsr))**2)\n # return 1. - (a / b)\n return 1. - ((y_obs-y_mod)**2).sum()/((y_obs-y_obs.mean())**2).sum()\n\n\ndef kge(y_obs,y_mod,components=False):\n \"\"\"\n calculates Kling-Gupta-Efficiency = 1 - sqrt((1-r)**2 + (1-a)**2 + (1-b)**2),\n where r is the Pearson correlation of y_obs and y_mod,\n a is mean(y_mod) / mean(y_obs), and\n b is std(y_mod) / std(y_obs)\n if components is True, then r, a, and b are (in this order) additionally returned to the KGE\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate Kling-Gupta-Efficiency\n >>> print(np.round(kge(y_obs, y_mod),2))\n 0.58\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('r2: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('r2: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask)\n # r = np.ma.corrcoef(y_obsr, y_modr)[0, 1]\n # a = np.ma.mean(y_modr) / np.ma.mean(y_obsr)\n # b = np.ma.std(y_modr) / np.ma.std(y_obsr)\n # return 1. 
- np.sqrt((1 - r)**2 + (1 - a)**2 + (1 - b)**2)\n r = np.corrcoef(y_obs, y_mod)[0, 1]\n alpha = np.std(y_mod) / np.std(y_obs)\n beta = np.mean(y_mod) / np.mean(y_obs)\n if components:\n return 1. - np.sqrt((1 - r)**2 + (1 - beta)**2 + (1 - alpha)**2), r, alpha, beta\n else:\n return 1. - np.sqrt((1 - r)**2 + (1 - beta)**2 + (1 - alpha)**2)\n\n\ndef pear2(y_obs,y_mod):\n \"\"\"\n calculates squared Pearson correlation coeffcient\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])\n >>> # calculate Squared Pearson correlation coefficient\n >>> print(np.round(pear2(y_obs, y_mod),2))\n 0.99\n\n \"\"\"\n # # check\n # if (y_obs.ndim!=1) or (y_mod.ndim!=1):\n # raise ValueError('pear2: input must be 1D')\n # elif y_obs.size!=y_mod.size:\n # raise ValueError('pear2: input must be of same size')\n # # calc\n # else:\n # # check if masked or not\n # try:\n # temp = y_obs.mask\n # temp = y_mod.mask\n # except AttributeError:\n # y_obs=np.ma.array(y_obs, mask=np.isnan(y_obs))\n # y_mod=np.ma.array(y_mod, mask=np.isnan(y_mod))\n # y_modr = np.ma.array(y_mod, mask=y_mod.mask | y_obs.mask) \n # y_obsr = np.ma.array(y_obs, mask=y_mod.mask | y_obs.mask) \n # return np.corrcoef(y_obsr.compressed(), y_modr.compressed())[0,1]**2\n return ((y_obs-y_obs.mean())*(y_mod-y_mod.mean())).mean()/y_obs.std()/y_mod.std()\n \ndef confint(y_obs, p=0.95):\n \"\"\"\n calculates confidence interval of the mean of the sample applying a \n student-t-distribution to a given probability p (default p=0.95)\n \n Examples\n --------\n >>> # Create some data\n >>> y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])\n >>> # calculate confident interval\n >>> print(np.round(confint(y_obs)))\n [13. 16.]\n\n \"\"\"\n s = y_obs.size\n return np.array(t.interval(p, s-1., loc=y_obs.mean(), scale=y_obs.std()/np.sqrt(s)))\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.ma.array",
"numpy.isnan",
"numpy.sqrt",
"numpy.std",
"numpy.ma.abs",
"numpy.mean",
"numpy.corrcoef"
]
] |
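The kge function in the row above combines a correlation term, a variability ratio and a mean ratio. The same formula restated compactly in numpy, evaluated on the module's own doctest data so the result can be checked against the documented value of 0.58:

import numpy as np

def kge(y_obs, y_mod):
    """Kling-Gupta efficiency: 1 - sqrt((1-r)^2 + (1-beta)^2 + (1-alpha)^2)."""
    r = np.corrcoef(y_obs, y_mod)[0, 1]      # linear correlation
    alpha = np.std(y_mod) / np.std(y_obs)    # variability ratio
    beta = np.mean(y_mod) / np.mean(y_obs)   # bias ratio
    return 1.0 - np.sqrt((1 - r) ** 2 + (1 - beta) ** 2 + (1 - alpha) ** 2)

y_obs = np.array([12.7867, 13.465, 14.1433, 15.3733, 16.6033])
y_mod = np.array([12.8087, 13.151, 14.3741, 16.2302, 17.9433])
print(np.round(kge(y_obs, y_mod), 2))  # 0.58, matching the doctest in the row above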
Davide-DD/distributed-machine-learning-architectures | [
"998d86368c4122ad9937b505405191b316afb060",
"998d86368c4122ad9937b505405191b316afb060"
] | [
"architectures/gossip-learning/nodes/fog-node/code/classes/aged_model.py",
"analyzer/federated_visualize.py"
] | [
"from keras import backend as K\nfrom keras.models import *\nfrom keras.layers import *\nimport os\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\n\n\nclass AgedModel:\n\n\n\tdef __init__(self, model=None, age=None):\t\t\n\t\tself.graph = tf.Graph()\n\n\t\twith self.graph.as_default():\n\n\t\t\tself.session = tf.Session()\n\n\t\t\twith self.session.as_default():\n\n\t\t\t\tif model == None:\n\t\t\t\t\tn_sensors, t_periods = 4, 60\n\n\t\t\t\t\t# L'oggetto Sequential crea una pila lineare di livelli\n\t\t\t\t\tmodel = Sequential()\n\n\t\t\t\t\t# Come primo livello, aggiunge un livello di convoluzione a 1 dimensione con i seguenti argomenti: \n\t\t\t\t\t# 1. Filters: specifica il numero di filtri che vogliamo applicare (= larghezza dell'output)\n\t\t\t\t\t# 2. Kernel_size: specifica quanti dati vengono convoluti contemporaneamente (se si sottrae alla lunghezza dell'input e si aggiunge 1 si ha la lunghezza dell'output)\n\t\t\t\t\t# 3. activation: funzione di attivazione dei neuroni\n\t\t\t\t\t# 4. input_shape: definisce la \"forma\" dell'input\n\t\t\t\t\tmodel.add(Conv1D(100, 6, activation='relu', input_shape=(t_periods, n_sensors)))\n\n\t\t\t\t\t# Altro livello come sopra\n\t\t\t\t\tmodel.add(Conv1D(100, 6, activation='relu'))\n\n\t\t\t\t\t# Livello di pooling per convoluzioni 1D: prende 3 input alla volta e li sostituisce con il valore massimo che trova per evitare l'overfitting\n\t\t\t\t\tmodel.add(MaxPooling1D(3))\n\n\t\t\t\t\t# Altro livello di convoluzione 1D\n\t\t\t\t\tmodel.add(Conv1D(160, 6, activation='relu'))\n\n\t\t\t\t\t# Ultimo livello di convoluzione 1D\n\t\t\t\t\tmodel.add(Conv1D(160, 6, activation='relu'))\n\n\t\t\t\t\t# Livello di pooling che computa il valore medio per ogni riga\n\t\t\t\t\tmodel.add(GlobalAveragePooling1D())\n\n\t\t\t\t\t# Non proprio un livello: serve a settare a 0 la metà (0.5) dei valori in input per ridurre l'overfitting\n\t\t\t\t\tmodel.add(Dropout(0.5))\n\n\t\t\t\t\t# Ultimo livello composto da 3 nodi con attivazione softmax, che:\n\t\t\t\t\t# Assegna a ogni valore in uscita dai nodi sopra un valore compreso tra 0 e 1; la somma di questi valori fa 1\n\t\t\t\t\tmodel.add(Dense(3, activation='softmax'))\n\n\t\t\t\t\t# Specifica come si esegue il processo di apprendimento dai dati, utilizzando:\n\t\t\t\t\t# 1. loss: funzione che si cerca di minimizzare\n\t\t\t\t\t# 2. optimizer: funzione che si utilizza per cambiare i pesi (adam è un miglioramento di SGD)\n\t\t\t\t\t# 3. metrics: lista di metriche che vuoi tenere sott'occhio durante l'apprendimento\n\t\t\t\t\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\t\t\t\t\tself.model = model\n\n\t\t\t\telse:\n\t\t\t\t\tself.model = load_model(model)\n\n\t\tif age != None:\n\t\t\tself.age = age\n\t\telse:\n\t\t\tself.age = datetime.timestamp(datetime.now())\n\n\n\tdef train(self,data):\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\n\t\t\t\tx_train, y_train = data\n\t\t\t\t# Addestra il modello, restituendo infine un oggetto History con vari parametri che permettono di vedere come si sono evolute le performance\n\t\t\t\t# 1. numpy array o lista di numpy array (secondo la dimensionalità attesa)\n\t\t\t\t# 2. come sopra\n\t\t\t\t# 3. numero di sample da utilizzare prima di aggiornare i pesi\n\t\t\t\t# 4. numero di iterazioni da fare sui dati in input\n\t\t\t\t# 5. 
frazione dei dati di apprendimento da utilizzare come validazione\n\t\t\t\tself.model.fit(x_train, y_train, batch_size=3, epochs=5, verbose=1)\n\n\n\tdef test(self, data):\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\n\t\t\t\tx_test, y_test = data\n\n\t\t\t\treturn self.model.evaluate(x_test, y_test, verbose=1)\n\t\t\n\n\tdef predict(self,data):\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\n\t\t\t\treturn self.model.predict(data)\n\n\n\tdef get_weights(self):\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\t\n\t\t\t\treturn self.model.get_weights()\n\n\n\tdef set_weights(self, weights):\t\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\t\n\t\t\t\treturn self.model.set_weights(weights)\n\n\n\tdef export(self):\n\t\twith self.graph.as_default():\n\t\t\twith self.session.as_default():\t\n\t\t\t\tfile_name = 'my_model' + str(datetime.timestamp(datetime.now())) + '.h5'\n\t\t\t\tfile_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), file_name)\n\t\t\t\tfile = open(file_path, 'wb+')\n\t\t\t\tself.model.save(file_path)\n\t\t\t\tfile.close()\n\t\t\t\treturn open(file_path, 'rb'), file_path",
"import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport sys\n\n\nplt.rcParams['figure.figsize'] = (19.2, 8.7)\nindex = 0\n\ndef show_accuracy(df):\n\tglobal index\n\t\n\tdf.plot(kind='line')\n\tplt.xlabel('iterations')\n\tplt.ylabel('accuracy')\n\tplt.savefig(str(index) + '.png', bbox_inches='tight')\n\n\tprint('Minima: ' + str(df.min(axis=1).min() * 100) + ' %', file=report)\n\tprint('Primo indice della minima: ' + str(df.min(axis=1).idxmin()), file=report)\n\tprint('Massima: ' + str(df.max(axis=1).max() * 100) + ' %', file=report)\n\tprint('Primo indice della massima: ' + str(df.max(axis=1).idxmax()) + '\\n', file=report)\n\tprint('Finale: ' + str(df[0].iloc[-1]), file=report)\n\n\tindex += 1\n\ndef calculate_node_info(logs):\n\taccuracy_df = pd.read_csv(logs['log_tests.txt'], sep='\\t', header=None)\n\tshow_accuracy(accuracy_df)\n\narch_path = sys.argv[1]\nlogs = {}\nfor nt in os.listdir(arch_path):\n\tif os.path.isdir(os.path.join(arch_path, nt)):\n\t\tnt_path = os.path.join(arch_path, nt)\n\t\tfor ni in os.listdir(nt_path):\n\t\t\tif os.path.isdir(os.path.join(nt_path, ni)):\n\t\t\t\tni_path = os.path.join(os.path.join(nt_path, ni), 'code')\n\t\t\t\tresult = {}\n\t\t\t\tfor log in os.listdir(ni_path):\n\t\t\t\t\tlog_path = os.path.join(ni_path, log)\n\t\t\t\t\tif os.path.isfile(log_path) and 'log' in log:\n\t\t\t\t\t\tresult[log] = log_path\n\t\t\t\tlogs[ni] = result\n\nreport = open('report.txt', 'w')\nprint('\\n------------------- ACCURATEZZA -------------------', file=report)\ncalculate_node_info(logs['edge-0'])\nprint('')"
] | [
[
"tensorflow.Graph",
"tensorflow.Session"
],
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
]
] |
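AgedModel in the row above wraps every Keras network in its own tf.Graph and tf.Session so that several independently aged models can live in one process. A stripped-down sketch of just that isolation pattern, assuming TF 1.x-style sessions (tf.compat.v1 on TF 2.x) and substituting a trivial op for the real Conv1D network:

import tensorflow.compat.v1 as tf  # plain `import tensorflow as tf` on TF 1.x

tf.disable_eager_execution()

class IsolatedModel:
    """Each instance owns a private graph and session, as AgedModel does."""

    def __init__(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.session = tf.Session()
            with self.session.as_default():
                self.x = tf.placeholder(tf.float32, shape=[None])
                self.y = 2.0 * self.x  # stand-in for the real Conv1D model

    def predict(self, values):
        with self.graph.as_default():
            with self.session.as_default():
                return self.session.run(self.y, feed_dict={self.x: values})

a, b = IsolatedModel(), IsolatedModel()  # two models, two independent graphs
print(a.predict([1.0, 2.0]), b.predict([3.0]))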
mwisnie5/pybaseball | [
"0a2a84d757e478aa79619100872ef48cf7da52c5"
] | [
"pybaseball/teamid_lookup.py"
] | [
"import logging\nimport os\nfrom datetime import date\nfrom typing import Optional\n\nimport pandas as pd\n\nfrom . import lahman\nfrom .datasources import fangraphs\n\n_DATA_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'fangraphs_teams.csv')\n\n\ndef team_ids(season: Optional[int] = None, league: str = 'ALL') -> pd.DataFrame:\n if not os.path.exists(_DATA_FILENAME):\n _generate_teams()\n\n fg_team_data = pd.read_csv(_DATA_FILENAME, index_col=0)\n\n if season is not None:\n fg_team_data = fg_team_data.query(f\"yearID == {season}\")\n\n if league is not None and league.upper() != \"ALL\":\n fg_team_data = fg_team_data.query(f\"lgID == '{league.upper()}'\")\n\n return fg_team_data\n\n\n_known_cities = ['Altoona', 'Anaheim', 'Arizona', 'Atlanta', 'Baltimore', 'Boston', 'Brooklyn', 'Buffalo',\n 'California', 'Chicago', 'Cincinnati', 'Cleveland', 'Colorado', 'Detroit', 'Elizabeth', 'Florida',\n 'Fort Wayne', 'Hartford', 'Houston', 'Indianapolis', 'Kansas City', 'Los Angeles', 'Milwaukee',\n 'Minnesota', 'Montreal', 'New York', 'Newark', 'Oakland', 'Philadelphia', 'Pittsburg',\n 'Pittsburgh', 'Richmond', 'San Diego', 'San Francisco', 'Seattle', 'St. Louis', 'St. Paul',\n 'Syracuse', 'Tampa Bay', 'Texas', 'Toronto', 'Troy', 'Washington', 'Washington', 'Wilmington']\n\n_manual_matches = {'CPI': 'Browns/Stogies', 'ANA': 'Angels'}\n\n\ndef _estimate_name(team_row: pd.DataFrame, column: str) -> str:\n if team_row['franchID'] in _manual_matches:\n return _manual_matches[team_row['franchID']]\n estimate = str(team_row[column])\n for city in _known_cities + [str(team_row['city'])]:\n estimate = estimate.replace(f'{city} ', '') if estimate.startswith(city) else estimate\n\n return estimate\n\n\ndef _generate_teams() -> pd.DataFrame:\n \"\"\"\n Creates a datafile with a map of Fangraphs team IDs to lahman data to be used by fangraphss_teams\n\n Should only need to be run when a team is added, removed, or moves to a new city.\n \"\"\"\n\n start_season = 1871\n end_season = date.today().year\n\n # Only getting AB to make payload small, and you have to specify at least one column\n team_data = fangraphs.fg_team_batting_data(start_season, end_season, \"ALL\", stat_columns=['AB'])\n\n # Join the lahman data\n teams_franchises = lahman.teams().merge(lahman.teams_franchises(), how='left', on='franchID', suffixes=['', '.fr'])\n teams_franchises = teams_franchises.merge(lahman.parks(), how='left', left_on='park', right_on='park.name',\n suffixes=['', '.p'])\n\n # Drop lahman data down to just what we need\n teams_franchises = teams_franchises[\n ['yearID', 'lgID', 'teamID', 'franchID', 'divID', 'name', 'park', 'teamIDBR', 'teamIDlahman45', 'teamIDretro',\n 'franchName', 'city', 'state']\n ]\n\n # Try to guess the name Fangraphs would use\n teams_franchises['possibleName'] = teams_franchises.apply(lambda row: _estimate_name(row, 'name'), axis=1)\n teams_franchises['possibleFranchName'] = teams_franchises.apply(lambda row: _estimate_name(row, 'franchName'),\n axis=1)\n\n # Join up the data by team name, and look for what is still without a match\n outer_joined = teams_franchises.merge(team_data, how='outer', left_on=['yearID', 'possibleName'],\n right_on=['Season', 'Team'])\n unjoined_teams_franchises = outer_joined.query('Season.isnull()').drop(team_data.columns, axis=1)\n unjoined_team_data = outer_joined.query('yearID.isnull()').drop(teams_franchises.columns, axis=1)\n\n # Take all the unmatched data and try to join off franchise name, instead of team name\n inner_joined = 
teams_franchises.merge(team_data, how='inner', left_on=['yearID', 'possibleName'],\n right_on=['Season', 'Team'])\n franch_inner_joined = unjoined_teams_franchises.merge(unjoined_team_data, how='inner',\n left_on=['yearID', 'possibleFranchName'],\n right_on=['Season', 'Team'])\n\n # Clean up the data\n joined = pd.concat([inner_joined, franch_inner_joined])\n\n outer_joined = joined.merge(team_data, how='outer', left_on=['yearID', 'teamIDfg'],\n right_on=['Season', 'teamIDfg'], suffixes=['', '_y'])\n\n unjoined_teams_franchises = outer_joined.query('Season_y.isnull()').drop(team_data.columns, axis=1,\n errors='ignore')\n\n if not unjoined_teams_franchises.empty:\n logging.warning('When trying to join FG data to lahman, found the following extraneous lahman data',\n extra=unjoined_teams_franchises)\n\n unjoined_team_data = outer_joined.query('yearID.isnull()').drop(teams_franchises.columns, axis=1, errors='ignore')\n\n if not unjoined_team_data.empty:\n logging.warning('When trying to join Fangraphs data to lahman, found the following extraneous Fangraphs data',\n extra=unjoined_team_data)\n\n joined = joined[['yearID', 'lgID', 'teamID', 'franchID', 'teamIDfg', 'teamIDBR', 'teamIDretro']]\n\n joined = joined.assign(teamIDfg=joined['teamIDfg'].apply(int))\n joined = joined.assign(yearID=joined['yearID'].apply(int))\n\n joined = joined.sort_values(['yearID', 'lgID', 'teamID', 'franchID']).drop_duplicates()\n joined = joined.reset_index(drop=True)\n\n joined.to_csv(_DATA_FILENAME)\n\n return joined\n\n# For backwards API compatibility\nfangraphs_teams = team_ids\n"
] | [
[
"pandas.read_csv",
"pandas.concat"
]
] |
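team_ids in the row above lazily regenerates data/fangraphs_teams.csv and then narrows it down with DataFrame.query. The filtering step on its own, sketched with a made-up frame standing in for the cached CSV (the rows below are illustrative, not real Fangraphs data):

import pandas as pd

# stand-in for the cached fangraphs_teams.csv; column names mirror the real file
fg_team_data = pd.DataFrame({
    'yearID':   [2018, 2018, 2019],
    'lgID':     ['AL', 'NL', 'AL'],
    'teamID':   ['NYA', 'LAN', 'NYA'],
    'teamIDfg': [9, 22, 9],
})

season, league = 2018, 'al'
out = fg_team_data.query(f"yearID == {season}")
if league is not None and league.upper() != "ALL":
    out = out.query(f"lgID == '{league.upper()}'")
print(out)  # only the 2018 AL row survives both filters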
micsthepick/boxed-bottles | [
"424cc0aec3e5d6897a38fc0507d9c609c9f78a1e"
] | [
"object_detection/utils/variables_helper.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Helper functions for manipulating collections of variables during training.\n\"\"\"\nfrom tensorflow import logging as logging\nimport re\n\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import variables as tf_variables\n\nslim = tf.contrib.slim\n\n\n# TODO(derekjchow): Consider replacing with tf.contrib.filter_variables in\n# tensorflow/contrib/framework/python/ops/variables.py\ndef filter_variables(variables, filter_regex_list, invert=False):\n \"\"\"Filters out the variables matching the filter_regex.\n\n Filter out the variables whose name matches the any of the regular\n expressions in filter_regex_list and returns the remaining variables.\n Optionally, if invert=True, the complement set is returned.\n\n Args:\n variables: a list of tensorflow variables.\n filter_regex_list: a list of string regular expressions.\n invert: (boolean). If True, returns the complement of the filter set; that\n is, all variables matching filter_regex are kept and all others discarded.\n\n Returns:\n a list of filtered variables.\n \"\"\"\n kept_vars = []\n variables_to_ignore_patterns = list(filter(None, filter_regex_list))\n for var in variables:\n add = True\n for pattern in variables_to_ignore_patterns:\n if re.match(pattern, var.op.name):\n add = False\n break\n if add != invert:\n kept_vars.append(var)\n return kept_vars\n\n\ndef multiply_gradients_matching_regex(grads_and_vars, regex_list, multiplier):\n \"\"\"Multiply gradients whose variable names match a regular expression.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n regex_list: A list of string regular expressions.\n multiplier: A (float) multiplier to apply to each gradient matching the\n regular expression.\n\n Returns:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n \"\"\"\n variables = [pair[1] for pair in grads_and_vars]\n matching_vars = filter_variables(variables, regex_list, invert=True)\n for var in matching_vars:\n logging.info('Applying multiplier %f to variable [%s]',\n multiplier, var.op.name)\n grad_multipliers = {var: float(multiplier) for var in matching_vars}\n return slim.learning.multiply_gradients(grads_and_vars,\n grad_multipliers)\n\n\ndef freeze_gradients_matching_regex(grads_and_vars, regex_list):\n \"\"\"Freeze gradients whose variable names match a regular expression.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n regex_list: A list of string regular expressions.\n\n Returns:\n grads_and_vars: A list of gradient to variable pairs (tuples) that do not\n contain the variables and gradients matching the regex.\n \"\"\"\n variables = [pair[1] for pair in grads_and_vars]\n matching_vars = filter_variables(variables, regex_list, invert=True)\n kept_grads_and_vars = [pair for pair in grads_and_vars\n if pair[1] not in matching_vars]\n for 
var in matching_vars:\n logging.info('Freezing variable [%s]', var.op.name)\n return kept_grads_and_vars\n\n\ndef get_variables_available_in_checkpoint(variables,\n checkpoint_path,\n include_global_step=True):\n \"\"\"Returns the subset of variables available in the checkpoint.\n\n Inspects given checkpoint and returns the subset of variables that are\n available in it.\n\n TODO(rathodv): force input and output to be a dictionary.\n\n Args:\n variables: a list or dictionary of variables to find in checkpoint.\n checkpoint_path: path to the checkpoint to restore variables from.\n include_global_step: whether to include `global_step` variable, if it\n exists. Default True.\n\n Returns:\n A list or dictionary of variables.\n Raises:\n ValueError: if `variables` is not a list or dict.\n \"\"\"\n if isinstance(variables, list):\n variable_names_map = {}\n for variable in variables:\n if isinstance(variable, tf_variables.PartitionedVariable):\n name = variable.name\n else:\n name = variable.op.name\n variable_names_map[name] = variable\n elif isinstance(variables, dict):\n variable_names_map = variables\n else:\n raise ValueError('`variables` is expected to be a list or dict.')\n ckpt_reader = tf.train.NewCheckpointReader(checkpoint_path)\n ckpt_vars_to_shape_map = ckpt_reader.get_variable_to_shape_map()\n if not include_global_step:\n ckpt_vars_to_shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)\n vars_in_ckpt = {}\n for variable_name, variable in sorted(variable_names_map.items()):\n if variable_name in ckpt_vars_to_shape_map:\n if ckpt_vars_to_shape_map[variable_name] == variable.shape.as_list():\n vars_in_ckpt[variable_name] = variable\n else:\n logging.warning('Variable [%s] is available in checkpoint, but has an '\n 'incompatible shape with model variable. Checkpoint '\n 'shape: [%s], model variable shape: [%s]. This '\n 'variable will not be initialized from the checkpoint.',\n variable_name, ckpt_vars_to_shape_map[variable_name],\n variable.shape.as_list())\n else:\n logging.warning('Variable [%s] is not available in checkpoint',\n variable_name)\n if isinstance(variables, list):\n return vars_in_ckpt.values()\n return vars_in_ckpt\n"
] | [
[
"tensorflow.logging.warning",
"tensorflow.train.NewCheckpointReader",
"tensorflow.logging.info"
]
] |
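filter_variables in the row above keeps a variable when none of the regular expressions match its op name, and flips that behaviour when invert=True. The same selection logic demonstrated on plain strings, so it runs without TensorFlow (the variable names are illustrative):

import re

def filter_names(names, filter_regex_list, invert=False):
    """Mirror of filter_variables, operating on plain name strings."""
    kept = []
    patterns = list(filter(None, filter_regex_list))
    for name in names:
        matched = any(re.match(p, name) for p in patterns)
        # keep non-matching names by default; keep only matching ones when invert=True
        if matched == invert:
            kept.append(name)
    return kept

names = ['FeatureExtractor/conv1/weights', 'BoxPredictor/bias', 'global_step']
print(filter_names(names, ['FeatureExtractor/.*']))               # drops the first name
print(filter_names(names, ['FeatureExtractor/.*'], invert=True))  # keeps only the first name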
connectthefuture/tensorflow | [
"93812423fcd5878aa2c1d0b68dc0496980c8519d",
"93812423fcd5878aa2c1d0b68dc0496980c8519d",
"93812423fcd5878aa2c1d0b68dc0496980c8519d",
"93812423fcd5878aa2c1d0b68dc0496980c8519d"
] | [
"tensorflow/contrib/seq2seq/python/kernel_tests/seq2seq_test.py",
"tensorflow/python/training/device_setter_test.py",
"tensorflow/python/ops/parsing_ops.py",
"tensorflow/tools/test/run_and_gather_logs_lib.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for contrib.seq2seq.python.ops.seq2seq.\"\"\"\n# pylint: disable=unused-import,g-bad-import-order\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n# pylint: enable=unused-import\n\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\n\nclass Seq2SeqTest(tf.test.TestCase):\n\n # test a default call of rnn_decoder\n def test_rnn_decoder(self):\n pass\n\n # test default call with time_major=True\n def test_dynamic_rnn_decoder_time_major(self):\n with self.test_session() as sess:\n with tf.variable_scope(\"root\", initializer=\n tf.constant_initializer(0.5)) as varscope:\n # Define inputs/outputs to model\n batch_size = 2\n encoder_embedding_size = 3\n decoder_embedding_size = 4\n encoder_hidden_size = 5\n decoder_hidden_size = encoder_hidden_size\n input_sequence_length = 6\n decoder_sequence_length = 7\n num_decoder_symbols = 20\n start_of_sequence_id = end_of_sequence_id = 1\n decoder_embeddings = tf.get_variable('decoder_embeddings',\n [num_decoder_symbols, decoder_embedding_size],\n initializer=tf.random_normal_initializer(stddev=0.1))\n inputs = tf.constant(0.5, shape=[input_sequence_length, batch_size,\n encoder_embedding_size])\n decoder_inputs = tf.constant(0.4, shape=[decoder_sequence_length,\n batch_size,\n decoder_embedding_size])\n decoder_length = tf.constant(decoder_sequence_length, dtype=tf.int32,\n shape=[batch_size,])\n with tf.variable_scope(\"rnn\") as scope:\n # setting up weights for computing the final output\n output_fn = lambda x: layers.linear(x, num_decoder_symbols,\n scope=scope)\n\n # Define model\n encoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n cell=tf.nn.rnn_cell.GRUCell(encoder_hidden_size), inputs=inputs,\n dtype=tf.float32, time_major=True, scope=scope)\n\n\n with tf.variable_scope(\"decoder\") as scope:\n # Train decoder\n decoder_cell = tf.nn.rnn_cell.GRUCell(decoder_hidden_size)\n decoder_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(\n encoder_state=encoder_state)\n decoder_outputs_train, decoder_state_train = (\n tf.contrib.seq2seq.dynamic_rnn_decoder(\n cell=decoder_cell,\n decoder_fn=decoder_fn_train,\n inputs=decoder_inputs,\n sequence_length=decoder_length,\n time_major=True,\n scope=scope))\n decoder_outputs_train = output_fn(decoder_outputs_train)\n\n # Setup variable reuse\n scope.reuse_variables()\n\n # Inference decoder\n decoder_fn_inference = (\n tf.contrib.seq2seq.simple_decoder_fn_inference(\n output_fn=output_fn,\n encoder_state=encoder_state,\n embeddings=decoder_embeddings,\n start_of_sequence_id=start_of_sequence_id,\n end_of_sequence_id=end_of_sequence_id,\n #TODO: find out why it goes to +1\n maximum_length=decoder_sequence_length-1,\n num_decoder_symbols=num_decoder_symbols,\n dtype=tf.int32))\n decoder_outputs_inference, 
decoder_state_inference = (\n tf.contrib.seq2seq.dynamic_rnn_decoder(\n cell=decoder_cell,\n decoder_fn=decoder_fn_inference,\n time_major=True,\n scope=scope))\n\n # Run model\n tf.global_variables_initializer().run()\n decoder_outputs_train_res, decoder_state_train_res = sess.run(\n [decoder_outputs_train, decoder_state_train])\n decoder_outputs_inference_res, decoder_state_inference_res = sess.run(\n [decoder_outputs_inference, decoder_state_inference])\n\n # Assert outputs\n self.assertEqual((decoder_sequence_length, batch_size,\n num_decoder_symbols),\n decoder_outputs_train_res.shape)\n self.assertEqual((batch_size, num_decoder_symbols),\n decoder_outputs_inference_res.shape[1:3])\n self.assertEqual((batch_size, decoder_hidden_size),\n decoder_state_train_res.shape)\n self.assertEqual((batch_size, decoder_hidden_size),\n decoder_state_inference_res.shape)\n # The dynamic decoder might end earlier than `maximal_length`\n # under inference\n true_value = (decoder_sequence_length>=\n decoder_state_inference_res.shape[0])\n self.assertEqual((true_value), True)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for device function for replicated training.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass DeviceSetterTest(tf.test.TestCase):\n\n _cluster_spec = tf.train.ClusterSpec({\n \"ps\": [\"ps0:2222\", \"ps1:2222\"],\n \"worker\": [\"worker0:2222\", \"worker1:2222\", \"worker2:2222\"]})\n\n def testCPUOverride(self):\n with tf.device(tf.train.replica_device_setter(cluster=self._cluster_spec)):\n with tf.device(\"/cpu:0\"):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n with tf.device(\"/cpu:0\"):\n a = v + w\n self.assertDeviceEqual(\"/job:ps/task:0/cpu:0\", v.device)\n self.assertDeviceEqual(\"/job:ps/task:0/cpu:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.initializer.device)\n self.assertDeviceEqual(\"/job:worker/cpu:0\", a.device)\n\n def testPS2TasksWithClusterSpecClass(self):\n with tf.device(tf.train.replica_device_setter(cluster=self._cluster_spec)):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n a = v + w\n self.assertDeviceEqual(\"/job:ps/task:0\", v.device)\n self.assertDeviceEqual(\"/job:ps/task:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.initializer.device)\n self.assertDeviceEqual(\"/job:worker\", a.device)\n\n def testPS2TasksWithClusterSpecDict(self):\n with tf.device(tf.train.replica_device_setter(\n cluster=self._cluster_spec.as_dict())):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n a = v + w\n self.assertDeviceEqual(\"/job:ps/task:0\", v.device)\n self.assertDeviceEqual(\"/job:ps/task:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.initializer.device)\n self.assertDeviceEqual(\"/job:worker\", a.device)\n\n def testPS2TasksWithClusterDef(self):\n with tf.device(tf.train.replica_device_setter(\n cluster=self._cluster_spec.as_cluster_def())):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n a = v + w\n self.assertDeviceEqual(\"/job:ps/task:0\", v.device)\n self.assertDeviceEqual(\"/job:ps/task:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.device)\n self.assertDeviceEqual(\"/job:ps/task:1\", w.initializer.device)\n self.assertDeviceEqual(\"/job:worker\", a.device)\n\n def testPS2TasksWithDevice(self):\n cluster_spec = tf.train.ClusterSpec({\n \"sun\": [\"sun0:2222\", \"sun1:2222\", \"sun2:2222\"],\n \"moon\": [\"moon0:2222\", \"moon1:2222\"]})\n\n with tf.device(tf.train.replica_device_setter(\n ps_device=\"/job:moon\", worker_device=\"/job:sun\",\n cluster=cluster_spec.as_cluster_def())):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n a = v + w\n 
self.assertDeviceEqual(\"/job:moon/task:0\", v.device)\n self.assertDeviceEqual(\"/job:moon/task:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:moon/task:1\", w.device)\n self.assertDeviceEqual(\"/job:moon/task:1\", w.initializer.device)\n self.assertDeviceEqual(\"/job:sun\", a.device)\n\n def testPS2TasksWithCPUConstraint(self):\n cluster_spec = tf.train.ClusterSpec({\n \"sun\": [\"sun0:2222\", \"sun1:2222\", \"sun2:2222\"],\n \"moon\": [\"moon0:2222\", \"moon1:2222\"]})\n\n with tf.device(tf.train.replica_device_setter(\n ps_device=\"/job:moon/cpu:0\", worker_device=\"/job:sun\",\n cluster=cluster_spec.as_cluster_def())):\n v = tf.Variable([1, 2])\n w = tf.Variable([2, 1])\n a = v + w\n self.assertDeviceEqual(\"/job:moon/task:0/cpu:0\", v.device)\n self.assertDeviceEqual(\"/job:moon/task:0/cpu:0\", v.initializer.device)\n self.assertDeviceEqual(\"/job:moon/task:1/cpu:0\", w.device)\n self.assertDeviceEqual(\"/job:moon/task:1/cpu:0\", w.initializer.device)\n self.assertDeviceEqual(\"/job:sun\", a.device)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Parsing Ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_parsing_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,undefined-variable\nfrom tensorflow.python.ops.gen_parsing_ops import *\n# pylint: enable=wildcard-import,undefined-variable\n\n\nops.NotDifferentiable(\"DecodeRaw\")\nops.NotDifferentiable(\"ParseTensor\")\nops.NotDifferentiable(\"StringToNumber\")\n\n\nclass VarLenFeature(collections.namedtuple(\"VarLenFeature\", [\"dtype\"])):\n \"\"\"Configuration for parsing a variable-length input feature.\n\n Fields:\n dtype: Data type of input.\n \"\"\"\n pass\n\n\nclass SparseFeature(\n collections.namedtuple(\n \"SparseFeature\",\n [\"index_key\", \"value_key\", \"dtype\", \"size\", \"already_sorted\"])):\n \"\"\"Configuration for parsing a sparse input feature.\n\n Fields:\n index_key: Name of index feature. The underlying feature's type must\n be `int64` and its length must always match that of the `value_key`\n feature.\n value_key: Name of value feature. The underlying feature's type must\n be `dtype` and its length must always match that of the `index_key`\n feature.\n dtype: Data type of the `value_key` feature.\n size: Each value in the `index_key` feature must be in `[0, size)`.\n already_sorted: A boolean to specify whether the values in `index_key` are\n already sorted. If so skip sorting, False by default (optional).\n \"\"\"\n pass\nSparseFeature.__new__.__defaults__ = (False,)\n\n\nclass FixedLenFeature(collections.namedtuple(\n \"FixedLenFeature\", [\"shape\", \"dtype\", \"default_value\"])):\n \"\"\"Configuration for parsing a fixed-length input feature.\n\n To treat sparse input as dense, provide a `default_value`; otherwise,\n the parse functions will fail on any examples missing this feature.\n\n Fields:\n shape: Shape of input data.\n dtype: Data type of input.\n default_value: Value to be used if an example is missing this feature. 
It\n must be compatible with `dtype`.\n \"\"\"\n pass\nFixedLenFeature.__new__.__defaults__ = (None,)\n\n\n# NOTE: If we ever support a default_value for sequence dense features, we can\n# remove this class and use FixedLenFeature in its place.\nclass FixedLenSequenceFeature(collections.namedtuple(\n \"FixedLenSequenceFeature\", [\"shape\", \"dtype\", \"allow_missing\"])):\n \"\"\"Configuration for a dense input feature in a sequence item.\n\n To treat a sparse input as dense, provide `allow_missing=True`; otherwise,\n the parse functions will fail on any examples missing this feature.\n\n Fields:\n shape: Shape of input data.\n dtype: Data type of input.\n allow_missing: Whether to allow this feature to be missing from a feature\n list item.\n \"\"\"\n pass\nFixedLenSequenceFeature.__new__.__defaults__ = (False,)\n\n\ndef _features_to_raw_params(features, types):\n \"\"\"Split feature tuples into raw params used by `gen_parsing_ops`.\n\n Args:\n features: A `dict` mapping feature keys to objects of a type in `types`.\n types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`,\n `SparseFeature`, and `FixedLenSequenceFeature`.\n\n Returns:\n Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`,\n `dense_defaults`, `dense_shapes`.\n\n Raises:\n ValueError: if `features` contains an item not in `types`, or an invalid\n feature.\n \"\"\"\n sparse_keys = []\n sparse_types = []\n dense_keys = []\n dense_types = []\n dense_defaults = {}\n dense_shapes = []\n if features:\n # NOTE: We iterate over sorted keys to keep things deterministic.\n for key in sorted(features.keys()):\n feature = features[key]\n if isinstance(feature, VarLenFeature):\n if VarLenFeature not in types:\n raise ValueError(\"Unsupported VarLenFeature %s.\", feature)\n if not feature.dtype:\n raise ValueError(\"Missing type for feature %s.\" % key)\n sparse_keys.append(key)\n sparse_types.append(feature.dtype)\n elif isinstance(feature, SparseFeature):\n if SparseFeature not in types:\n raise ValueError(\"Unsupported SparseFeature %s.\", feature)\n if not feature.index_key:\n raise ValueError(\n \"Missing index_key for SparseFeature %s.\", feature)\n if not feature.value_key:\n raise ValueError(\n \"Missing value_key for SparseFeature %s.\", feature)\n if not feature.dtype:\n raise ValueError(\"Missing type for feature %s.\" % key)\n if feature.index_key in sparse_keys:\n dtype = sparse_types[sparse_keys.index(feature.index_key)]\n if dtype != dtypes.int64:\n raise ValueError(\"Conflicting type %s vs int64 for feature %s.\" % (\n dtype, feature.index_key))\n else:\n sparse_keys.append(feature.index_key)\n sparse_types.append(dtypes.int64)\n\n if feature.value_key in sparse_keys:\n dtype = sparse_types[sparse_keys.index(feature.value_key)]\n if dtype != feature.dtype:\n raise ValueError(\"Conflicting type %s vs %s for feature %s.\" % (\n dtype, feature.dtype, feature.value_key))\n else:\n sparse_keys.append(feature.value_key)\n sparse_types.append(feature.dtype)\n elif isinstance(feature, FixedLenFeature):\n if FixedLenFeature not in types:\n raise ValueError(\"Unsupported FixedLenFeature %s.\", feature)\n if not feature.dtype:\n raise ValueError(\"Missing type for feature %s.\" % key)\n if feature.shape is None:\n raise ValueError(\"Missing shape for feature %s.\" % key)\n dense_keys.append(key)\n dense_shapes.append(feature.shape)\n dense_types.append(feature.dtype)\n if feature.default_value is not None:\n dense_defaults[key] = feature.default_value\n elif isinstance(feature, 
FixedLenSequenceFeature):\n if FixedLenSequenceFeature not in types:\n raise ValueError(\"Unsupported FixedLenSequenceFeature %s.\", feature)\n if not feature.dtype:\n raise ValueError(\"Missing type for feature %s.\" % key)\n if feature.shape is None:\n raise ValueError(\"Missing shape for feature %s.\" % key)\n dense_keys.append(key)\n dense_shapes.append(feature.shape)\n dense_types.append(feature.dtype)\n if feature.allow_missing:\n dense_defaults[key] = None\n else:\n raise ValueError(\"Invalid feature %s:%s.\" % (key, feature))\n return (\n sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,\n dense_shapes)\n\n\ndef _construct_sparse_tensors_for_sparse_features(features, tensor_dict):\n \"\"\"Merges SparseTensors of indices and values of SparseFeatures.\n\n Updates `tensor_dict`. For `SparseFeatures` in the values of `features`\n expects their `index_key`s and `index_value`s to be present in `tensor_dict`\n mapping to `SparseTensor`s. Removes those, constructs a single `SparseTensor`\n from them, and adds it to `tensor_dict` with the key from `features`.\n\n Args:\n features: A `dict` mapping feature keys to `SparseFeature` values.\n Values of other types will be ignored.\n tensor_dict: A `dict` mapping feature keys to `Tensor` and `SparseTensor`\n values. Expected to contain keys of the `SparseFeature`s' `index_key`s and\n `value_key`s and mapping them to `SparseTensor`s.\n \"\"\"\n # Construct SparseTensors for SparseFeatures.\n for key in sorted(features.keys()):\n feature = features[key]\n if isinstance(feature, SparseFeature):\n sp_ids = tensor_dict[feature.index_key]\n sp_values = tensor_dict[feature.value_key]\n tensor_dict[key] = sparse_ops.sparse_merge(\n sp_ids,\n sp_values,\n feature.size,\n feature.already_sorted)\n # Remove tensors from dictionary that were only used to construct\n # SparseTensors for SparseFeature.\n for key in set(tensor_dict.keys()) - set(features.keys()):\n del tensor_dict[key]\n\n\ndef parse_example(serialized, features, name=None, example_names=None):\n # pylint: disable=line-too-long\n \"\"\"Parses `Example` protos into a `dict` of tensors.\n\n Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)\n protos given in `serialized`.\n\n `example_names` may contain descriptive names for the corresponding serialized\n protos. These may be useful for debugging purposes, but they have no effect on\n the output. If not `None`, `example_names` must be the same length as\n `serialized`.\n\n This op parses serialized examples into a dictionary mapping keys to `Tensor`\n and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`,\n `SparseFeature`, and `FixedLenFeature` objects. Each `VarLenFeature`\n and `SparseFeature` is mapped to a `SparseTensor`, and each\n `FixedLenFeature` is mapped to a `Tensor`.\n\n Each `VarLenFeature` maps to a `SparseTensor` of the specified type\n representing a ragged matrix. Its indices are `[batch, index]` where `batch`\n is the batch entry the value is from in `serialized`, and `index` is the\n value's index in the list of values associated with that feature and example.\n\n Each `SparseFeature` maps to a `SparseTensor` of the specified type\n representing a sparse matrix of shape\n `(serialized.size(), SparseFeature.size)`. 
Its indices are `[batch, index]`\n where `batch` is the batch entry the value is from in `serialized`, and\n `index` is the value's index is given by the values in the\n `SparseFeature.index_key` feature column.\n\n Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or\n `tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.\n\n `FixedLenFeature` entries with a `default_value` are optional. With no default\n value, we will fail if that `Feature` is missing from any example in\n `serialized`.\n\n Examples:\n\n For example, if one expects a `tf.float32` sparse feature `ft` and three\n serialized `Example`s are provided:\n\n ```\n serialized = [\n features\n { feature { key: \"ft\" value { float_list { value: [1.0, 2.0] } } } },\n features\n { feature []},\n features\n { feature { key: \"ft\" value { float_list { value: [3.0] } } }\n ]\n ```\n\n then the output will look like:\n\n ```\n {\"ft\": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],\n values=[1.0, 2.0, 3.0],\n dense_shape=(3, 2)) }\n ```\n\n Given two `Example` input protos in `serialized`:\n\n ```\n [\n features {\n feature { key: \"kw\" value { bytes_list { value: [ \"knit\", \"big\" ] } } }\n feature { key: \"gps\" value { float_list { value: [] } } }\n },\n features {\n feature { key: \"kw\" value { bytes_list { value: [ \"emmy\" ] } } }\n feature { key: \"dank\" value { int64_list { value: [ 42 ] } } }\n feature { key: \"gps\" value { } }\n }\n ]\n ```\n\n And arguments\n\n ```\n example_names: [\"input0\", \"input1\"],\n features: {\n \"kw\": VarLenFeature(tf.string),\n \"dank\": VarLenFeature(tf.int64),\n \"gps\": VarLenFeature(tf.float32),\n }\n ```\n\n Then the output is a dictionary:\n\n ```python\n {\n \"kw\": SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0]],\n values=[\"knit\", \"big\", \"emmy\"]\n dense_shape=[2, 2]),\n \"dank\": SparseTensor(\n indices=[[1, 0]],\n values=[42],\n dense_shape=[2, 1]),\n \"gps\": SparseTensor(\n indices=[],\n values=[],\n dense_shape=[2, 0]),\n }\n ```\n\n For dense results in two serialized `Example`s:\n\n ```\n [\n features {\n feature { key: \"age\" value { int64_list { value: [ 0 ] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n },\n features {\n feature { key: \"age\" value { int64_list { value: [] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n }\n ]\n ```\n\n We can use arguments:\n\n ```\n example_names: [\"input0\", \"input1\"],\n features: {\n \"age\": FixedLenFeature([], dtype=tf.int64, default_value=-1),\n \"gender\": FixedLenFeature([], dtype=tf.string),\n }\n ```\n\n And the expected output is:\n\n ```python\n {\n \"age\": [[0], [-1]],\n \"gender\": [[\"f\"], [\"f\"]],\n }\n ```\n\n Given two `Example` input protos in `serialized`:\n\n ```\n [\n features {\n feature { key: \"val\" value { float_list { value: [ 0.5, -1.0 ] } } }\n feature { key: \"ix\" value { int64_list { value: [ 3, 20 ] } } }\n },\n features {\n feature { key: \"val\" value { float_list { value: [ 0.0 ] } } }\n feature { key: \"ix\" value { int64_list { value: [ 42 ] } } }\n }\n ]\n ```\n\n And arguments\n\n ```\n example_names: [\"input0\", \"input1\"],\n features: {\n \"sparse\": SparseFeature(\"ix\", \"val\", tf.float32, 100),\n }\n ```\n\n Then the output is a dictionary:\n\n ```python\n {\n \"sparse\": SparseTensor(\n indices=[[0, 3], [0, 20], [1, 42]],\n values=[0.5, -1.0, 0.0]\n dense_shape=[2, 100]),\n }\n ```\n\n Args:\n serialized: A vector (1-D Tensor) of strings, a batch of binary\n serialized 
`Example` protos.\n features: A `dict` mapping feature keys to `FixedLenFeature`,\n `VarLenFeature`, and `SparseFeature` values.\n name: A name for this operation (optional).\n example_names: A vector (1-D Tensor) of strings (optional), the names of\n the serialized protos in the batch.\n\n Returns:\n A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.\n\n Raises:\n ValueError: if any feature is invalid.\n \"\"\"\n if not features:\n raise ValueError(\"Missing: features was %s.\" % features)\n (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,\n dense_shapes) = _features_to_raw_params(\n features, [VarLenFeature, SparseFeature, FixedLenFeature])\n outputs = _parse_example_raw(\n serialized, example_names, sparse_keys, sparse_types, dense_keys,\n dense_types, dense_defaults, dense_shapes, name)\n _construct_sparse_tensors_for_sparse_features(features, outputs)\n return outputs\n\n\ndef _parse_example_raw(serialized,\n names=None,\n sparse_keys=None,\n sparse_types=None,\n dense_keys=None,\n dense_types=None,\n dense_defaults=None,\n dense_shapes=None,\n name=None):\n \"\"\"Parses `Example` protos.\n\n Args:\n serialized: A vector (1-D Tensor) of strings, a batch of binary\n serialized `Example` protos.\n names: A vector (1-D Tensor) of strings (optional), the names of\n the serialized protos.\n sparse_keys: A list of string keys in the examples' features.\n The results for these keys will be returned as `SparseTensor` objects.\n sparse_types: A list of `DTypes` of the same length as `sparse_keys`.\n Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\n and `tf.string` (`BytesList`) are supported.\n dense_keys: A list of string keys in the examples' features.\n The results for these keys will be returned as `Tensor`s\n dense_types: A list of DTypes of the same length as `dense_keys`.\n Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\n and `tf.string` (`BytesList`) are supported.\n dense_defaults: A dict mapping string keys to `Tensor`s.\n The keys of the dict must match the dense_keys of the feature.\n dense_shapes: A list of tuples with the same length as `dense_keys`.\n The shape of the data for each dense feature referenced by `dense_keys`.\n Required for any input tensors identified by `dense_keys` whose shapes are\n anything other than `[]` or `[1]`.\n name: A name for this operation (optional).\n\n Returns:\n A `dict` mapping keys to `Tensor`s and `SparseTensor`s.\n\n Raises:\n ValueError: If sparse and dense key sets intersect, or input lengths do not\n match up.\n \"\"\"\n with ops.name_scope(name, \"ParseExample\", [serialized, names]):\n names = [] if names is None else names\n dense_defaults = {} if dense_defaults is None else dense_defaults\n sparse_keys = [] if sparse_keys is None else sparse_keys\n sparse_types = [] if sparse_types is None else sparse_types\n dense_keys = [] if dense_keys is None else dense_keys\n dense_types = [] if dense_types is None else dense_types\n dense_shapes = (\n [[]] * len(dense_keys) if dense_shapes is None else dense_shapes)\n\n num_dense = len(dense_keys)\n num_sparse = len(sparse_keys)\n\n if len(dense_shapes) != num_dense:\n raise ValueError(\"len(dense_shapes) != len(dense_keys): %d vs. %d\"\n % (len(dense_shapes), num_dense))\n if len(dense_types) != num_dense:\n raise ValueError(\"len(dense_types) != len(num_dense): %d vs. %d\"\n % (len(dense_types), num_dense))\n if len(sparse_types) != num_sparse:\n raise ValueError(\"len(sparse_types) != len(sparse_keys): %d vs. 
%d\"\n % (len(sparse_types), num_sparse))\n if num_dense + num_sparse == 0:\n raise ValueError(\"Must provide at least one sparse key or dense key\")\n if not set(dense_keys).isdisjoint(set(sparse_keys)):\n raise ValueError(\n \"Dense and sparse keys must not intersect; intersection: %s\" %\n set(dense_keys).intersection(set(sparse_keys)))\n\n dense_defaults_vec = []\n for i, key in enumerate(dense_keys):\n default_value = dense_defaults.get(key)\n if default_value is None:\n default_value = constant_op.constant([], dtype=dense_types[i])\n elif not isinstance(default_value, ops.Tensor):\n key_name = \"key_\" + re.sub(\"[^A-Za-z0-9_.\\\\-/]\", \"_\", key)\n default_value = ops.convert_to_tensor(\n default_value, dtype=dense_types[i], name=key_name)\n default_value = array_ops.reshape(default_value, dense_shapes[i])\n\n dense_defaults_vec.append(default_value)\n\n dense_shapes = [tensor_shape.as_shape(shape).as_proto()\n for shape in dense_shapes]\n\n # pylint: disable=protected-access\n outputs = gen_parsing_ops._parse_example(\n serialized=serialized,\n names=names,\n dense_defaults=dense_defaults_vec,\n sparse_keys=sparse_keys,\n sparse_types=sparse_types,\n dense_keys=dense_keys,\n dense_shapes=dense_shapes,\n name=name)\n # pylint: enable=protected-access\n\n (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs\n\n sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)\n in zip(sparse_indices, sparse_values, sparse_shapes)]\n\n return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))\n\n\ndef parse_single_example(serialized, features, name=None, example_names=None):\n \"\"\"Parses a single `Example` proto.\n\n Similar to `parse_example`, except:\n\n For dense tensors, the returned `Tensor` is identical to the output of\n `parse_example`, except there is no batch dimension, the output shape is the\n same as the shape given in `dense_shape`.\n\n For `SparseTensor`s, the first (batch) column of the indices matrix is removed\n (the indices matrix is a column vector), the values vector is unchanged, and\n the first (`batch_size`) entry of the shape vector is removed (it is now a\n single element vector).\n\n Args:\n serialized: A scalar string Tensor, a single serialized Example.\n See `_parse_single_example_raw` documentation for more details.\n features: A `dict` mapping feature keys to `FixedLenFeature` or\n `VarLenFeature` values.\n name: A name for this operation (optional).\n example_names: (Optional) A scalar string Tensor, the associated name.\n See `_parse_single_example_raw` documentation for more details.\n\n Returns:\n A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.\n\n Raises:\n ValueError: if any feature is invalid.\n \"\"\"\n if not features:\n raise ValueError(\"Missing features.\")\n (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,\n dense_shapes) = _features_to_raw_params(\n features, [VarLenFeature, FixedLenFeature, SparseFeature])\n outputs = _parse_single_example_raw(\n serialized, example_names, sparse_keys, sparse_types, dense_keys,\n dense_types, dense_defaults, dense_shapes, name)\n _construct_sparse_tensors_for_sparse_features(features, outputs)\n return outputs\n\n\ndef _parse_single_example_raw(serialized,\n names=None,\n sparse_keys=None,\n sparse_types=None,\n dense_keys=None,\n dense_types=None,\n dense_defaults=None,\n dense_shapes=None,\n name=None):\n \"\"\"Parses a single `Example` proto.\n\n Args:\n serialized: A scalar string Tensor, a single 
serialized Example.\n See `_parse_example_raw` documentation for more details.\n names: (Optional) A scalar string Tensor, the associated name.\n See `_parse_example_raw` documentation for more details.\n sparse_keys: See `_parse_example_raw` documentation for more details.\n sparse_types: See `_parse_example_raw` documentation for more details.\n dense_keys: See `_parse_example_raw` documentation for more details.\n dense_types: See `_parse_example_raw` documentation for more details.\n dense_defaults: See `_parse_example_raw` documentation for more details.\n dense_shapes: See `_parse_example_raw` documentation for more details.\n name: A name for this operation (optional).\n\n Returns:\n A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.\n\n Raises:\n ValueError: if any feature is invalid.\n \"\"\"\n with ops.name_scope(name, \"ParseSingleExample\", [serialized, names]):\n serialized = ops.convert_to_tensor(serialized)\n serialized_shape = serialized.get_shape()\n if serialized_shape.ndims is not None:\n if serialized_shape.ndims != 0:\n raise ValueError(\"Input serialized must be a scalar\")\n else:\n serialized = control_flow_ops.with_dependencies(\n [control_flow_ops.Assert(\n math_ops.equal(array_ops.rank(serialized), 0),\n [\"Input serialized must be a scalar\"],\n name=\"SerializedIsScalar\")],\n serialized,\n name=\"SerializedDependencies\")\n serialized = array_ops.expand_dims(serialized, 0)\n if names is not None:\n names = ops.convert_to_tensor(names)\n names_shape = names.get_shape()\n if names_shape.ndims is not None:\n if names_shape.ndims != 0:\n raise ValueError(\"Input names must be a scalar\")\n else:\n names = control_flow_ops.with_dependencies(\n [control_flow_ops.Assert(\n math_ops.equal(array_ops.rank(names), 0),\n [\"Input names must be a scalar\"],\n name=\"NamesIsScalar\")],\n names,\n name=\"NamesDependencies\")\n names = array_ops.expand_dims(names, 0)\n\n outputs = _parse_example_raw(\n serialized,\n names=names,\n sparse_keys=sparse_keys,\n sparse_types=sparse_types,\n dense_keys=dense_keys,\n dense_types=dense_types,\n dense_defaults=dense_defaults,\n dense_shapes=dense_shapes,\n name=name)\n if dense_keys is not None:\n for d in dense_keys:\n d_name = re.sub(\"[^A-Za-z0-9_.\\\\-/]\", \"_\", d)\n outputs[d] = array_ops.squeeze(\n outputs[d], [0], name=\"Squeeze_%s\" % d_name)\n if sparse_keys is not None:\n for s in sparse_keys:\n s_name = re.sub(\"[^A-Za-z0-9_.\\\\-/]\", \"_\", s)\n outputs[s] = sparse_tensor.SparseTensor(\n array_ops.slice(outputs[s].indices,\n [0, 1], [-1, -1], name=\"Slice_Indices_%s\" % s_name),\n outputs[s].values,\n array_ops.slice(outputs[s].dense_shape,\n [1], [-1], name=\"Squeeze_Shape_%s\" % s_name))\n return outputs\n\n\ndef parse_single_sequence_example(\n serialized, context_features=None, sequence_features=None,\n example_name=None, name=None):\n # pylint: disable=line-too-long\n \"\"\"Parses a single `SequenceExample` proto.\n\n Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)\n proto given in `serialized`.\n\n This op parses a serialize sequence example into a tuple of dictionaries\n mapping keys to `Tensor` and `SparseTensor` objects respectively.\n The first dictionary contains mappings for keys appearing in\n `context_features`, and the second dictionary contains mappings for keys\n appearing in `sequence_features`.\n\n At least one of `context_features` and `sequence_features` must be provided\n and non-empty.\n\n The 
`context_features` keys are associated with a `SequenceExample` as a\n whole, independent of time / frame. In contrast, the `sequence_features` keys\n provide a way to access variable-length data within the `FeatureList` section\n of the `SequenceExample` proto. While the shapes of `context_features` values\n are fixed with respect to frame, the frame dimension (the first dimension)\n of `sequence_features` values may vary between `SequenceExample` protos,\n and even between `feature_list` keys within the same `SequenceExample`.\n\n `context_features` contains `VarLenFeature` and `FixedLenFeature` objects.\n Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`\n is mapped to a `Tensor`, of the specified type, shape, and default value.\n\n `sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`\n objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each\n `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.\n The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where\n `T` is the length of the associated `FeatureList` in the `SequenceExample`.\n For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of\n static shape `[None]` and dynamic shape `[T]`, while\n `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`\n of static shape `[None, k]` and dynamic shape `[T, k]`.\n\n Each `SparseTensor` corresponding to `sequence_features` represents a ragged\n vector. Its indices are `[time, index]`, where `time` is the `FeatureList`\n entry and `index` is the value's index in the list of values associated with\n that time.\n\n `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature`\n entries with `allow_missing=True` are optional; otherwise, we will fail if\n that `Feature` or `FeatureList` is missing from any example in `serialized`.\n\n `example_name` may contain a descriptive name for the corresponding serialized\n proto. This may be useful for debugging purposes, but it has no effect on the\n output. If not `None`, `example_name` must be a scalar.\n\n Args:\n serialized: A scalar (0-D Tensor) of type string, a single binary\n serialized `SequenceExample` proto.\n context_features: A `dict` mapping feature keys to `FixedLenFeature` or\n `VarLenFeature` values. These features are associated with a\n `SequenceExample` as a whole.\n sequence_features: A `dict` mapping feature keys to\n `FixedLenSequenceFeature` or `VarLenFeature` values. 
These features are\n associated with data within the `FeatureList` section of the\n `SequenceExample` proto.\n example_name: A scalar (0-D Tensor) of strings (optional), the name of\n the serialized proto.\n name: A name for this operation (optional).\n\n Returns:\n A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.\n The first dict contains the context key/values.\n The second dict contains the feature_list key/values.\n\n Raises:\n ValueError: if any feature is invalid.\n \"\"\"\n # pylint: enable=line-too-long\n if not (context_features or sequence_features):\n raise ValueError(\"Missing features.\")\n (context_sparse_keys, context_sparse_types, context_dense_keys,\n context_dense_types, context_dense_defaults,\n context_dense_shapes) = _features_to_raw_params(\n context_features, [VarLenFeature, FixedLenFeature])\n (feature_list_sparse_keys, feature_list_sparse_types,\n feature_list_dense_keys, feature_list_dense_types,\n feature_list_dense_defaults,\n feature_list_dense_shapes) = _features_to_raw_params(\n sequence_features, [VarLenFeature, FixedLenSequenceFeature])\n return _parse_single_sequence_example_raw(\n serialized, context_sparse_keys, context_sparse_types,\n context_dense_keys, context_dense_types, context_dense_defaults,\n context_dense_shapes, feature_list_sparse_keys,\n feature_list_sparse_types, feature_list_dense_keys,\n feature_list_dense_types, feature_list_dense_shapes,\n feature_list_dense_defaults, example_name, name)\n\n\ndef _parse_single_sequence_example_raw(serialized,\n context_sparse_keys=None,\n context_sparse_types=None,\n context_dense_keys=None,\n context_dense_types=None,\n context_dense_defaults=None,\n context_dense_shapes=None,\n feature_list_sparse_keys=None,\n feature_list_sparse_types=None,\n feature_list_dense_keys=None,\n feature_list_dense_types=None,\n feature_list_dense_shapes=None,\n feature_list_dense_defaults=None,\n debug_name=None,\n name=None):\n \"\"\"Parses a single `SequenceExample` proto.\n\n Args:\n serialized: A scalar (0-D Tensor) of type string, a single binary\n serialized `SequenceExample` proto.\n context_sparse_keys: A list of string keys in the `SequenceExample`'s\n features. The results for these keys will be returned as\n `SparseTensor` objects.\n context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.\n Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\n and `tf.string` (`BytesList`) are supported.\n context_dense_keys: A list of string keys in the examples' features.\n The results for these keys will be returned as `Tensor`s\n context_dense_types: A list of DTypes, same length as `context_dense_keys`.\n Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\n and `tf.string` (`BytesList`) are supported.\n context_dense_defaults: A dict mapping string keys to `Tensor`s.\n The keys of the dict must match the context_dense_keys of the feature.\n context_dense_shapes: A list of tuples, same length as `context_dense_keys`.\n The shape of the data for each context_dense feature referenced by\n `context_dense_keys`. Required for any input tensors identified by\n `context_dense_keys` whose shapes are anything other than `[]` or `[1]`.\n feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s\n feature_lists. 
The results for these keys will be returned as\n `SparseTensor` objects.\n feature_list_sparse_types: A list of `DTypes`, same length as `sparse_keys`.\n Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\n and `tf.string` (`BytesList`) are supported.\n feature_list_dense_keys: A list of string keys in the `SequenceExample`'s\n features_lists. The results for these keys will be returned as `Tensor`s.\n feature_list_dense_types: A list of `DTypes`, same length as\n `feature_list_dense_keys`. Only `tf.float32` (`FloatList`),\n `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.\n feature_list_dense_shapes: A list of tuples, same length as\n `feature_list_dense_keys`. The shape of the data for each\n `FeatureList` feature referenced by `feature_list_dense_keys`.\n feature_list_dense_defaults: A dict mapping key strings to values.\n The only currently allowed value is `None`. Any key appearing\n in this dict with value `None` is allowed to be missing from the\n `SequenceExample`. If missing, the key is treated as zero-length.\n debug_name: A scalar (0-D Tensor) of strings (optional), the name of\n the serialized proto.\n name: A name for this operation (optional).\n\n Returns:\n A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.\n The first dict contains the context key/values.\n The second dict contains the feature_list key/values.\n\n Raises:\n ValueError: If context_sparse and context_dense key sets intersect,\n if input lengths do not match up, or if a value in\n feature_list_dense_defaults is not None.\n TypeError: if feature_list_dense_defaults is not either None or a dict.\n \"\"\"\n with ops.name_scope(name, \"ParseSingleSequenceExample\", [serialized]):\n context_dense_defaults = (\n {} if context_dense_defaults is None else context_dense_defaults)\n context_sparse_keys = (\n [] if context_sparse_keys is None else context_sparse_keys)\n context_sparse_types = (\n [] if context_sparse_types is None else context_sparse_types)\n context_dense_keys = (\n [] if context_dense_keys is None else context_dense_keys)\n context_dense_types = (\n [] if context_dense_types is None else context_dense_types)\n context_dense_shapes = (\n [[]] * len(context_dense_keys)\n if context_dense_shapes is None else context_dense_shapes)\n feature_list_sparse_keys = (\n [] if feature_list_sparse_keys is None else feature_list_sparse_keys)\n feature_list_sparse_types = (\n [] if feature_list_sparse_types is None else feature_list_sparse_types)\n feature_list_dense_keys = (\n [] if feature_list_dense_keys is None else feature_list_dense_keys)\n feature_list_dense_types = (\n [] if feature_list_dense_types is None else feature_list_dense_types)\n feature_list_dense_shapes = (\n [[]] * len(feature_list_dense_keys)\n if feature_list_dense_shapes is None else feature_list_dense_shapes)\n feature_list_dense_defaults = (\n dict() if feature_list_dense_defaults is None\n else feature_list_dense_defaults)\n debug_name = \"\" if debug_name is None else debug_name\n\n # Internal\n feature_list_dense_missing_assumed_empty = []\n\n num_context_dense = len(context_dense_keys)\n num_feature_list_dense = len(feature_list_dense_keys)\n num_context_sparse = len(context_sparse_keys)\n num_feature_list_sparse = len(feature_list_sparse_keys)\n\n if len(context_dense_shapes) != num_context_dense:\n raise ValueError(\n \"len(context_dense_shapes) != len(context_dense_keys): %d vs. 
%d\"\n % (len(context_dense_shapes), num_context_dense))\n if len(context_dense_types) != num_context_dense:\n raise ValueError(\n \"len(context_dense_types) != len(num_context_dense): %d vs. %d\"\n % (len(context_dense_types), num_context_dense))\n if len(feature_list_dense_shapes) != num_feature_list_dense:\n raise ValueError(\n \"len(feature_list_dense_shapes) != len(feature_list_dense_keys): \"\n \"%d vs. %d\" % (len(feature_list_dense_shapes),\n num_feature_list_dense))\n if len(feature_list_dense_types) != num_feature_list_dense:\n raise ValueError(\n \"len(feature_list_dense_types) != len(num_feature_list_dense):\"\n \"%d vs. %d\" % (len(feature_list_dense_types), num_feature_list_dense))\n if len(context_sparse_types) != num_context_sparse:\n raise ValueError(\n \"len(context_sparse_types) != len(context_sparse_keys): %d vs. %d\"\n % (len(context_sparse_types), num_context_sparse))\n if len(feature_list_sparse_types) != num_feature_list_sparse:\n raise ValueError(\n \"len(feature_list_sparse_types) != len(feature_list_sparse_keys): \"\n \"%d vs. %d\"\n % (len(feature_list_sparse_types), num_feature_list_sparse))\n if (num_context_dense + num_context_sparse\n + num_feature_list_dense + num_feature_list_sparse) == 0:\n raise ValueError(\n \"Must provide at least one context_sparse key, context_dense key, \"\n \", feature_list_sparse key, or feature_list_dense key\")\n if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):\n raise ValueError(\n \"context_dense and context_sparse keys must not intersect; \"\n \"intersection: %s\" %\n set(context_dense_keys).intersection(set(context_sparse_keys)))\n if not set(feature_list_dense_keys).isdisjoint(\n set(feature_list_sparse_keys)):\n raise ValueError(\n \"feature_list_dense and feature_list_sparse keys must not intersect; \"\n \"intersection: %s\" %\n set(feature_list_dense_keys).intersection(\n set(feature_list_sparse_keys)))\n if not isinstance(feature_list_dense_defaults, dict):\n raise TypeError(\"feature_list_dense_defaults must be a dict\")\n for k, v in feature_list_dense_defaults.items():\n if v is not None:\n raise ValueError(\"Value feature_list_dense_defaults[%s] must be None\"\n % k)\n feature_list_dense_missing_assumed_empty.append(k)\n\n context_dense_defaults_vec = []\n for i, key in enumerate(context_dense_keys):\n default_value = context_dense_defaults.get(key)\n if default_value is None:\n default_value = constant_op.constant([], dtype=context_dense_types[i])\n elif not isinstance(default_value, ops.Tensor):\n key_name = \"key_\" + re.sub(\"[^A-Za-z0-9_.\\\\-/]\", \"_\", key)\n default_value = ops.convert_to_tensor(\n default_value, dtype=context_dense_types[i], name=key_name)\n default_value = array_ops.reshape(\n default_value, context_dense_shapes[i])\n\n context_dense_defaults_vec.append(default_value)\n\n context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()\n for shape in context_dense_shapes]\n feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()\n for shape in feature_list_dense_shapes]\n\n # pylint: disable=protected-access\n outputs = gen_parsing_ops._parse_single_sequence_example(\n serialized=serialized,\n debug_name=debug_name,\n context_dense_defaults=context_dense_defaults_vec,\n context_sparse_keys=context_sparse_keys,\n context_sparse_types=context_sparse_types,\n context_dense_keys=context_dense_keys,\n context_dense_shapes=context_dense_shapes,\n feature_list_sparse_keys=feature_list_sparse_keys,\n feature_list_sparse_types=feature_list_sparse_types,\n 
feature_list_dense_keys=feature_list_dense_keys,\n feature_list_dense_types=feature_list_dense_types,\n feature_list_dense_shapes=feature_list_dense_shapes,\n feature_list_dense_missing_assumed_empty=(\n feature_list_dense_missing_assumed_empty),\n name=name)\n # pylint: enable=protected-access\n\n (context_sparse_indices, context_sparse_values,\n context_sparse_shapes, context_dense_values,\n feature_list_sparse_indices, feature_list_sparse_values,\n feature_list_sparse_shapes, feature_list_dense_values) = outputs\n\n context_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)\n in zip(context_sparse_indices,\n context_sparse_values,\n context_sparse_shapes)]\n\n feature_list_sparse_tensors = [\n sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)\n in zip(feature_list_sparse_indices,\n feature_list_sparse_values,\n feature_list_sparse_shapes)]\n\n context_output = dict(\n zip(context_sparse_keys + context_dense_keys,\n context_sparse_tensors + context_dense_values))\n feature_list_output = dict(\n zip(feature_list_sparse_keys + feature_list_dense_keys,\n feature_list_sparse_tensors + feature_list_dense_values))\n\n return (context_output, feature_list_output)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Library for getting system information during TensorFlow tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shlex\nimport subprocess\nimport tempfile\nimport time\n\nimport tensorflow as tf\n\nfrom tensorflow.core.util import test_log_pb2\nfrom tensorflow.tools.test import system_info_lib\n\n\ndef get_git_commit_sha():\n \"\"\"Get git commit SHA for this build.\n\n Attempt to get the SHA from environment variable GIT_COMMIT, which should\n be available on Jenkins build agents.\n\n Returns:\n SHA hash of the git commit used for the build, if available\n \"\"\"\n\n return os.getenv(\"GIT_COMMIT\")\n\n\ndef process_test_logs(\n name, test_name, test_args, start_time, run_time, log_files):\n \"\"\"Gather test information and put it in a TestResults proto.\n\n Args:\n name: Benchmark target identifier.\n test_name: A unique bazel target, e.g. \"//path/to:test\"\n test_args: A string containing all arguments to run the target with.\n\n start_time: Test starting time (epoch)\n run_time: Wall time that the test ran for\n log_files: Paths to the log files\n\n Returns:\n A TestResults proto\n \"\"\"\n\n results = test_log_pb2.TestResults()\n results.name = name\n results.target = test_name\n results.start_time = start_time\n results.run_time = run_time\n\n # Gather source code information\n git_sha = get_git_commit_sha()\n if git_sha:\n results.commit_id.hash = git_sha\n\n results.entries.CopyFrom(process_benchmarks(log_files))\n results.run_configuration.argument.extend(test_args)\n results.machine_configuration.CopyFrom(\n system_info_lib.gather_machine_configuration())\n return results\n\n\ndef process_benchmarks(log_files):\n benchmarks = test_log_pb2.BenchmarkEntries()\n for f in log_files:\n content = tf.gfile.GFile(f, \"rb\").read()\n if benchmarks.MergeFromString(content) != len(content):\n raise Exception(\"Failed parsing benchmark entry from %s\" % f)\n return benchmarks\n\n\ndef run_and_gather_logs(name, test_name, test_args):\n \"\"\"Run the bazel test given by test_name. Gather and return the logs.\n\n Args:\n name: Benchmark target identifier.\n test_name: A unique bazel target, e.g. 
\"//path/to:test\"\n test_args: A string containing all arguments to run the target with.\n\n Returns:\n A tuple (test_results, mangled_test_name), where\n test_results: A test_log_pb2.TestResults proto\n mangled_test_name: A string, the mangled test name.\n\n Raises:\n ValueError: If the test_name is not a valid target.\n subprocess.CalledProcessError: If the target itself fails.\n IOError: If there are problems gathering test log output from the test.\n \"\"\"\n if not (test_name\n and test_name.startswith(\"//\")\n and \"..\" not in test_name\n and not test_name.endswith(\":\")\n and not test_name.endswith(\":all\")\n and not test_name.endswith(\"...\")\n and len(test_name.split(\":\")) == 2):\n raise ValueError(\"Expected test_name parameter with a unique test, e.g.: \"\n \"--test_name=//path/to:test\")\n test_executable = test_name.rstrip().strip(\"/\").replace(\":\", \"/\")\n\n if tf.gfile.Exists(os.path.join(\"bazel-bin\", test_executable)):\n # Running in standalone mode from core of the repository\n test_executable = os.path.join(\"bazel-bin\", test_executable)\n else:\n # Hopefully running in sandboxed mode\n test_executable = os.path.join(\".\", test_executable)\n\n temp_directory = tempfile.mkdtemp(prefix=\"run_and_gather_logs\")\n mangled_test_name = test_name.strip(\"/\").replace(\"/\", \"_\").replace(\":\", \"_\")\n test_file_prefix = os.path.join(temp_directory, mangled_test_name)\n test_file_prefix = \"%s.\" % test_file_prefix\n\n try:\n if not tf.gfile.Exists(test_executable):\n raise ValueError(\"Executable does not exist: %s\" % test_executable)\n test_args = shlex.split(test_args)\n\n # This key is defined in tf/core/util/reporter.h as\n # TestReporter::kTestReporterEnv.\n os.environ[\"TEST_REPORT_FILE_PREFIX\"] = test_file_prefix\n start_time = time.time()\n subprocess.check_call([test_executable] + test_args)\n run_time = time.time() - start_time\n log_files = tf.gfile.Glob(\"{}*\".format(test_file_prefix))\n\n return (process_test_logs(name, test_name, test_args,\n start_time=int(start_time),\n run_time=run_time, log_files=log_files),\n mangled_test_name)\n\n finally:\n try:\n tf.gfile.DeleteRecursively(temp_directory)\n except OSError:\n pass\n"
] | [
[
"tensorflow.contrib.seq2seq.simple_decoder_fn_inference",
"tensorflow.constant_initializer",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.seq2seq.dynamic_rnn_decoder",
"tensorflow.variable_scope",
"tensorflow.random_normal_initializer",
"tensorflow.contrib.seq2seq.simple_decoder_fn_train",
"tensorflow.constant",
"tensorflow.contrib.layers.linear",
"tensorflow.test.main"
],
[
"tensorflow.device",
"tensorflow.train.ClusterSpec",
"tensorflow.train.replica_device_setter",
"tensorflow.Variable",
"tensorflow.test.main"
],
[
"tensorflow.python.ops.gen_parsing_ops._parse_example",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_parsing_ops._parse_single_sequence_example",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.sparse_ops.sparse_merge"
],
[
"tensorflow.tools.test.system_info_lib.gather_machine_configuration",
"tensorflow.gfile.GFile",
"tensorflow.core.util.test_log_pb2.BenchmarkEntries",
"tensorflow.core.util.test_log_pb2.TestResults",
"tensorflow.gfile.Exists",
"tensorflow.gfile.DeleteRecursively"
]
] |
ReyhaneAskari/SLA_violation_classification | [
"258a3c415cebcd04601e4d794d42d664471df668",
"258a3c415cebcd04601e4d794d42d664471df668"
] | [
"3_create_database/scripts/createDB2.py",
"4_simple_models/scripts/random_forest_SMOTE_bordeline_1.py"
] | [
"# -*- coding: utf-8 -*-\n\n# In this script we find all the history behind an evicted task that has not been finished/killed/failed/lost.\n# We find how many times it has been submitted and what were the events related to this task. \n# The violatedtasks dictionary is the one that is we are looking for. It has only one entry\n# for each (jobid, taskindex) and the rest is stored in a multidimensional array.\n# This database is a test as it is only using 1/500 of the task_events table. \n# Also the not_finished_evictedtasks is only for the first(1/500) part of the tasks_events table.\n# Since the script assumed that the machine state changes in the machine table when a task is added, \n# it is not getting the right results. The problem is that the machines table is only updated when a\n# machine is added so it is just the events of the machines, in order to find the available cpu, \n# memory and disk of a specific machine at the time that a task is assigned to a machine, \n# we need to have more complicated calculations.(refer to createDB3) \n\n# @author: reyhane_askari\n# Universite de Montreal, Dec 2015\n\nfrom os import chdir, listdir\nfrom pandas import read_csv\nfrom os import path\nfrom random import randint, sample, seed\nfrom collections import OrderedDict\nfrom pandas import DataFrame\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport csv\nimport codecs\n\nchdir('/home/askrey/Dropbox/Project_step_by_step/2_find_violations/csvs')\ntask_events_csv_colnames = ['time', 'missing', 'job_id', 'task_idx', 'machine_id', 'event_type', 'user', 'sched_cls', \n 'priority', 'cpu_requested', 'mem_requested', 'disk', 'restriction'] \nevictedtasks = OrderedDict([])\nviolatedtasks = OrderedDict([])\nfor key, val in csv.reader(open(\"not_finished_evictedtasks.csv\")):\n evictedtasks[key] = val\n\nmachines_dictionary = OrderedDict([])\n\n#load machine events table:\nchdir('/home/askrey/Final_project') \nreader = csv.reader(codecs.open('part-00000-of-00001.csv','rU','utf-8'))\n\n# key of machines_dictionary is the primary fields of the machine events table (time, machine id) \n# other fields: event type, platform id, CUPs, memory\n\nfor row in reader:\n machines_dictionary[(row[0],row[1])] = row[2:]\n\n#for fn in sorted(listdir('task_events')):\nfp = path.join('task_events',sorted(listdir('task_events'))[0])\ntask_events_df = read_csv(fp, header = None, index_col = False, names = task_events_csv_colnames, compression = 'gzip')\n\nfor index, event in task_events_df.iterrows():\n \n if (event['job_id'], event['task_idx']) in violatedtasks:\n violatedtasks[event['job_id'],event['task_idx']][0].append(event['time'])\n violatedtasks[event['job_id'],event['task_idx']][2].append(event['machine_id'])\n violatedtasks[event['job_id'],event['task_idx']][3].append(event['event_type'])\n violatedtasks[event['job_id'],event['task_idx']][11].append((machines_dictionary[(str(event['time']),str(event['machine_id']))] if (str(event['time']), str(event['machine_id'])) in machines_dictionary else 0))\n\n elif (\"(\"+str(event['job_id'])+ \", \"+ str(event['task_idx'])+\")\") in evictedtasks:\n violatedtasks[event['job_id'],event['task_idx']] = [[event['time']],event['missing'],[event['machine_id']],\n\t\t\t\t\t\t\t [event['event_type']], event['user'], event['sched_cls'], event['priority'], event['cpu_requested'],\n\t\t\t\t\t\t\t event['mem_requested'], event['disk'], event['restriction'], \n\t\t\t\t\t\t\t [(machines_dictionary[(str(event['time']),str(event['machine_id']))] if (str(event['time']), 
str(event['machine_id'])) in machines_dictionary else 0 )]]\n\n# The problem right now is that in some places the events have not yet had a machine assigned to them, which is why the machine id is NaN.\n\nwriter = csv.writer(open('/home/askrey/Dropbox/Databases/testDB5.csv', 'wb'))\nfor key, value in violatedtasks.items():\n    writer.writerow([key, value])\n",
"# -*- coding: utf-8 -*-\n\n# In this script we use a simple classifer called naive bayes and try to predict the violations. But before that we use\n# some methods to tackle the problem of our skewed dataset. :) \n\n# 11 May 2016\n# @author: reyhane_askari\n# Universite de Montreal, DIRO\n\nimport csv\nimport numpy as np\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\nimport pandas as pd\nfrom os import chdir, listdir\nfrom pandas import read_csv\nfrom os import path\nfrom random import randint, sample, seed\nfrom collections import OrderedDict\nfrom pandas import DataFrame, Series\nimport numpy as np \nimport csv\nimport codecs\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nsns.set()\nimport itertools\nfrom sklearn.decomposition import PCA\nfrom unbalanced_dataset import UnderSampler, NearMiss, CondensedNearestNeighbour, OneSidedSelection,\\\nNeighbourhoodCleaningRule, TomekLinks, ClusterCentroids, OverSampler, SMOTE,\\\nSMOTETomek, SMOTEENN, EasyEnsemble, BalanceCascade\n\nalmost_black = '#262626'\n\ncolnames = ['old_index','job_id', 'task_idx','sched_cls', 'priority', 'cpu_requested',\n 'mem_requested', 'disk', 'violation'] \n\ntain_path = r'/home/askrey/Dropbox/Project_step_by_step/3_create_database/csvs/frull_db_2.csv'\n\nX = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [3,4,5,6,7])\ny = pd.read_csv(tain_path, header = None, index_col = False ,names = colnames, skiprows = [0], usecols = [8])\ny = y['violation'].values\n# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.333, random_state=0)\nmain_x = X.values\nmain_y = y\n\nverbose = False\nratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))\n# 'SMOTE bordeline 1'\nbsmote1 = SMOTE(ratio=ratio, verbose=verbose, kind='borderline1')\nx, y = bsmote1.fit_transform(main_x, main_y)\n\nratio = float(np.count_nonzero(y==1)) / float(np.count_nonzero(y==0))\nX_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.333, random_state=0)\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cross_validation import cross_val_score\n\nclf = RandomForestClassifier(n_estimators=10)\nscores = cross_val_score(clf, X_test, y_test)\n\ny_pred = clf.fit(X_train, y_train).predict(X_test)\ny_score = clf.fit(X_train, y_train).predict_proba(X_test)[:,1]\n\n\nmean_accuracy = clf.fit(X_train, y_train).score(X_test,y_test,sample_weight=None)\nfpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)\n\nroc_auc = auc(fpr, tpr)\n\nplt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic example')\nplt.legend(loc=\"lower right\")\n\nplt.savefig('/home/askrey/Dropbox/Project_step_by_step/5_simple_models/new_scripts/random_forest_SMOTE_bordeline_1.pdf')\n"
] | [
[
"pandas.read_csv"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"matplotlib.pyplot.savefig",
"sklearn.cross_validation.train_test_split",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"numpy.count_nonzero",
"sklearn.cross_validation.cross_val_score",
"matplotlib.pyplot.ylabel",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
dg4271/haystack | [
"e930d8a717dfca0c7d502f331d7076b51b5f6898"
] | [
"test/benchmarks/retriever.py"
] | [
"import pandas as pd\nfrom pathlib import Path\nfrom time import perf_counter\nfrom utils import get_document_store, get_retriever, index_to_doc_store, load_config\nfrom haystack.preprocessor.utils import eval_data_from_json\nfrom haystack.document_store.faiss import FAISSDocumentStore\n\nfrom haystack import Document\nimport pickle\nimport time\nfrom tqdm import tqdm\nimport logging\nimport datetime\nimport random\nimport traceback\nimport os\nimport requests\nfrom farm.file_utils import download_from_s3\nimport json\nfrom results_to_json import retriever as retriever_json\nfrom templates import RETRIEVER_TEMPLATE, RETRIEVER_MAP_TEMPLATE, RETRIEVER_SPEED_TEMPLATE\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"haystack.retriever.base\").setLevel(logging.WARN)\nlogging.getLogger(\"elasticsearch\").setLevel(logging.WARN)\n\ndoc_index = \"eval_document\"\nlabel_index = \"label\"\n\nindex_results_file = \"retriever_index_results.csv\"\nquery_results_file = \"retriever_query_results.csv\"\n\noverview_json = \"../../docs/_src/benchmarks/retriever_performance.json\"\nmap_json = \"../../docs/_src/benchmarks/retriever_map.json\"\nspeed_json = \"../../docs/_src/benchmarks/retriever_speed.json\"\n\n\nseed = 42\nrandom.seed(42)\n\ndef benchmark_indexing(n_docs_options, retriever_doc_stores, data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, update_json, save_markdown, **kwargs):\n\n retriever_results = []\n for n_docs in n_docs_options:\n for retriever_name, doc_store_name in retriever_doc_stores:\n logger.info(f\"##### Start indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### \")\n try:\n doc_store = get_document_store(doc_store_name)\n retriever = get_retriever(retriever_name, doc_store)\n docs, _ = prepare_data(data_dir=data_dir,\n filename_gold=filename_gold,\n filename_negative=filename_negative,\n data_s3_url=data_s3_url,\n embeddings_filenames=embeddings_filenames,\n embeddings_dir=embeddings_dir,\n n_docs=n_docs)\n\n tic = perf_counter()\n index_to_doc_store(doc_store, docs, retriever)\n toc = perf_counter()\n indexing_time = toc - tic\n\n print(indexing_time)\n\n retriever_results.append({\n \"retriever\": retriever_name,\n \"doc_store\": doc_store_name,\n \"n_docs\": n_docs,\n \"indexing_time\": indexing_time,\n \"docs_per_second\": n_docs / indexing_time,\n \"date_time\": datetime.datetime.now(),\n \"error\": None})\n retriever_df = pd.DataFrame.from_records(retriever_results)\n retriever_df = retriever_df.sort_values(by=\"retriever\").sort_values(by=\"doc_store\")\n retriever_df.to_csv(index_results_file)\n logger.info(\"Deleting all docs from this run ...\")\n\n if isinstance(doc_store, FAISSDocumentStore):\n doc_store.session.close()\n else:\n doc_store.delete_all_documents(index=doc_index)\n doc_store.delete_all_documents(index=label_index)\n\n if save_markdown:\n md_file = index_results_file.replace(\".csv\", \".md\")\n with open(md_file, \"w\") as f:\n f.write(str(retriever_df.to_markdown()))\n time.sleep(10)\n del doc_store\n del retriever\n\n except Exception:\n tb = traceback.format_exc()\n logging.error(f\"##### The following Error was raised while running indexing run: {retriever_name}, {doc_store_name}, {n_docs} docs #####\")\n logging.error(tb)\n retriever_results.append({\n \"retriever\": retriever_name,\n \"doc_store\": doc_store_name,\n \"n_docs\": n_docs,\n \"indexing_time\": 0,\n \"docs_per_second\": 0,\n \"date_time\": datetime.datetime.now(),\n \"error\": str(tb)})\n logger.info(\"Deleting all 
docs from this run ...\")\n if isinstance(doc_store, FAISSDocumentStore):\n doc_store.session.close()\n else:\n doc_store.delete_all_documents(index=doc_index)\n doc_store.delete_all_documents(index=label_index)\n time.sleep(10)\n del doc_store\n del retriever\n if update_json:\n populate_retriever_json()\n\n\n\ndef benchmark_querying(n_docs_options,\n retriever_doc_stores,\n data_dir,\n data_s3_url,\n filename_gold,\n filename_negative,\n n_queries,\n embeddings_filenames,\n embeddings_dir,\n update_json,\n save_markdown,\n **kwargs):\n \"\"\" Benchmark the time it takes to perform querying. Doc embeddings are loaded from file.\"\"\"\n retriever_results = []\n\n for n_docs in n_docs_options:\n for retriever_name, doc_store_name in retriever_doc_stores:\n try:\n logger.info(f\"##### Start querying run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### \")\n if retriever_name == \"elastic\":\n similarity = \"cosine\"\n else:\n similarity = \"dot_product\"\n doc_store = get_document_store(doc_store_name, similarity=similarity)\n retriever = get_retriever(retriever_name, doc_store)\n add_precomputed = retriever_name in [\"dpr\"]\n # For DPR, precomputed embeddings are loaded from file\n docs, labels = prepare_data(data_dir=data_dir,\n filename_gold=filename_gold,\n filename_negative=filename_negative,\n data_s3_url=data_s3_url,\n embeddings_filenames=embeddings_filenames,\n embeddings_dir=embeddings_dir,\n n_docs=n_docs,\n n_queries=n_queries,\n add_precomputed=add_precomputed)\n logger.info(\"Start indexing...\")\n index_to_doc_store(doc_store, docs, retriever, labels)\n logger.info(\"Start queries...\")\n\n raw_results = retriever.eval()\n results = {\n \"retriever\": retriever_name,\n \"doc_store\": doc_store_name,\n \"n_docs\": n_docs,\n \"n_queries\": raw_results[\"n_questions\"],\n \"retrieve_time\": raw_results[\"retrieve_time\"],\n \"queries_per_second\": raw_results[\"n_questions\"] / raw_results[\"retrieve_time\"],\n \"seconds_per_query\": raw_results[\"retrieve_time\"]/ raw_results[\"n_questions\"],\n \"recall\": raw_results[\"recall\"] * 100,\n \"map\": raw_results[\"map\"] * 100,\n \"top_k\": raw_results[\"top_k\"],\n \"date_time\": datetime.datetime.now(),\n \"error\": None\n }\n\n logger.info(\"Deleting all docs from this run ...\")\n if isinstance(doc_store, FAISSDocumentStore):\n doc_store.session.close()\n else:\n doc_store.delete_all_documents(index=doc_index)\n doc_store.delete_all_documents(index=label_index)\n time.sleep(5)\n del doc_store\n del retriever\n except Exception:\n tb = traceback.format_exc()\n logging.error(f\"##### The following Error was raised while running querying run: {retriever_name}, {doc_store_name}, {n_docs} docs #####\")\n logging.error(tb)\n results = {\n \"retriever\": retriever_name,\n \"doc_store\": doc_store_name,\n \"n_docs\": n_docs,\n \"n_queries\": 0,\n \"retrieve_time\": 0.,\n \"queries_per_second\": 0.,\n \"seconds_per_query\": 0.,\n \"recall\": 0.,\n \"map\": 0.,\n \"top_k\": 0,\n \"date_time\": datetime.datetime.now(),\n \"error\": str(tb)\n }\n logger.info(\"Deleting all docs from this run ...\")\n if isinstance(doc_store, FAISSDocumentStore):\n doc_store.session.close()\n else:\n doc_store.delete_all_documents(index=doc_index)\n doc_store.delete_all_documents(index=label_index)\n time.sleep(5)\n del doc_store\n del retriever\n logger.info(results)\n retriever_results.append(results)\n\n retriever_df = pd.DataFrame.from_records(retriever_results)\n retriever_df = 
retriever_df.sort_values(by=\"retriever\").sort_values(by=\"doc_store\")\n retriever_df.to_csv(query_results_file)\n if save_markdown:\n md_file = query_results_file.replace(\".csv\", \".md\")\n with open(md_file, \"w\") as f:\n f.write(str(retriever_df.to_markdown()))\n if update_json:\n populate_retriever_json()\n\n\ndef populate_retriever_json():\n retriever_overview_data, retriever_map_data, retriever_speed_data = retriever_json(index_csv=index_results_file,\n query_csv=query_results_file)\n overview = RETRIEVER_TEMPLATE\n overview[\"data\"] = retriever_overview_data\n map = RETRIEVER_MAP_TEMPLATE\n map[\"data\"] = retriever_map_data\n speed = RETRIEVER_SPEED_TEMPLATE\n speed[\"data\"] = retriever_speed_data\n json.dump(overview, open(overview_json, \"w\"), indent=4)\n json.dump(speed, open(speed_json, \"w\"), indent=4)\n json.dump(map, open(map_json, \"w\"), indent=4)\n\n\ndef add_precomputed_embeddings(embeddings_dir, embeddings_filenames, docs):\n ret = []\n id_to_doc = {x.meta[\"passage_id\"]: x for x in docs}\n for ef in embeddings_filenames:\n logger.info(f\"Adding precomputed embeddings from {embeddings_dir + ef}\")\n filename = embeddings_dir + ef\n embeds = pickle.load(open(filename, \"rb\"))\n for i, vec in embeds:\n if int(i) in id_to_doc:\n curr = id_to_doc[int(i)]\n curr.embedding = vec\n ret.append(curr)\n # In the official DPR repo, there are only 20594995 precomputed embeddings for 21015324 wikipedia passages\n # If there isn't an embedding for a given doc, we remove it here\n ret = [x for x in ret if x.embedding is not None]\n logger.info(f\"Embeddings loaded for {len(ret)}/{len(docs)} docs\")\n return ret\n\n\ndef prepare_data(data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, n_docs=None, n_queries=None, add_precomputed=False):\n \"\"\"\n filename_gold points to a squad format file.\n filename_negative points to a csv file where the first column is doc_id and second is document text.\n If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document\n \"\"\"\n\n logging.getLogger(\"farm\").setLevel(logging.INFO)\n download_from_s3(data_s3_url + filename_gold, cache_dir=data_dir)\n download_from_s3(data_s3_url + filename_negative, cache_dir=data_dir)\n if add_precomputed:\n for embedding_filename in embeddings_filenames:\n download_from_s3(data_s3_url + str(embeddings_dir) + embedding_filename, cache_dir=data_dir)\n logging.getLogger(\"farm\").setLevel(logging.WARN)\n\n gold_docs, labels = eval_data_from_json(data_dir + filename_gold)\n\n # Reduce number of docs\n gold_docs = gold_docs[:n_docs]\n\n # Remove labels whose gold docs have been removed\n doc_ids = [x.id for x in gold_docs]\n labels = [x for x in labels if x.document_id in doc_ids]\n\n # Filter labels down to n_queries\n selected_queries = list(set(f\"{x.document_id} | {x.question}\" for x in labels))\n selected_queries = selected_queries[:n_queries]\n labels = [x for x in labels if f\"{x.document_id} | {x.question}\" in selected_queries]\n\n n_neg_docs = max(0, n_docs - len(gold_docs))\n neg_docs = prepare_negative_passages(data_dir, filename_negative, n_neg_docs)\n docs = gold_docs + neg_docs\n\n if add_precomputed:\n docs = add_precomputed_embeddings(data_dir + embeddings_dir, embeddings_filenames, docs)\n\n return docs, labels\n\ndef prepare_negative_passages(data_dir, filename_negative, n_docs):\n if n_docs == 0:\n return []\n with open(data_dir + filename_negative) as f:\n lines = []\n _ = f.readline() 
# Skip column titles line\n for _ in range(n_docs):\n lines.append(f.readline()[:-1])\n\n docs = []\n for l in lines[:n_docs]:\n id, text, title = l.split(\"\\t\")\n d = {\"text\": text,\n \"meta\": {\"passage_id\": int(id),\n \"title\": title}}\n d = Document(**d)\n docs.append(d)\n return docs\n\n\nif __name__ == \"__main__\":\n params, filenames = load_config(config_filename=\"config.json\", ci=True)\n benchmark_indexing(**params, **filenames, update_json=True, save_markdown=False)\n benchmark_querying(**params, **filenames, update_json=True, save_markdown=False)\n\n"
] | [
[
"pandas.DataFrame.from_records"
]
] |
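A minimal sketch of the pandas.DataFrame.from_records aggregation pattern that the row above is annotated with (the only API listed in its apis field). The dict keys mirror the benchmark script embedded in the row; the sample values and the output file name are invented for illustration, and the combined sort key is a simplification of the script's chained sort_values calls.

import datetime

import pandas as pd

# One result dict per (retriever, doc_store, n_docs) run, as in the script above.
retriever_results = [
    {"retriever": "dpr", "doc_store": "faiss", "n_docs": 1000,
     "indexing_time": 12.3, "docs_per_second": 1000 / 12.3,
     "date_time": datetime.datetime.now(), "error": None},
    {"retriever": "elastic", "doc_store": "elasticsearch", "n_docs": 1000,
     "indexing_time": 7.8, "docs_per_second": 1000 / 7.8,
     "date_time": datetime.datetime.now(), "error": None},
]

retriever_df = pd.DataFrame.from_records(retriever_results)
retriever_df = retriever_df.sort_values(by=["doc_store", "retriever"])
retriever_df.to_csv("retriever_index_results.csv")  # hypothetical output path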
khawar512/OPVT | [
"690e540e7f54e43751d28a046009993e3e325291"
] | [
"vit_pytorch/do_conv_pytorch.py"
] | [
"# coding=utf-8\n\nimport math\nimport torch\nimport numpy as np\nfrom torch.nn import init\nfrom itertools import repeat\nfrom torch.nn import functional as F\nimport collections.abc as container_abcs\nfrom typing import Optional\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\n\n\nclass DOConv2d(Module):\n \"\"\"\n DOConv2d can be used as an alternative for torch.nn.Conv2d.\n The interface is similar to that of Conv2d, with one exception:\n 1. D_mul: the depth multiplier for the over-parameterization.\n Note that the groups parameter switchs between DO-Conv (groups=1),\n DO-DConv (groups=in_channels), DO-GConv (otherwise).\n \"\"\"\n __constants__ = ['stride', 'padding', 'dilation', 'groups',\n 'padding_mode', 'output_padding', 'in_channels',\n 'out_channels', 'kernel_size', 'D_mul']\n __annotations__ = {'bias': Optional[torch.Tensor]}\n\n def __init__(self, in_channels, out_channels, kernel_size, D_mul=None, stride=1,\n padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):\n super(DOConv2d, self).__init__()\n\n kernel_size = _pair(kernel_size)\n stride = _pair(stride)\n padding = _pair(padding)\n dilation = _pair(dilation)\n\n if in_channels % groups != 0:\n raise ValueError('in_channels must be divisible by groups')\n if out_channels % groups != 0:\n raise ValueError('out_channels must be divisible by groups')\n valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}\n if padding_mode not in valid_padding_modes:\n raise ValueError(\"padding_mode must be one of {}, but got padding_mode='{}'\".format(\n valid_padding_modes, padding_mode))\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n self.padding_mode = padding_mode\n self._padding_repeated_twice = tuple(x for x in self.padding for _ in range(2))\n\n #################################### Initailization of D & W ###################################\n M = self.kernel_size[0]\n N = self.kernel_size[1]\n self.D_mul = M * N if D_mul is None or M * N <= 1 else D_mul\n self.W = Parameter(torch.Tensor(out_channels, in_channels // groups, self.D_mul))\n init.kaiming_uniform_(self.W, a=math.sqrt(5))\n\n if M * N > 1:\n self.D = Parameter(torch.Tensor(in_channels, M * N, self.D_mul))\n init_zero = np.zeros([in_channels, M * N, self.D_mul], dtype=np.float32)\n self.D.data = torch.from_numpy(init_zero)\n\n eye = torch.reshape(torch.eye(M * N, dtype=torch.float32), (1, M * N, M * N))\n D_diag = eye.repeat((in_channels, 1, self.D_mul // (M * N)))\n if self.D_mul % (M * N) != 0: # the cases when D_mul > M * N\n zeros = torch.zeros([in_channels, M * N, self.D_mul % (M * N)])\n self.D_diag = Parameter(torch.cat([D_diag, zeros], dim=2), requires_grad=False)\n else: # the case when D_mul = M * N\n self.D_diag = Parameter(D_diag, requires_grad=False)\n ##################################################################################################\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n fan_in, _ = init._calculate_fan_in_and_fan_out(self.W)\n bound = 1 / math.sqrt(fan_in)\n init.uniform_(self.bias, -bound, bound)\n else:\n self.register_parameter('bias', None)\n\n def extra_repr(self):\n s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', 
dilation={dilation}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n if self.padding_mode != 'zeros':\n s += ', padding_mode={padding_mode}'\n return s.format(**self.__dict__)\n\n def __setstate__(self, state):\n super(DOConv2d, self).__setstate__(state)\n if not hasattr(self, 'padding_mode'):\n self.padding_mode = 'zeros'\n\n def _conv_forward(self, input, weight):\n if self.padding_mode != 'zeros':\n return F.conv2d(F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),\n weight, self.bias, self.stride,\n _pair(0), self.dilation, self.groups)\n return F.conv2d(input, weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n def forward(self, input):\n M = self.kernel_size[0]\n N = self.kernel_size[1]\n DoW_shape = (self.out_channels, self.in_channels // self.groups, M, N)\n if M * N > 1:\n ######################### Compute DoW #################\n # (input_channels, D_mul, M * N)\n D = self.D + self.D_diag\n W = torch.reshape(self.W, (self.out_channels // self.groups, self.in_channels, self.D_mul))\n\n # einsum outputs (out_channels // groups, in_channels, M * N),\n # which is reshaped to\n # (out_channels, in_channels // groups, M, N)\n DoW = torch.reshape(torch.einsum('ims,ois->oim', D, W), DoW_shape)\n #######################################################\n else:\n # in this case D_mul == M * N\n # reshape from\n # (out_channels, in_channels // groups, D_mul)\n # to\n # (out_channels, in_channels // groups, M, N)\n DoW = torch.reshape(self.W, DoW_shape)\n return self._conv_forward(input, DoW)\n\n\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, container_abcs.Iterable):\n return x\n return tuple(repeat(x, n))\n\n return parse\n\n\n_pair = _ntuple(2)"
] | [
[
"numpy.zeros",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.functional.conv2d",
"torch.nn.functional.pad",
"torch.nn.init.uniform_",
"torch.reshape",
"torch.from_numpy",
"torch.zeros",
"torch.einsum",
"torch.eye",
"torch.cat",
"torch.nn.parameter.Parameter",
"torch.Tensor"
]
] |
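The DOConv2d docstring in the row above describes the module as a drop-in alternative to torch.nn.Conv2d, with the groups argument switching between DO-Conv, DO-DConv, and DO-GConv. A minimal usage sketch under that reading follows; the import path is inferred from the row's file_path field and the tensor shapes are arbitrary, so treat both as assumptions rather than part of the original repository.

import torch

# Module path taken from the file_path field of the row above (assumed importable as a package).
from vit_pytorch.do_conv_pytorch import DOConv2d

x = torch.randn(2, 16, 32, 32)  # (batch, in_channels, height, width)

# groups=1 -> DO-Conv; groups=in_channels -> DO-DConv (depthwise); other values -> DO-GConv.
do_conv = DOConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
do_dconv = DOConv2d(in_channels=16, out_channels=16, kernel_size=3, padding=1, groups=16)

print(do_conv(x).shape)   # torch.Size([2, 32, 32, 32]) -- same output shape as nn.Conv2d with these args
print(do_dconv(x).shape)  # torch.Size([2, 16, 32, 32])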
gunpowder78/google-research | [
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5",
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5",
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5",
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5",
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5",
"d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5"
] | [
"unprocessing/process.py",
"tf3d/object_detection/preprocessor_test.py",
"combiner/combiner/tf/approx_attention.py",
"polish/ppo/ppo_model_fn.py",
"scann/configure.py",
"ebp/ebp/common/plot_utils/plot_2d.py"
] | [
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Forward processing of raw data to sRGB images.\n\nUnprocessing Images for Learned Raw Denoising\nhttp://timothybrooks.com/tech/unprocessing\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\n\ndef apply_gains(bayer_images, red_gains, blue_gains):\n \"\"\"Applies white balance gains to a batch of Bayer images.\"\"\"\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n green_gains = tf.ones_like(red_gains)\n gains = tf.stack([red_gains, green_gains, green_gains, blue_gains], axis=-1)\n gains = gains[:, tf.newaxis, tf.newaxis, :]\n return bayer_images * gains\n\n\ndef demosaic(bayer_images):\n \"\"\"Bilinearly demosaics a batch of RGGB Bayer images.\"\"\"\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n\n # This implementation exploits how edges are aligned when upsampling with\n # tf.image.resize_bilinear().\n\n with tf.name_scope(None, 'demosaic'):\n shape = tf.shape(bayer_images)\n shape = [shape[1] * 2, shape[2] * 2]\n\n red = bayer_images[Ellipsis, 0:1]\n red = tf.image.resize_bilinear(red, shape)\n\n green_red = bayer_images[Ellipsis, 1:2]\n green_red = tf.image.flip_left_right(green_red)\n green_red = tf.image.resize_bilinear(green_red, shape)\n green_red = tf.image.flip_left_right(green_red)\n green_red = tf.space_to_depth(green_red, 2)\n\n green_blue = bayer_images[Ellipsis, 2:3]\n green_blue = tf.image.flip_up_down(green_blue)\n green_blue = tf.image.resize_bilinear(green_blue, shape)\n green_blue = tf.image.flip_up_down(green_blue)\n green_blue = tf.space_to_depth(green_blue, 2)\n\n green_at_red = (green_red[Ellipsis, 0] + green_blue[Ellipsis, 0]) / 2\n green_at_green_red = green_red[Ellipsis, 1]\n green_at_green_blue = green_blue[Ellipsis, 2]\n green_at_blue = (green_red[Ellipsis, 3] + green_blue[Ellipsis, 3]) / 2\n\n green_planes = [\n green_at_red, green_at_green_red, green_at_green_blue, green_at_blue\n ]\n green = tf.depth_to_space(tf.stack(green_planes, axis=-1), 2)\n\n blue = bayer_images[Ellipsis, 3:4]\n blue = tf.image.flip_up_down(tf.image.flip_left_right(blue))\n blue = tf.image.resize_bilinear(blue, shape)\n blue = tf.image.flip_up_down(tf.image.flip_left_right(blue))\n\n rgb_images = tf.concat([red, green, blue], axis=-1)\n return rgb_images\n\n\ndef apply_ccms(images, ccms):\n \"\"\"Applies color correction matrices.\"\"\"\n images.shape.assert_has_rank(4)\n images = images[:, :, :, tf.newaxis, :]\n ccms = ccms[:, tf.newaxis, tf.newaxis, :, :]\n return tf.reduce_sum(images * ccms, axis=-1)\n\n\ndef gamma_compression(images, gamma=2.2):\n \"\"\"Converts from linear to gamma space.\"\"\"\n # Clamps to prevent numerical instability of gradients near zero.\n return tf.maximum(images, 1e-8) ** (1.0 / gamma)\n\n\ndef process(bayer_images, red_gains, blue_gains, cam2rgbs):\n \"\"\"Processes a batch of Bayer 
RGGB images into sRGB images.\"\"\"\n bayer_images.shape.assert_is_compatible_with((None, None, None, 4))\n with tf.name_scope(None, 'process'):\n # White balance.\n bayer_images = apply_gains(bayer_images, red_gains, blue_gains)\n # Demosaic.\n bayer_images = tf.clip_by_value(bayer_images, 0.0, 1.0)\n images = demosaic(bayer_images)\n # Color correction.\n images = apply_ccms(images, cam2rgbs)\n # Gamma compression.\n images = tf.clip_by_value(images, 0.0, 1.0)\n images = gamma_compression(images)\n return images\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for ...object_detection.preprocessor.\"\"\"\n\nimport tensorflow as tf\nfrom tf3d import standard_fields\nfrom tf3d.object_detection import preprocessor\n\n\nclass ObjectDetectionPreprocessorTest(tf.test.TestCase):\n\n def _image_correspondence_fn(self, inputs):\n return {\n 'view_images': {\n 'rgb_view':\n tf.cast(\n tf.zeros([5, 200, 300, 3], dtype=tf.int32), dtype=tf.uint8),\n },\n 'view_indices_2d': {\n 'rgb_view':\n tf.random.uniform([5, 100, 2],\n minval=-10,\n maxval=1000,\n dtype=tf.int32)\n }\n }\n\n def _get_input_dict(self, height=240, width=320):\n return {\n standard_fields.InputDataFields.camera_image:\n tf.zeros((height, width, 3), dtype=tf.uint8),\n standard_fields.InputDataFields.point_positions:\n tf.random.uniform((100, 3), minval=-1, maxval=1),\n standard_fields.InputDataFields.camera_intrinsics:\n tf.constant([\n [160.0, 0.0, 160.0], # fx, s, cx\n [0.0, 160.0, 120.0], # 0, fy, cy\n [0.0, 0.0, 1.0], # 0, 0, 1\n ]),\n standard_fields.InputDataFields.camera_rotation_matrix:\n tf.eye(3),\n standard_fields.InputDataFields.camera_translation:\n tf.constant([0., 0., 2.]),\n standard_fields.InputDataFields.objects_class:\n tf.constant([1, 4, 5]),\n standard_fields.InputDataFields.objects_length:\n tf.constant([[4.0], [1.0], [1.0]]),\n standard_fields.InputDataFields.objects_height:\n tf.constant([[2.0], [1.0], [4.0]]),\n standard_fields.InputDataFields.objects_width:\n tf.constant([[2.0], [1.0], [1.0]]),\n standard_fields.InputDataFields.objects_rotation_matrix:\n tf.constant([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],\n [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],\n [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]),\n standard_fields.InputDataFields.objects_center:\n tf.constant([[4.0, 4.0, 4.0], [2.5, 2.5, 2.5], [0.5, 1.5, 9.5]]),\n standard_fields.InputDataFields.objects_difficulty:\n tf.constant([[1], [1], [1]]),\n standard_fields.InputDataFields.objects_instance_id:\n tf.constant([[1], [2], [1]]),\n standard_fields.InputDataFields.objects_has_3d_info:\n tf.constant([1, 1, 0]),\n standard_fields.InputDataFields.camera_image_name:\n tf.convert_to_tensor('image', tf.string),\n }\n\n def test_preprocess_output_shapes(self):\n height, width = (240, 320)\n input_dict = self._get_input_dict(height, width)\n object_keys = preprocessor._OBJECT_KEYS\n output_keys = [\n standard_fields.InputDataFields.camera_intrinsics,\n standard_fields.InputDataFields.camera_rotation_matrix,\n standard_fields.InputDataFields.camera_translation,\n standard_fields.InputDataFields.point_positions,\n standard_fields.InputDataFields.num_valid_points,\n standard_fields.InputDataFields.object_class_points,\n standard_fields.InputDataFields.object_center_points,\n standard_fields.InputDataFields.object_height_points,\n standard_fields.InputDataFields.object_width_points,\n standard_fields.InputDataFields.object_rotation_matrix_points,\n 
standard_fields.InputDataFields.object_length_points,\n standard_fields.InputDataFields.object_instance_id_points,\n ]\n output_dict = preprocessor.preprocess(\n inputs=input_dict,\n images_points_correspondence_fn=self._image_correspondence_fn,\n image_preprocess_fn_dic=None)\n for key in output_keys:\n self.assertIn(key, output_dict)\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.camera_intrinsics].shape,\n (3, 3))\n self.assertEqual(\n output_dict[\n standard_fields.InputDataFields.camera_rotation_matrix].shape,\n (3, 3))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.camera_translation].shape,\n (3,))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.point_positions].shape,\n (100, 3))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.num_valid_points].numpy(),\n 100)\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.object_class_points].shape,\n (100,))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.object_center_points].shape,\n (100, 3))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.object_height_points].shape,\n (100, 1))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.object_width_points].shape,\n (100, 1))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields.object_length_points].shape,\n (100, 1))\n self.assertEqual(\n output_dict[standard_fields.InputDataFields\n .object_rotation_matrix_points].shape, (100, 3, 3))\n self.assertEqual(\n output_dict[\n standard_fields.InputDataFields.object_instance_id_points].shape,\n (100,))\n for key in object_keys:\n self.assertEqual(output_dict[key].shape[0], 2)\n\n def test_preprocess_output_keys(self):\n height, width = (240, 320)\n input_dict = self._get_input_dict(height, width)\n output_dict = preprocessor.preprocess(\n inputs=input_dict,\n images_points_correspondence_fn=self._image_correspondence_fn,\n output_keys=[standard_fields.InputDataFields.camera_image],\n image_preprocess_fn_dic=None)\n self.assertIn(standard_fields.InputDataFields.camera_image, output_dict)\n self.assertEqual(len(output_dict.keys()), 1)\n\n def test_preprocess_missing_input_raises(self):\n with self.assertRaises(ValueError):\n empty_input = {}\n preprocessor.preprocess(inputs=empty_input)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: skip-file\nimport tensorflow.compat.v1 as tf\nimport math\nfrom combiner.tf import attention\nfrom combiner.tf import ops\nimport functools\n\n\ndef shift_right(x, axis):\n \"\"\"Shift input x to the right along given axis.\"\"\"\n pad_widths = [(0, 0)] * len(x.shape)\n pad_widths[axis] = (1, 0)\n padded = tf.pad(x, pad_widths)\n return tf.slice(padded, begin=[0]*len(x.shape), size=x.shape)\n\n\ndef shift_left(x, axis):\n \"\"\"Shift input x to the left along given axis.\"\"\"\n pad_widths = [(0, 0)] * len(x.shape)\n pad_widths[axis] = (0, 1)\n padded = tf.pad(x, pad_widths)\n begin = [0]*len(x.shape)\n begin[axis] = 1\n return tf.slice(padded, begin=begin, size=x.shape)\n\n\ndef approx_cummax(x, axis, exclusive=False, reverse=False):\n \"\"\"Approximate the cummax operation in jax.\"\"\"\n sum_x = tf.math.cumsum(x, axis, exclusive=exclusive, reverse=reverse)\n # return tf.math.cumsum(tf.nn.relu(x), axis, reverse=reverse)\n return sum_x\n\n\ndef get_causal_mask(x, axis, is_strict, upper=False):\n \"\"\"Get attention mask bias (keep a lower triangle).\n\n Args:\n x: input tensor\n axis: across which dim to make mask\n is_strict: if True, the diagonal will be masked out as well.\n upper: upper or lower triangle\n\n Returns:\n mask: tensor of {0, -1e9} ^ (x.shape[axis], x.shape[axis])\n \"\"\"\n seq_len = tf.shape(x)[axis]\n if is_strict:\n if upper:\n mask = tf.linalg.band_part(\n tf.ones([seq_len, seq_len], dtype=x.dtype),\n num_lower=-1, num_upper=0)\n else:\n mask = tf.linalg.band_part(\n tf.ones([seq_len, seq_len], dtype=x.dtype),\n num_lower=0, num_upper=-1)\n else:\n if upper:\n mask = 1.0 - tf.linalg.band_part(\n tf.ones([seq_len, seq_len], dtype=x.dtype),\n num_lower=0, num_upper=-1)\n else:\n mask = 1.0 - tf.linalg.band_part(\n tf.ones([seq_len, seq_len], dtype=x.dtype),\n num_lower=-1, num_upper=0)\n mask = -1e9 * mask\n return mask\n\n\ndef pooling_summary(x, axis, local_summary, keepdims=False):\n \"\"\"Perform a cheap pooling summary of a span.\n\n Args:\n x: input tensor\n axis: over which axis to summarize\n local_summary: str of format activation-pooling, choose\n from {relu, identity}-{max, sum, mean}\n keepdims: whether to keep the summarized singleton axis.\n\n Returns:\n y: the same shape as x for other axis,\n except y.shape[axis] = 1 if keepdims=True,\n otherwise y.rank = x.rank + 1\n \"\"\"\n act, pool = local_summary.split('-')\n if act == 'relu':\n x = tf.nn.relu(x)\n elif act == 'identity':\n pass\n elif act == 'deepset':\n x = ops.trail_dense(x, x.shape.as_list()[-1], bias=False)\n x = tf.nn.relu(x)\n else:\n raise ValueError('Unsupported activation: %s' % act)\n if pool == 'mean':\n x = tf.math.reduce_mean(x, axis=axis, keepdims=keepdims)\n elif pool == 'max':\n x = tf.math.reduce_max(x, axis=axis, keepdims=keepdims)\n elif pool == 'sum':\n x = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims)\n else:\n raise ValueError('Unsupported 
pooling: %s' % pool)\n return x\n\n\ndef axial_mixture_unidir(x, config, is_training=True, causal=True):\n \"\"\"Full attention matrix with axial pattern as local and mixture for global summary.\"\"\"\n del is_training\n assert causal\n bsize = x.shape[0]\n query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,\n num_heads=config.num_heads, bias=config.dense_use_bias)\n head_dim = config.model_size // config.num_heads\n assert config.max_seq_len % config.max_seg_len == 0\n num_seg = config.max_seq_len // config.max_seg_len\n cur_query = tf.reshape(query, [bsize,\n num_seg,\n config.max_seg_len,\n config.num_heads,\n head_dim])\n cur_key = tf.reshape(key, cur_query.shape)\n cur_val = tf.reshape(value, cur_query.shape)\n\n col_logit_expr = 'BSUNK,BTUNK->BUNST'\n col_attn_expr = 'BUNST,BTUNK->BSUNK'\n col_strict_mask = get_causal_mask(cur_query,\n axis=1,\n is_strict=True)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]\n row_logit_expr = 'BUSNK,BUTNK->BUNST'\n row_attn_expr = 'BUNST,BUTNK->BUSNK'\n row_mask = get_causal_mask(cur_query,\n axis=2,\n is_strict=False)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]\n col_logits = tf.einsum(col_logit_expr, cur_query, cur_key) + col_strict_mask\n row_logits = tf.einsum(row_logit_expr, cur_query, cur_key) + row_mask\n\n ###################\n col_up2down_query = approx_cummax(cur_query, axis=1)\n col_up2down_key = shift_right(approx_cummax(cur_key, axis=1), axis=1)\n col_mask = get_causal_mask(\n cur_query, axis=1, is_strict=False)[tf.newaxis, tf.newaxis,\n tf.newaxis, :, :]\n col_up2down_logits = tf.einsum(col_logit_expr, col_up2down_query,\n cur_key) + col_mask\n col_up2down_attn_weights = attention.float32_softmax(\n col_up2down_logits, axis=-1)\n col_up2down_summary = tf.einsum(col_attn_expr, col_up2down_attn_weights,\n cur_val)\n col_up2down_summary = shift_right(col_up2down_summary, axis=1)\n\n row_only_myself_mask = tf.eye(tf.shape(cur_query)[2], dtype=cur_query.dtype)[tf.newaxis, tf.newaxis, tf.newaxis, :, :]\n row_without_myself_mask = -1e9 * row_only_myself_mask\n all_maskout = tf.cast(tf.fill(row_without_myself_mask.shape, -1e9), cur_query.dtype)\n row_without_myself_mask = tf.concat([all_maskout] + [row_without_myself_mask] * (cur_query.shape[1] - 1),\n axis=1)\n previous_row_logits = tf.einsum(row_logit_expr, cur_query, col_up2down_key) + row_without_myself_mask\n ###################\n\n row_left2right_query = approx_cummax(cur_query, axis=2)\n row_left2right_key = shift_right(approx_cummax(cur_key, axis=2), axis=2)\n row_left2right_logits = tf.einsum(row_logit_expr, row_left2right_query,\n cur_key) + row_mask\n row_left2right_attn_weights = attention.float32_softmax(\n row_left2right_logits, axis=-1)\n row_left2right_summary = tf.einsum(row_attn_expr, row_left2right_attn_weights,\n cur_val)\n row_left2right_summary = shift_right(row_left2right_summary, axis=2)\n\n all_maskout = tf.cast(tf.fill(col_strict_mask.shape, -1e9), cur_query.dtype)\n col_strict_without_first_mask = tf.concat(\n [all_maskout] + [col_strict_mask] * (cur_query.shape[2] - 1), axis=1)\n top_left_col_logits = tf.einsum(\n col_logit_expr, cur_query,\n row_left2right_key) + col_strict_without_first_mask\n ###################\n\n row_right2left_query = approx_cummax(cur_query, axis=2, reverse=True)\n row_right2left_key = shift_left(\n approx_cummax(cur_key, axis=2, reverse=True), axis=2)\n row_upper_mask = get_causal_mask(\n cur_query, axis=2, is_strict=False, upper=True)[tf.newaxis, tf.newaxis,\n tf.newaxis, :, :]\n row_right2left_logits = 
tf.einsum(row_logit_expr, row_right2left_query,\n cur_key) + row_upper_mask\n row_right2left_attn_weights = attention.float32_softmax(\n row_right2left_logits, axis=-1)\n row_right2left_summary = tf.einsum(row_attn_expr, row_right2left_attn_weights,\n cur_val)\n row_right2left_summary = shift_left(row_right2left_summary, axis=2)\n col_strict_without_last_mask = tf.concat(\n [col_strict_mask] * (cur_query.shape[2] - 1) + [all_maskout], axis=1)\n top_right_col_logits = tf.einsum(\n col_logit_expr, cur_query,\n row_right2left_key) + col_strict_without_last_mask\n ###################\n\n joint_logits = tf.concat([\n tf.transpose(col_logits, perm=[0, 3, 2, 1, 4]), row_logits,\n previous_row_logits,\n tf.transpose(top_left_col_logits, perm=[0, 3, 2, 1, 4]),\n tf.transpose(top_right_col_logits, perm=[0, 3, 2, 1, 4])\n ],\n axis=-1)\n attn_weights = attention.float32_softmax(joint_logits, axis=-1)\n col_att, row_att, previous_row_att, top_left_col_att, top_right_col_att = tf.split(attn_weights,\n [num_seg,\n config.max_seg_len,\n config.max_seg_len,\n num_seg,\n num_seg], axis=-1)\n col_att = tf.transpose(col_att, [0, 3, 2, 1, 4])\n top_left_col_att = tf.transpose(top_left_col_att, [0, 3, 2, 1, 4])\n top_right_col_att = tf.transpose(top_right_col_att, [0, 3, 2, 1, 4])\n col_merged = tf.einsum(col_attn_expr, col_att, cur_val)\n row_merged = tf.einsum(row_attn_expr, row_att, cur_val)\n previous_row_merged = tf.einsum(row_attn_expr, previous_row_att,\n col_up2down_summary)\n top_left_merged = tf.einsum(col_attn_expr, top_left_col_att,\n row_left2right_summary)\n top_right_merged = tf.einsum(col_attn_expr, top_right_col_att,\n row_right2left_summary)\n\n joint_merged = tf.reshape(\n col_merged + row_merged + previous_row_merged + top_left_merged +\n top_right_merged,\n [bsize, num_seg * config.max_seg_len, config.num_heads, head_dim])\n output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)\n return output\n\n\ndef sqrt_fixed_full(x, config, is_training=True, causal=True):\n \"\"\"Full attention matrix with sqrt decomposition.\"\"\"\n bsize = x.shape[0]\n query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size,\n num_heads=config.num_heads,\n bias=config.dense_use_bias)\n head_dim = config.model_size // config.num_heads\n assert config.max_seq_len % config.max_seg_len == 0\n num_seg = config.max_seq_len // config.max_seg_len\n cur_query = tf.reshape(query, [-1,\n num_seg,\n config.max_seg_len,\n config.num_heads,\n head_dim])\n with tf.variable_scope('pooling_query'):\n merged_query = pooling_summary(cur_query, axis=2,\n local_summary=config.local_summary,\n keepdims=True)\n cur_key = tf.reshape(key, cur_query.shape)\n cur_val = tf.reshape(value, cur_query.shape)\n span_val = attention.dot_product_attention(merged_query,\n cur_key,\n cur_val,\n is_training=is_training,\n attn_axis=1,\n dropatt=config.dropatt)\n span_val = tf.squeeze(span_val, axis=2)\n with tf.variable_scope('pooling_key'):\n span_key = pooling_summary(cur_key, axis=2,\n local_summary=config.local_summary,\n keepdims=False)\n local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key)\n if causal:\n local_mask = get_causal_mask(cur_query, axis=2, is_strict=False)\n local_mask = tf.expand_dims(local_mask, axis=-2)\n local_logits += local_mask\n prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key)\n if causal:\n prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True)\n prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0)\n prev_logits += 
tf.expand_dims(prev_mask, axis=1)\n joint_logits = tf.concat([tf.reshape(local_logits,\n [bsize, config.max_seq_len,\n config.num_heads, -1]),\n prev_logits], axis=-1)\n attn_weights = attention.float32_softmax(joint_logits, axis=-1)\n local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg],\n axis=-1)\n if is_training:\n local_att = tf.nn.dropout(local_att, rate=config.dropatt)\n local_att = tf.reshape(local_att, [bsize, num_seg,\n config.max_seg_len,\n config.num_heads,\n config.max_seg_len])\n local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val)\n prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val)\n joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape)\n output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2)\n return output\n\n\ndef axial_rowmajor(x, config, is_training=True, causal=True):\n \"\"\"Full attention matrix with sqrt decomposition.\"\"\"\n bsize = x.shape[0]\n seq_len = x.shape.as_list()[1]\n head_dim = config.model_size // config.num_heads\n assert seq_len % config.max_seg_len == 0\n num_seg = seq_len // config.max_seg_len\n x_sqr = tf.reshape(x,\n [bsize, num_seg, config.max_seg_len, config.model_size])\n q_row_local, key_row_local, value_row_local = attention.get_qkv(\n x_sqr, x_sqr, x_sqr, hidden_size=config.model_size,\n num_heads=config.num_heads, bias=config.dense_use_bias)\n local_logits = tf.einsum('bsqhd,bskhd->bsqhk', q_row_local, key_row_local)\n row_probs = attention.float32_softmax(local_logits, axis=-1)\n if is_training:\n row_probs = tf.nn.dropout(row_probs, rate=config.dropatt)\n\n row_attn_out = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, value_row_local)\n if config.row_summary == 'none':\n key_row = key_row_local\n elif config.row_summary in ['wsum', 'proj', 'wsum_proj']:\n if 'wsum' in config.row_summary:\n pre_summary = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, key_row_local)\n else:\n pre_summary = row_attn_out\n if 'proj' in config.row_summary:\n with tf.variable_scope('rowmajor_param_post'):\n key_row = ops.trail_dense(pre_summary, config.model_size, begin_axis=-2,\n bias=config.dense_use_bias)\n key_row = ops.postprocess(x_sqr, key_row, config, is_training)\n _, key_row = ops.preprocess(key_row, config)\n key_row = ops.trail_dense(key_row, [config.num_heads, head_dim],\n bias=config.dense_use_bias)\n else:\n key_row = pre_summary\n else:\n raise ValueError('Unknown row summary %s' % config.row_summary)\n if causal:\n local_mask = get_causal_mask(q_row_local, axis=2, is_strict=False)\n local_logits += local_mask[:, tf.newaxis, :]\n\n global_logits = tf.einsum('bqlhd,bklhd->bqlhk', q_row_local, key_row)\n if causal:\n global_mask = get_causal_mask(q_row_local, axis=1, is_strict=True)\n global_logits += global_mask[:, tf.newaxis, tf.newaxis, :]\n # (bsize, num_seg, seg_len, n_head, seg_len + num_seg)\n joint_logits = tf.concat([local_logits, global_logits], axis=-1)\n attn_probs = attention.float32_softmax(joint_logits, axis=-1)\n local_att, global_att = tf.split(attn_probs,\n [config.max_seg_len, num_seg],\n axis=-1)\n if is_training:\n local_att = tf.nn.dropout(local_att, rate=config.dropatt)\n local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, value_row_local)\n global_merged = tf.einsum('bqlhv,bvlhd->bqlhd', global_att, row_attn_out)\n joint_merged = tf.reshape(local_merged + global_merged,\n [bsize, seq_len,\n config.num_heads, head_dim])\n output = ops.trail_dense(joint_merged, config.model_size,\n begin_axis=-2, bias=config.dense_use_bias)\n return 
output\n\n\ndef axial_mixture_bidir(x, config, is_training=True, causal=False):\n \"\"\"Full attention matrix with axial mixture decomposition.\"\"\"\n assert not causal\n bsize = x.shape[0]\n seq_len = x.shape.as_list()[1]\n head_dim = config.model_size // config.num_heads\n assert seq_len % config.max_seg_len == 0\n num_seg = seq_len // config.max_seg_len\n x_sqr = tf.reshape(x,\n [bsize, num_seg, config.max_seg_len, config.model_size])\n query, key, value = attention.get_qkv(\n x_sqr, x_sqr, x_sqr, hidden_size=config.model_size,\n num_heads=config.num_heads, bias=config.dense_use_bias)\n local_row_logits = tf.einsum('bushd,buthd->bhust', query, key)\n local_col_logits = tf.einsum('bsuhd,btuhd->bhsut', query, key)\n # TODO: add self-mask for local_col_logits\n\n span_attn_fn = functools.partial(attention.dot_product_attention,\n key_heads=key,\n value_heads=value,\n is_training=is_training,\n dropatt=config.dropatt)\n\n # === top-down summary ===\n col_query_topdown = approx_cummax(query, 1, exclusive=True)\n col_key_topdown = approx_cummax(key, 1, exclusive=True)\n col_t2d_mask = get_causal_mask(x_sqr, axis=1, is_strict=True)\n col_t2d_val = span_attn_fn(query_heads=col_query_topdown,\n attn_axis=0,\n attn_bias=col_t2d_mask)\n\n # === bottom-up summary ===\n col_query_bottomup = approx_cummax(query, 1, exclusive=True, reverse=True)\n col_key_bottomup = approx_cummax(key, 1, exclusive=True, reverse=True)\n col_b2t_mask = get_causal_mask(x_sqr, axis=1, is_strict=True, upper=True)\n col_b2t_val = span_attn_fn(query_heads=col_query_bottomup,\n attn_axis=0,\n attn_bias=col_b2t_mask)\n\n # === left2right summary ===\n row_query_left2right = approx_cummax(query, 2, exclusive=True)\n row_key_left2right = approx_cummax(key, 2, exclusive=True)\n row_l2r_mask = get_causal_mask(x_sqr, axis=2, is_strict=True)\n row_l2r_val = span_attn_fn(query_heads=row_query_left2right,\n attn_axis=1,\n attn_bias=row_l2r_mask)\n\n # === right2left summary ===\n row_query_right2left = approx_cummax(query, 2, exclusive=True, reverse=True)\n row_key_right2left = approx_cummax(key, 2, exclusive=True, reverse=True)\n row_r2l_mask = get_causal_mask(x_sqr, axis=2, is_strict=True, upper=True)\n row_r2l_val = span_attn_fn(query_heads=row_query_right2left,\n attn_axis=1,\n attn_bias=row_r2l_mask)\n\n global_t2d_logits = tf.einsum('bushd,buthd->bhust', query, col_key_topdown)\n global_b2t_logits = tf.einsum('bushd,buthd->bhust', query, col_key_bottomup)\n global_l2r_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_left2right)\n global_r2l_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_right2left)\n joint_logits = tf.concat([local_row_logits, local_col_logits,\n global_t2d_logits, global_b2t_logits,\n global_l2r_logits, global_r2l_logits], axis=-1)\n attn_probs = attention.float32_softmax(joint_logits, axis=-1)\n prow, pcol, pt2d, pb2t, pl2r, pr2l = tf.split(\n attn_probs, [config.max_seg_len, num_seg, config.max_seg_len,\n config.max_seg_len, num_seg, num_seg], axis=-1)\n mrow = tf.einsum('bhust,buthd->bushd', prow, value)\n mcol = tf.einsum('bhsut,btuhd->bsuhd', pcol, value)\n mt2d = tf.einsum('bhust,buthd->bushd', pt2d, col_t2d_val)\n mb2t = tf.einsum('bhust,buthd->bushd', pb2t, col_b2t_val)\n ml2r = tf.einsum('bhsut,btuhd->bsuhd', pl2r, row_l2r_val)\n mr2l = tf.einsum('bhsut,btuhd->bsuhd', pr2l, row_r2l_val)\n joint_merged = mrow + mcol + mt2d + mb2t + ml2r + mr2l\n joint_merged = tf.reshape(joint_merged,\n [bsize, seq_len, config.num_heads, head_dim])\n output = ops.trail_dense(joint_merged, 
config.model_size,\n begin_axis=-2, bias=config.dense_use_bias)\n return output\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input function hook for PPO TF estimator.\n\nFor the PPO algorithm, see https://arxiv.org/abs/1707.06347.\n\"\"\"\nfrom absl import logging\nimport gin\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom polish.ppo import ppo_loss\nfrom polish.utils import distributions\nfrom polish.utils import host_call_fn\nfrom polish.utils import tf_layers\nfrom tensorflow.contrib import tpu as contrib_tpu\n\nlogging.set_verbosity(logging.INFO)\n\n\[email protected]\nclass PpoModelFn(object):\n \"\"\"Main class for model function used in tf.estimator.\n\n Attributes:\n policy_loss: Proximal Policy Optimization (PPO) policy loss.\n value_loss: PPO value loss.\n entropy_loss: PPO entropy loss.\n imitation_kl_divergence: The KL-divergence of action distributions between\n the policy and MCTS.\n total_loss: PPO total loss.\n clipfrac: Fraction of examples in a batch clipped by PPO.\n approxkl: `Approximate` KL divergence between new policy and old policy.\n This is an estimate (approximate) of the KL divergence, since we compute\n the KL divergence using the samples drawn from the new and\n old distributions.\n total_params: Total trainable parameters.\n train_op: Training operation.\n mean_new: Mean of new policy distribution.\n logstd_new: Log standard deviation of new policy distribution.\n mean_old: Mean of old policy distribution.\n logstd_old: Log standard deviation of old policy distribution.\n value_new: state-value from the latest trained state-value network.\n kl_divergence: Kullback-Leibler divergence between new and old policy.\n entropy: Entropy of the new policy.\n global_step: Global step of training.\n policy_ratio: the ratio between new policy and old policy.\n last_iteration_mcts_enable: Track the sampling type (PPO sampling/MCTS) in\n the process of training. 
That is, whether in the last iteration of\n training, we use PPO sampling (False) or MCTS sampling (True).\n mcts_sampling_enable: If True, it means that the current batch\n is generated by MCTS.\n mean_mse_loss: Mean squared error between the mean value of\n policy distribuiton and the mean value returned by MCTS given a state.\n logstd_mse_loss: Mean squared error between log of standard deviation value\n of the policy distribuiton and the log of standard deviation value\n returned by MCTS given a state.\n \"\"\"\n\n def __init__(\n self,\n env_action_space=2,\n iterations_per_loop=320,\n num_timesteps=1000000,\n max_horizon=2048,\n learning_rate=3e-4,\n use_tpu=False,\n ppo2_enable=True,\n policy_coeff=1.0,\n value_coeff=0.5,\n entropy_coeff=0.0,\n tpu_num_shards=8,\n mse_loss_coeff=0.0,\n warmstart_file=None,\n policy_hidden_layer_size=64,\n value_hidden_layer_size=64):\n \"\"\"Creates a model function for PPO algorithm.\n\n The default values for all the parameters are from PPO paper.\n\n Args:\n env_action_space: The size of environment action space.\n iterations_per_loop: Number of steps to run on TPU before outfeeding\n metrics to the CPU. If the number of iterations in the loop would exceed\n the number of train steps, the loop will exit before reaching\n --iterations_per_loop. The larger this value is, the higher the\n utilization on the TPU.\n num_timesteps: Total number of timesteps. Defines the total number of\n samples taken from the environment during the whole process of training.\n max_horizon: Maximum number of samples taken from the environment before\n starting training.\n learning_rate: Initial learning rate value. Note that the actual learning\n rate is linearly decayed.\n use_tpu: If True, training occurs on TPU.\n ppo2_enable: If True, we use the next version of PPO algorithm, known as\n PPO2. In this version, not only does the probability ratio get clipped,\n but also the clipping is performed on the value loss.\n For more information:\n https://github.com/openai/baselines/tree/master/baselines/ppo2.\n policy_coeff: Policy loss coefficient in the total loss calculation.\n value_coeff: Value loss coefficient in the total loss calculation.\n entropy_coeff: Entropy loss coefficient in the total loss calculation.\n tpu_num_shards: Number of TPU shards.\n mse_loss_coeff: The coefficient for Mean Squared Error (MSE) loss.\n warmstart_file: If not None, we restore the weights for the parameters in\n `newpolicy` scope from this file. 
`newpolicy` scope contains both\n policy and value network.\n policy_hidden_layer_size: The size of hidden layer in policy network.\n Currently, this value is used for both of the hidden layers.\n value_hidden_layer_size: The size of hidden layer in value network.\n Currently, this value is used for both of the hidden layers.\n \"\"\"\n self.policy_loss = 0\n self.value_loss = 0\n self.entropy_loss = 0\n self.total_loss = 0\n self.clipfrac = 0\n self.approxkl = 0\n self.policy_ratio = 0\n\n self.total_params = None\n self.train_op = None\n\n self.mean_new = None\n self.logstd_new = None\n self.mean_old = None\n self.logstd_old = None\n self.value_new = None\n\n self.kl_divergence = None\n self.entropy = 0\n\n self.global_step = None\n\n self._decayed_learning_rate = None\n\n self._env_action_space = env_action_space\n self._iterations_per_loop = iterations_per_loop\n self._num_timesteps = num_timesteps\n self._max_horizon = max_horizon\n self._learning_rate = learning_rate\n self._use_tpu = use_tpu\n self._ppo2_enable = ppo2_enable\n self._policy_coeff = policy_coeff\n self._value_coeff = value_coeff\n self._entropy_coeff = entropy_coeff\n self._tpu_num_shards = tpu_num_shards\n\n self._mse_loss_coeff = mse_loss_coeff\n self._warmstart_file = warmstart_file\n\n self.last_iteration_mcts_enable = False\n self._mcts_global_step = 0\n\n self._policy_hidden_layer_size = policy_hidden_layer_size\n self._value_hidden_layer_size = value_hidden_layer_size\n\n def __call__(self, features, labels, mode, params):\n return self.model_fn(features, labels, mode, params)\n\n def model_inference_fn_ppo(self, features, prefix):\n \"\"\"Builds just the inference part of the model graph.\n\n Args:\n features: input features tensor.\n prefix: prefix to be added to the network.\n\n Returns:\n (value, var, mean) tuple of tensors.\n \"\"\"\n # Policy Network\n features = tf.layers.flatten(features)\n with tf.variable_scope(prefix + 'policy', reuse=tf.AUTO_REUSE):\n policy_1 = tf.tanh(\n tf_layers.fc(\n tensor_in=features,\n num_hidden=self._policy_hidden_layer_size,\n scope_name='/policy_1',\n init_scale=np.sqrt(2)))\n policy_2 = tf.tanh(\n tf_layers.fc(\n tensor_in=policy_1,\n num_hidden=self._policy_hidden_layer_size,\n scope_name='/policy_2',\n init_scale=np.sqrt(2)))\n mean = tf_layers.fc(\n tensor_in=policy_2,\n num_hidden=self._env_action_space,\n scope_name='/mean',\n init_scale=0.01,\n init_bias=0.0)\n logstd_var = tf.get_variable(\n name=prefix + '_logstd',\n shape=[1, self._env_action_space],\n initializer=tf.zeros_initializer())\n # Evaluate logstd_var and broadcast to have a same shape as mean\n logstd = tf.multiply(logstd_var, 1.0)\n\n value_1 = tf.tanh(\n tf_layers.fc(\n tensor_in=features,\n num_hidden=self._value_hidden_layer_size,\n scope_name='/value_1',\n init_scale=np.sqrt(2)))\n value_2 = tf.tanh(\n tf_layers.fc(\n tensor_in=value_1,\n num_hidden=self._value_hidden_layer_size,\n scope_name='/value_2',\n init_scale=np.sqrt(2)))\n value = tf_layers.fc(\n tensor_in=value_2, num_hidden=1, scope_name='/value')[:, 0]\n\n return value, logstd, mean\n\n def learning_rate_update_true_fn(self):\n \"\"\"The function which is performed if the predicate is true.\n\n The predicate that calls this function is defined in 'update_learning_rate'.\n\n Returns:\n The current global step.\n \"\"\"\n return tf.train.get_global_step()\n\n def learning_rate_update_false_fn(self):\n \"\"\"The function which is performed if the predicate is false.\n\n The predicate that calls this function is defined in 
'update_learning_rate'.\n\n Returns:\n A type-casted value of `_mcts_global_step` to int64.\n `_mcts_global_step` is the global step at which MCTS algorithm starts.\n The type casting is necessary as the type of returned tensor in `true_fn`\n is an int.64.\n \"\"\"\n return tf.cast(self._mcts_global_step, tf.int64)\n\n def update_learning_rate(self):\n \"\"\"Update the learning rate with a decaying factor.\n \"\"\"\n self._current_global_step = tf.cond(\n tf.equal(self.mcts_sampling_enable,\n True), lambda: self._mcts_global_step, lambda: 0)\n\n self._current_global_step = tf.cast(self._current_global_step, tf.int64)\n\n update = (tf.train.get_global_step() -\n self._current_global_step) // self._iterations_per_loop + 1\n current_frac = self._num_timesteps // self._max_horizon\n update = tf.cast(update, tf.float32)\n current_frac = tf.cast(current_frac, tf.float32)\n frac = 1.0 - (update - 1.0) / current_frac\n self._decayed_learning_rate = self._learning_rate * frac\n self._mcts_global_step = tf.cond(\n tf.not_equal(self.mcts_sampling_enable,\n self.last_iteration_mcts_enable),\n self.learning_rate_update_true_fn, self.learning_rate_update_false_fn)\n self.last_iteration_mcts_enable = self.mcts_sampling_enable\n\n def build_training_op(self, loss):\n \"\"\"Get training operation.\n\n Args:\n loss: a loss function for training.\n\n Define the optimization operation and perform gradient calculation for both\n TPU/Non-TPU training.\n\n Returns:\n Computed gradient.\n \"\"\"\n adam_optimizer = tf.train.AdamOptimizer(\n learning_rate=self._decayed_learning_rate, epsilon=1e-5)\n if self._use_tpu:\n # If we use TPUs, reduce_mean runs on each chip separately and by default\n # only the loss of the first chip is reported.\n #\n # You can either:\n # - execute this if, which synchronizes the losses\n # across the chips to obtain the full loss on all samples.\n # - or remove this section, gaining some performance and getting the\n # loss only from the first chip.\n # compute gradients perform averaging of the loss\n adam_optimizer = tf.tpu.CrossShardOptimizer(adam_optimizer)\n\n tpu_sum_loss = contrib_tpu.cross_replica_sum(loss / self._tpu_num_shards)\n\n grads_and_vars = adam_optimizer.compute_gradients(tpu_sum_loss,\n self.total_params)\n grads, var = zip(*grads_and_vars)\n sum_grads = []\n sum_vars = []\n for (grad, var) in grads_and_vars:\n if grad is None:\n sum_grads.append(grad)\n sum_vars.append(var)\n else:\n sum_grads.append(\n contrib_tpu.cross_replica_sum(grad) / self._tpu_num_shards)\n sum_vars.append(var)\n # calculate sum of grads\n norm_grads, _ = tf.clip_by_global_norm(sum_grads, 0.5)\n grads_and_vars = list(zip(norm_grads, sum_vars))\n else:\n grads_and_vars = adam_optimizer.compute_gradients(loss,\n self.total_params)\n grads, var = zip(*grads_and_vars)\n norm_grads, _ = tf.clip_by_global_norm(grads, 0.5)\n grads_and_vars = list(zip(norm_grads, var))\n\n return adam_optimizer.apply_gradients(\n grads_and_vars, global_step=tf.train.get_global_step())\n\n def calc_normalized_advantage(self, return_tensor, value_tensor):\n \"\"\"Compute General Advantage Estimation (GAE) and normalize it.\n\n Note that, the advantage calculation-normalization is performed for a batch\n of data.\n\n Args:\n return_tensor: The discounted accumulated reward (return) calculated\n for the given rollout trajectory.\n value_tensor: The value for each state for the given rollout trajectory.\n\n Returns:\n Returns the normalized General Advantage Estimation (GAE).\n \"\"\"\n batch_advantage = return_tensor - 
value_tensor\n batch_advantage_std = tf.keras.backend.std(batch_advantage)\n batch_advantage_mean = tf.reduce_mean(batch_advantage)\n batch_advantage_norm = (batch_advantage - batch_advantage_mean) / (\n batch_advantage_std + 1e-8)\n return batch_advantage_norm\n\n def create_host_call_fn(self, params):\n \"\"\"Create host call function.\n\n `host_call` function is later called by TPU estimator to\n send some metrics to host for logging.\n\n Args:\n params: A dictionary of hyperparameters passed to the tf.estimator.\n\n Returns:\n A host call function that generates a set of tf summaries.\n \"\"\"\n names_and_tensors = [\n ('Batch_Params/mean_mse_loss', self.mean_mse_loss),\n ('Batch_Params/logstd_mse_loss', self.logstd_mse_loss),\n ('Batch_Params/policy_loss', self.policy_loss),\n ('Batch_Params/mcts_enable', self.mcts_sampling_enable),\n ('Batch_Params/value_loss', self.value_loss),\n ('Batch_Params/policy_entropy', self.entropy_loss),\n ('Batch_Params/imitation_kl_divergence', self.imitation_kl_divergence),\n ('Batch_Params/clip_fraction', self.clipfrac),\n ('Batch_Params/max_ratio', tf.reduce_max(self.policy_ratio)),\n ('Batch_Params/min_ratio', tf.reduce_min(self.policy_ratio)),\n ('Batch_Params/mean_ratio', tf.reduce_mean(self.policy_ratio)),\n ('Batch_Params/approx_kl', self.approxkl),\n ('Learning_Rate/learning_rate', self._decayed_learning_rate),\n ('Learning_Rate/global_step', tf.train.get_global_step())\n ]\n\n return host_call_fn.build_host_call_fn_every_n_global_steps(\n params=params,\n names_and_tensors=names_and_tensors,\n n=self._iterations_per_loop)\n\n def compute_total_loss(self, pd_new, pd_old, value_tensor, return_tensor,\n batch_advantage_norm,\n policy_old_neg_logprob_tensor,\n policy_action_tensor):\n \"\"\"Defines the total loss function.\n\n Args:\n pd_new: The current policy distribution\n (a multivariate normal distribution). This policy distribution gets\n updated in the course of training.\n pd_old: The old policy distribution that we use during sampling the\n trajectory (a multivariate normal distribution).\n value_tensor: The values associated to the rollout trajectory.\n return_tensor: The return values computed for the rollout trajectory.\n batch_advantage_norm: The normalized advantage tensor computed for a\n batch of data. 
For advantage calculation, we use generalized\n advantage estimation (GAE) formula.\n policy_old_neg_logprob_tensor: The negative log probabilities from the\n policy rollouts.\n policy_action_tensor: The actions from the policy rollouts.\n \"\"\"\n # Policy loss\n ppo_policy_loss_out = ppo_loss.ppo_policy_loss(\n neg_logprobs_old=policy_old_neg_logprob_tensor,\n actions=policy_action_tensor,\n advantages=batch_advantage_norm,\n dist_new=pd_new,\n mcts_sampling=self.mcts_sampling_enable)\n\n (self.policy_loss, self.approxkl, self.clipfrac,\n self.policy_ratio) = ppo_policy_loss_out\n\n # Value Loss\n if self._ppo2_enable:\n self.value_loss = ppo_loss.ppo2_value_loss(\n value_old=value_tensor,\n pred_value=self.value_new,\n returns=return_tensor)\n else:\n self.value_loss = ppo_loss.ppo1_value_loss(\n pred_value=self.value_new, returns=return_tensor)\n\n # MSE loss between mean and standard deviations\n self.mean_mse_loss, self.logstd_mse_loss = ppo_loss.l2_norm_policy_loss(\n policy_mean=self.mean_new,\n policy_logstd=self.logstd_new,\n mcts_mean=self.mean_old,\n mcts_logstd=self.logstd_old)\n\n mcts_dist = distributions.MultiVariateNormalDiag(\n mean=self.mean_old, logstd=self.logstd_old)\n policy_dist = distributions.MultiVariateNormalDiag(\n mean=self.mean_new, logstd=self.logstd_new)\n self.imitation_kl_divergence = tf.reduce_mean(\n policy_dist.kl_divergence(mcts_dist))\n # Calculate KL divergence and entropy of new distribution\n self.kl_divergence = tf.reduce_mean(pd_new.kl_divergence(pd_old))\n self.entropy = pd_new.entropy()\n\n # Calculate entropy loss\n self.entropy_loss = tf.reduce_mean(self.entropy)\n\n # Calulate total loss\n total_loss_ppo = (self._policy_coeff * self.policy_loss) + (\n self._value_coeff * self.value_loss) - (\n self._entropy_coeff * self.entropy_loss)\n\n total_loss_mcts = (self._value_coeff * self.value_loss) + (\n self._mse_loss_coeff *\n (self.imitation_kl_divergence + self.entropy_loss))\n\n self.total_loss = tf.cond(\n tf.equal(self.mcts_sampling_enable,\n True), lambda: total_loss_mcts, lambda: total_loss_ppo)\n\n def model_fn(self, features, labels, mode, params):\n \"\"\"The implementation of PPO algorithm.\n\n Args:\n features: dict from string to tensor with shape\n 'state_tensor': [BATCH_SIZE, env.state_space]\n labels: dict from string to tensor with shape\n 'action_tensor': [BATCH_SIZE, self._env_action_space]\n 'advantage_tensor': [BATCH_SIZE]\n 'returns_tensor': [BATCH_SIZE]\n mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only).\n params: (Ignored; needed for compat with TPUEstimator).\n\n Returns:\n tf.estimator.EstimatorSpec with props.\n mode: same as mode arg.\n predictions: dict of tensors\n 'mean': [BATCH_SIZE, self._env_action_space]\n 'logstd': [BATCH_SIZE, self._env_action_space]\n 'value': [BATCH_SIZE]\n 'action': [BATCH_SIZE, self._env_action_space]\n 'neg_logprob': [BATCH_SIZE, self._env_action_space]\n loss: a single value tensor.\n train_op: train op eval_metric_ops return dict of tensors.\n \"\"\"\n\n # Policy network\n network_out = self.model_inference_fn_ppo(features['mcts_features'], 'new')\n self.value_new = network_out[0]\n self.logstd_new = network_out[1]\n self.mean_new = network_out[2]\n\n self.global_step = tf.train.get_or_create_global_step()\n # Sample an action\n pd_new = distributions.MultiVariateNormalDiag(\n mean=self.mean_new, logstd=self.logstd_new)\n action_sample = pd_new.sample()\n action_sample_neg_logprob = pd_new.negative_log_prob(action_sample)\n\n # Used during TF estimator 
prediction\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'mean': self.mean_new,\n 'logstd': self.logstd_new,\n 'value': self.value_new,\n 'action': action_sample,\n 'neg_logprob': action_sample_neg_logprob\n }\n pred_estimator = tf.estimator.tpu.TPUEstimatorSpec(\n mode,\n predictions=predictions,\n export_outputs={\n 'ppo_inference':\n tf.estimator.export.PredictOutput({\n 'mean': self.mean_new,\n 'logstd': self.logstd_new,\n 'value': self.value_new,\n 'action': action_sample,\n 'neg_logprob': action_sample_neg_logprob\n })\n })\n return pred_estimator.as_estimator_spec()\n\n # Placeholder\n self.mcts_sampling_enable = tf.reduce_all(labels['mcts_enable_tensor'])\n\n self.mean_old = labels['mean_tensor']\n self.logstd_old = labels['logstd_tensor']\n pd_old = distributions.MultiVariateNormalDiag(\n mean=self.mean_old, logstd=self.logstd_old)\n\n batch_advantage_norm = self.calc_normalized_advantage(\n return_tensor=labels['policy_return_tensor'],\n value_tensor=labels['policy_value_tensor'])\n\n self.compute_total_loss(pd_new, pd_old, labels['value_tensor'],\n labels['return_tensor'], batch_advantage_norm,\n labels['policy_old_neg_logprob_tensor'],\n labels['policy_action_tensor'])\n # Update learning rate\n self.update_learning_rate()\n\n # Build training operation\n self.total_params = tf.trainable_variables(scope='newpolicy')\n\n train_ops = self.build_training_op(self.total_loss)\n\n host_call = self.create_host_call_fn(params)\n\n if mode != tf.estimator.ModeKeys.TRAIN:\n raise ValueError('Estimator mode should be train at this point.')\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n # Setup fine tune scaffold\n # The scaffold here is used to restore the weights from _warmstart_file.\n # If _warmstart_file is None, the training starts from the beginning.\n if self._warmstart_file:\n logging.info('Warmstart')\n def tpu_scaffold():\n # restore all the variables\n tf.init_from_checkpoint(self._warmstart_file,\n {'newpolicy/': 'newpolicy/'})\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n else:\n scaffold_fn = None\n\n tpu_estimator_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=self.total_loss,\n train_op=train_ops,\n host_call=host_call,\n scaffold_fn=scaffold_fn)\n if self._use_tpu:\n return tpu_estimator_spec\n else:\n return tpu_estimator_spec.as_estimator_spec()\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Usage: configure.py [--quiet] [--no-deps]\n#\n# Options:\n# --quiet Give less output.\n# --no-deps Don't install Python dependencies\n\"\"\"Configures ScaNN to be built from source.\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\n\n_BAZELRC = \".bazelrc\"\n_BAZEL_QUERY = \".bazel-query.sh\"\n\n\n# Writes variables to bazelrc file\ndef write_to_bazelrc(line):\n with open(_BAZELRC, \"a\") as f:\n f.write(line + \"\\n\")\n\n\ndef write_action_env(var_name, var):\n write_to_bazelrc('build --action_env %s=\"%s\"' % (var_name, str(var)))\n with open(_BAZEL_QUERY, \"a\") as f:\n f.write('{}=\"{}\" '.format(var_name, var))\n\n\ndef get_input(question):\n try:\n return input(question)\n except EOFError:\n return \"\"\n\n\ndef generate_shared_lib_name(namespec):\n \"\"\"Converts the linkflag namespec to the full shared library name.\"\"\"\n # Assume Linux for now\n return namespec[1][3:]\n\n\ndef create_build_configuration():\n \"\"\"Main function to create build configuration.\"\"\"\n print()\n print(\"Configuring ScaNN to be built from source...\")\n\n pip_install_options = [\"--upgrade\"]\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--quiet\", action=\"store_true\", help=\"Give less output.\")\n parser.add_argument(\n \"--no-deps\",\n action=\"store_true\",\n help=\"Do not check and install Python dependencies.\",\n )\n args = parser.parse_args()\n if args.quiet:\n pip_install_options.append(\"--quiet\")\n\n python_path = sys.executable\n with open(\"requirements.txt\") as f:\n required_packages = f.read().splitlines()\n\n print()\n if args.no_deps:\n print(\"> Using pre-installed Tensorflow.\")\n else:\n print(\"> Installing\", required_packages)\n install_cmd = [python_path, \"-m\", \"pip\", \"install\"]\n install_cmd.extend(pip_install_options)\n install_cmd.extend(required_packages)\n subprocess.check_call(install_cmd)\n\n if os.path.isfile(_BAZELRC):\n os.remove(_BAZELRC)\n if os.path.isfile(_BAZEL_QUERY):\n os.remove(_BAZEL_QUERY)\n\n logging.disable(logging.WARNING)\n\n import tensorflow.compat.v2 as tf # pylint: disable=g-import-not-at-top\n\n # pylint: disable=invalid-name\n _TF_CFLAGS = 
tf.sysconfig.get_compile_flags()\n _TF_LFLAGS = tf.sysconfig.get_link_flags()\n _TF_CXX11_ABI_FLAG = tf.sysconfig.CXX11_ABI_FLAG\n\n _TF_SHARED_LIBRARY_NAME = generate_shared_lib_name(_TF_LFLAGS)\n _TF_HEADER_DIR = _TF_CFLAGS[0][2:]\n _TF_SHARED_LIBRARY_DIR = _TF_LFLAGS[0][2:]\n # pylint: enable=invalid-name\n\n write_action_env(\"TF_HEADER_DIR\", _TF_HEADER_DIR)\n write_action_env(\"TF_SHARED_LIBRARY_DIR\", _TF_SHARED_LIBRARY_DIR)\n write_action_env(\"TF_SHARED_LIBRARY_NAME\", _TF_SHARED_LIBRARY_NAME)\n write_action_env(\"TF_CXX11_ABI_FLAG\", _TF_CXX11_ABI_FLAG)\n\n write_to_bazelrc(\"build --spawn_strategy=standalone\")\n write_to_bazelrc(\"build --strategy=Genrule=standalone\")\n write_to_bazelrc(\"build -c opt\")\n\n print()\n print(\"Build configurations successfully written to\", _BAZELRC)\n print()\n\n with open(_BAZEL_QUERY, \"a\") as f:\n f.write('bazel query \"$@\"')\n\n\nif __name__ == \"__main__\":\n create_build_configuration()\n",
"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_heatmap(pdf_func, out_name, size=3):\n w = 100\n x = np.linspace(-size, size, w)\n y = np.linspace(-size, size, w)\n xx, yy = np.meshgrid(x, y)\n coords = np.stack([xx.flatten(), yy.flatten()]).transpose()\n\n scores = pdf_func(coords)\n a = scores.reshape((w, w))\n\n plt.imshow(a)\n plt.axis('equal')\n plt.axis('off')\n plt.savefig(out_name, bbox_inches='tight')\n plt.close()\n\n\ndef plot_samples(samples, out_name):\n plt.scatter(samples[:, 0], samples[:, 1])\n plt.axis('equal')\n plt.savefig(out_name, bbox_inches='tight')\n plt.close()\n\n\ndef plot_joint(dataset, samples, out_name):\n x = np.max(dataset)\n y = np.max(-dataset)\n z = np.ceil(max((x, y)))\n plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')\n plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')\n plt.legend(['training data', 'ADE sampled'])\n plt.axis('equal')\n plt.xlim(-z, z)\n plt.ylim(-z, z)\n plt.savefig(out_name, bbox_inches='tight')\n plt.close()\n\n fname = out_name.split('/')[-1]\n out_name = '/'.join(out_name.split('/')[:-1]) + '/none-' + fname\n plt.figure(figsize=(8, 8))\n plt.scatter(dataset[:, 0], dataset[:, 1], c='r', marker='x')\n plt.scatter(samples[:, 0], samples[:, 1], c='b', marker='.')\n plt.axis('equal')\n plt.xlim(-z, z)\n plt.ylim(-z, z)\n plt.savefig(out_name, bbox_inches='tight')\n plt.close()\n"
] | [
[
"tensorflow.compat.v1.image.flip_left_right",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.image.flip_up_down",
"tensorflow.compat.v1.space_to_depth",
"tensorflow.compat.v1.image.resize_bilinear",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.name_scope"
],
[
"tensorflow.zeros",
"tensorflow.eye",
"tensorflow.convert_to_tensor",
"tensorflow.random.uniform",
"tensorflow.constant",
"tensorflow.test.main"
],
[
"tensorflow.compat.v1.math.reduce_mean",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.repeat",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.math.reduce_sum",
"tensorflow.compat.v1.math.reduce_max",
"tensorflow.compat.v1.ones",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.math.cumsum",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.fill",
"tensorflow.compat.v1.einsum",
"tensorflow.compat.v1.nn.dropout"
],
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.keras.backend.std",
"tensorflow.compat.v1.layers.flatten",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.contrib.tpu.cross_replica_sum",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.reduce_all",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tensorflow.compat.v1.estimator.export.PredictOutput",
"tensorflow.compat.v1.init_from_checkpoint",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.tpu.CrossShardOptimizer",
"tensorflow.compat.v1.reduce_min",
"tensorflow.compat.v1.train.get_global_step",
"tensorflow.compat.v1.variable_scope",
"numpy.sqrt",
"tensorflow.compat.v1.train.get_or_create_global_step"
],
[
"tensorflow.compat.v2.sysconfig.get_compile_flags",
"tensorflow.compat.v2.sysconfig.get_link_flags"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.imshow",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.ylim",
"matplotlib.use",
"numpy.meshgrid",
"numpy.linspace",
"matplotlib.pyplot.scatter"
]
] |
manuelmusngi/machine_learning_algorithms_for_development | [
"f344634f84d8f3a60fbb93892bdaed877855b710"
] | [
"preprocessing_tools.py"
] | [
"# Data Preprocessing Tools for Data Cleaning\n\n# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_ ('')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\nprint(X)\nprint(y)\n\n# Taking care of missing data\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\nimputer.fit(X[:, 1:3])\nX[:, 1:3] = imputer.transform(X[:, 1:3])\nprint(X)\n\n# Encoding categorical data\n\n# Encoding the Independent Variable\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))\nprint(X)\n\n# Encoding the Dependent Variable\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\nprint(y)\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)\nprint(X_train)\nprint(X_test)\nprint(y_train)\nprint(y_test)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train[:, 3:] = sc.fit_transform(X_train[:, 3:])\nX_test[:, 3:] = sc.transform(X_test[:, 3:])\nprint(X_train)\nprint(X_test)\n"
] | [
[
"sklearn.impute.SimpleImputer",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.StandardScaler",
"pandas.read_",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder"
]
] |
qianrusun1015/Disentangled-Person-Image-Generation | [
"e4703860bb1b351050ce50f339985ff0811f1d64"
] | [
"score_mask.py"
] | [
"from __future__ import print_function\n\nimport os, pdb, sys, glob\n# we need to set GPUno first, otherwise may out of memory\nstage = int(sys.argv[1])\ngpuNO = sys.argv[2]\nmodel_dir = sys.argv[3]\ntest_dir = sys.argv[4]\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=str(gpuNO)\nimport StringIO\nimport scipy.misc\nimport numpy as np\nfrom skimage.measure import compare_ssim as ssim\nfrom skimage.measure import compare_psnr as psnr\nfrom skimage.color import rgb2gray\n# from PIL import Image\nimport scipy.misc\nimport tflib\nimport tflib.inception_score\n\ndef l1_mean_dist(x,y): \n # return np.sum(np.abs(x-y))\n diff = x.astype(float)-y.astype(float)\n return np.sum(np.abs(diff))/np.product(x.shape)\n\ndef l2_mean_dist(x,y): \n # return np.sqrt(np.sum((x-y)**2))\n diff = x.astype(float)-y.astype(float)\n return np.sqrt(np.sum(diff**2))/np.product(x.shape)\n\n# pdb.set_trace()\n\nif 1==stage:\n test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')\n # test_result_dir_x = os.path.join(model_dir, test_dir, 'x')\n test_result_dir_G = os.path.join(model_dir, test_dir, 'G')\n test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')\n score_path = os.path.join(model_dir, test_dir, 'score_mask.txt')\n\n types = ('*.jpg', '*.png') # the tuple of file types\n x_files = []\n G_files = []\n mask_files = []\n for files in types:\n x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))\n G_files.extend(glob.glob(os.path.join(test_result_dir_G, files)))\n mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files)))\n x_target_list = []\n for path in x_files:\n x_target_list.append(scipy.misc.imread(path))\n G_list = []\n for path in G_files:\n G_list.append(scipy.misc.imread(path))\n mask_target_list = []\n for path in mask_files:\n mask_target_list.append(scipy.misc.imread(path))\n N = len(G_files)\n \n ##################### SSIM ##################\n ssim_G_x = []\n psnr_G_x = []\n L1_mean_G_x = []\n L2_mean_G_x = []\n # x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)\n x_0_255 = x_target_list\n for i in xrange(N):\n # G_gray = rgb2gray((G_list[i]/127.5-1).clip(min=-1,max=1))\n # x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))\n # gray image, [0,1]\n # G_gray = rgb2gray((G_list[i]).clip(min=0,max=255))\n # x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))\n # ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))\n # psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G_gray, data_range=x_target_gray.max()-x_target_gray.min())) \n # L1_mean_G_x.append(l1_mean_dist(G_gray, x_target_gray))\n # L2_mean_G_x.append(l2_mean_dist(G_gray, x_target_gray))\n \n # color image\n # ssim_G_x.append(ssim(G_list[i], x_target_list[i], multichannel=True))\n masked_G_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i])\n masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])\n ssim_G_x.append(ssim(masked_G_array, masked_x_target_array, multichannel=True))\n \n psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G_array))\n L1_mean_G_x.append(l1_mean_dist(masked_G_array, masked_x_target_array))\n L2_mean_G_x.append(l2_mean_dist(masked_G_array, masked_x_target_array))\n # pdb.set_trace()\n ssim_G_x_mean = np.mean(ssim_G_x)\n ssim_G_x_std = np.std(ssim_G_x)\n psnr_G_x_mean = np.mean(psnr_G_x)\n psnr_G_x_std = np.std(psnr_G_x)\n L1_G_x_mean = np.mean(L1_mean_G_x)\n 
L1_G_x_std = np.std(L1_mean_G_x)\n L2_G_x_mean = np.mean(L2_mean_G_x)\n L2_G_x_std = np.std(L2_mean_G_x)\n print('ssim_G_x_mean: %f\\n' % ssim_G_x_mean)\n print('ssim_G_x_std: %f\\n' % ssim_G_x_std)\n print('psnr_G_x_mean: %f\\n' % psnr_G_x_mean)\n print('psnr_G_x_std: %f\\n' % psnr_G_x_std)\n print('L1_G_x_mean: %f\\n' % L1_G_x_mean)\n print('L1_G_x_std: %f\\n' % L1_G_x_std)\n print('L2_G_x_mean: %f\\n' % L2_G_x_mean)\n print('L2_G_x_std: %f\\n' % L2_G_x_std)\n\n # ##################### Inception score ##################\n # # IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list)\n # G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]\n # IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)\n # print('IS_G_mean: %f\\n' % IS_G_mean)\n # print('IS_G_std: %f\\n' % IS_G_std)\n\n # with open(score_path, 'w') as f:\n # f.write('Image number: %d\\n' % N)\n # f.write('ssim: %.5f +- %.5f ' % (ssim_G_x_mean, ssim_G_x_std))\n # f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))\n # f.write('psnr: %.5f +- %.5f ' % (psnr_G_x_mean, psnr_G_x_std))\n # f.write('L1: %.5f +- %.5f ' % (L1_G_x_mean, L1_G_x_std))\n # f.write('L2: %.5f +- %.5f' % (L2_G_x_mean, L2_G_x_std))\n\n ## IS of fake data\n G_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G_list[i]) for i in range(len(G_list))]\n IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(G_list_masked)\n print('IS_G_mean: %f\\n' % IS_G_mean)\n print('IS_G_std: %f\\n' % IS_G_std)\n with open(score_path, 'w') as f:\n f.write('Image number: %d\\n' % N)\n f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))\n\n ## IS of real data\n # x_target_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i]) for i in range(len(x_target_list))]\n # IS_G_mean, IS_G_std = tflib.inception_score.get_inception_score(x_target_list_masked)\n # print('IS_G_mean: %f\\n' % IS_G_mean)\n # print('IS_G_std: %f\\n' % IS_G_std)\n # with open(score_path+'_x_target', 'w') as f:\n # f.write('Image number: %d\\n' % N)\n # f.write('IS: %.5f +- %.5f ' % (IS_G_mean, IS_G_std))\n \nelif 2==stage:\n test_result_dir_x = os.path.join(model_dir, test_dir, 'x_target')\n test_result_dir_G1 = os.path.join(model_dir, test_dir, 'G1')\n test_result_dir_G2 = os.path.join(model_dir, test_dir, 'G2')\n test_result_dir_mask = os.path.join(model_dir, test_dir, 'mask')\n score_path = os.path.join(model_dir, test_dir, 'score_mask.txt') #\n\n types = ('*.jpg', '*.png') # the tuple of file types\n x_files = []\n G1_files = []\n G2_files = []\n mask_files = []\n for files in types:\n x_files.extend(glob.glob(os.path.join(test_result_dir_x, files)))\n G1_files.extend(glob.glob(os.path.join(test_result_dir_G1, files)))\n G2_files.extend(glob.glob(os.path.join(test_result_dir_G2, files)))\n mask_files.extend(glob.glob(os.path.join(test_result_dir_mask, files))) \n x_target_list = []\n for path in x_files:\n x_target_list.append(scipy.misc.imread(path))\n G1_list = []\n for path in G1_files:\n G1_list.append(scipy.misc.imread(path))\n G2_list = []\n for path in G2_files:\n G2_list.append(scipy.misc.imread(path))\n mask_target_list = []\n for path in mask_files:\n mask_target_list.append(scipy.misc.imread(path))\n\n ##################### SSIM G1 ##################\n N = len(x_files)\n ssim_G_x = []\n psnr_G_x = []\n L1_mean_G_x = []\n L2_mean_G_x = []\n # x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)\n # x_0_255 = x_target_list\n for i in xrange(N):\n # 
G1_gray = rgb2gray((G1_list[i]/127.5-1).clip(min=-1,max=1))\n # x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))\n # gray image, [0,1]\n # G1_gray = rgb2gray((G1_list[i]).clip(min=0,max=255))\n # x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))\n # ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))\n # psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G1_gray, data_range=x_target_gray.max()-x_target_gray.min())) \n # L1_mean_G_x.append(l1_mean_dist(G1_gray, x_target_gray))\n # L2_mean_G_x.append(l2_mean_dist(G1_gray, x_target_gray))\n \n # color image\n # ssim_G_x.append(ssim(G1_list[i], x_target_list[i], multichannel=True))\n masked_G1_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i])\n masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])\n ssim_G_x.append(ssim(masked_G1_array, masked_x_target_array, multichannel=True))\n \n psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G1_array))\n L1_mean_G_x.append(l1_mean_dist(masked_G1_array, masked_x_target_array))\n L2_mean_G_x.append(l2_mean_dist(masked_G1_array, masked_x_target_array))\n # pdb.set_trace()\n ssim_G1_x_mean = np.mean(ssim_G_x)\n ssim_G1_x_std = np.std(ssim_G_x)\n psnr_G1_x_mean = np.mean(psnr_G_x)\n psnr_G1_x_std = np.std(psnr_G_x)\n L1_G1_x_mean = np.mean(L1_mean_G_x)\n L1_G1_x_std = np.std(L1_mean_G_x)\n L2_G1_x_mean = np.mean(L2_mean_G_x)\n L2_G1_x_std = np.std(L2_mean_G_x)\n print('ssim_G1_x_mean: %f\\n' % ssim_G1_x_mean)\n print('ssim_G1_x_std: %f\\n' % ssim_G1_x_std)\n print('psnr_G1_x_mean: %f\\n' % psnr_G1_x_mean)\n print('psnr_G1_x_std: %f\\n' % psnr_G1_x_std)\n print('L1_G1_x_mean: %f\\n' % L1_G1_x_mean)\n print('L1_G1_x_std: %f\\n' % L1_G1_x_std)\n print('L2_G1_x_mean: %f\\n' % L2_G1_x_mean)\n print('L2_G1_x_std: %f\\n' % L2_G1_x_std)\n ##################### SSIM G2 ##################\n N = len(x_files)\n ssim_G_x = []\n psnr_G_x = []\n L1_mean_G_x = []\n L2_mean_G_x = []\n # x_0_255 = utils_wgan.unprocess_image(x_fixed, 127.5, 127.5)\n # x_0_255 = x_target_list\n for i in xrange(N):\n # G2_gray = rgb2gray((G2_list[i]/127.5-1).clip(min=-1,max=1))\n # x_target_gray = rgb2gray((x_target_list[i]/127.5-1).clip(min=-1,max=1))\n # gray image, [0,1]\n # G2_gray = rgb2gray((G2_list[i]).clip(min=0,max=255))\n # x_target_gray = rgb2gray((x_target_list[i]).clip(min=0,max=255))\n # ssim_G_x.append(ssim(G_gray, x_target_gray, data_range=x_target_gray.max()-x_target_gray.min(), multichannel=False))\n # psnr_G_x.append(psnr(im_true=x_target_gray, im_test=G2_gray, data_range=x_target_gray.max()-x_target_gray.min())) \n # L1_mean_G_x.append(l1_mean_dist(G2_gray, x_target_gray))\n # L2_mean_G_x.append(l2_mean_dist(G2_gray, x_target_gray))\n \n # color image\n # ssim_G_x.append(ssim(G2_list[i], x_target_list[i], multichannel=True))\n masked_G2_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i])\n masked_x_target_array = np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*x_target_list[i])\n ssim_G_x.append(ssim(masked_G2_array, masked_x_target_array, multichannel=True))\n \n psnr_G_x.append(psnr(im_true=masked_x_target_array, im_test=masked_G2_array))\n L1_mean_G_x.append(l1_mean_dist(masked_G2_array, masked_x_target_array))\n L2_mean_G_x.append(l2_mean_dist(masked_G2_array, masked_x_target_array))\n # pdb.set_trace()\n ssim_G2_x_mean = np.mean(ssim_G_x)\n ssim_G2_x_std = np.std(ssim_G_x)\n psnr_G2_x_mean = np.mean(psnr_G_x)\n psnr_G2_x_std 
= np.std(psnr_G_x)\n L1_G2_x_mean = np.mean(L1_mean_G_x)\n L1_G2_x_std = np.std(L1_mean_G_x)\n L2_G2_x_mean = np.mean(L2_mean_G_x)\n L2_G2_x_std = np.std(L2_mean_G_x)\n print('ssim_G2_x_mean: %f\\n' % ssim_G2_x_mean)\n print('ssim_G2_x_std: %f\\n' % ssim_G2_x_std)\n print('psnr_G2_x_mean: %f\\n' % psnr_G2_x_mean)\n print('psnr_G2_x_std: %f\\n' % psnr_G2_x_std)\n print('L1_G2_x_mean: %f\\n' % L1_G2_x_mean)\n print('L1_G2_x_std: %f\\n' % L1_G2_x_std)\n print('L2_G2_x_mean: %f\\n' % L2_G2_x_mean)\n print('L2_G2_x_std: %f\\n' % L2_G2_x_std)\n\n ##################### Inception score ##################\n G1_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G1_list[i]) for i in range(len(G1_list))]\n G2_list_masked = [np.uint8(mask_target_list[i][:,:,np.newaxis]/255.*G2_list[i]) for i in range(len(G2_list))]\n # IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list)\n IS_G1_mean, IS_G1_std = tflib.inception_score.get_inception_score(G1_list_masked)\n print('IS_G1_mean: %f\\n' % IS_G1_mean)\n print('IS_G1_std: %f\\n' % IS_G1_std)\n # IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list)\n IS_G2_mean, IS_G2_std = tflib.inception_score.get_inception_score(G2_list_masked)\n print('IS_G2_mean: %f\\n' % IS_G2_mean)\n print('IS_G2_std: %f\\n' % IS_G2_std)\n\n with open(score_path, 'w') as f:\n f.write('N: %d ' % N)\n f.write('ssimG1: %.5f +- %.5f ' % (ssim_G1_x_mean, ssim_G1_x_std))\n f.write('ISG1: %.5f +- %.5f ' % (IS_G1_mean, IS_G1_std))\n f.write('psnrG1: %.5f +- %.5f ' % (psnr_G1_x_mean, psnr_G1_x_std))\n f.write('L1G1: %.5f +- %.5f ' % (L1_G1_x_mean, L1_G1_x_std))\n f.write('L2G1: %.5f +- %.5f ' % (L2_G1_x_mean, L2_G1_x_std))\n f.write('ssimG2: %.5f +- %.5f ' % (ssim_G2_x_mean, ssim_G2_x_std))\n f.write('ISG2: %.5f +- %.5f ' % (IS_G2_mean, IS_G2_std))\n f.write('psnrG2: %.5f +- %.5f ' % (psnr_G2_x_mean, psnr_G2_x_std))\n f.write('L1G2: %.5f +- %.5f ' % (L1_G2_x_mean, L1_G2_x_std))\n f.write('L2G2: %.5f +- %.5f' % (L2_G2_x_mean, L2_G2_x_std))\n\n\n\n # f.write('ssim_std: %f ' % ssim_G_x_std)\n # f.write('IS_mean: %f ' % IS_G_mean)\n # f.write('IS_std: %f ' % IS_G_std)\n # f.write('psnr_mean: %f ' % psnr_G_x_mean)\n # f.write('psnr_std: %f' % psnr_G_x_std)\n"
] | [
[
"numpy.sum",
"numpy.abs",
"numpy.product",
"numpy.std",
"numpy.mean",
"numpy.uint8"
]
] |
fogx/docarray | [
"2cb60f893ebcfd29708132e44202ccb20e639c6b"
] | [
"tests/unit/document/test_converters.py"
] | [
"import os\n\nimport numpy as np\nimport pytest\n\nfrom docarray import Document\nfrom docarray.document.generators import from_files\nfrom docarray.helper import __windows__\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef test_video_convert_pipe(pytestconfig, tmpdir):\n num_d = 0\n fname = str(tmpdir / f'tmp{num_d}.mp4')\n d = Document(uri=os.path.join(cur_dir, 'toydata/mov_bbb.mp4'))\n d.load_uri_to_video_tensor()\n d.save_video_tensor_to_file(fname)\n assert os.path.exists(fname)\n\n\ndef test_audio_convert_pipe(pytestconfig, tmpdir):\n num_d = 0\n for d in from_files(f'{cur_dir}/toydata/*.wav'):\n fname = str(tmpdir / f'tmp{num_d}.wav')\n d.load_uri_to_audio_tensor()\n d.tensor = d.tensor[::-1]\n d.save_audio_tensor_to_file(fname)\n assert os.path.exists(fname)\n num_d += 1\n assert num_d\n\n\ndef test_image_convert_pipe(pytestconfig):\n for d in from_files(f'{pytestconfig.rootdir}/.github/**/*.png'):\n (\n d.load_uri_to_image_tensor()\n .convert_uri_to_datauri()\n .set_image_tensor_shape((64, 64))\n .set_image_tensor_normalization()\n .set_image_tensor_channel_axis(-1, 0)\n )\n assert d.tensor.shape == (3, 64, 64)\n assert d.uri\n\n\ndef test_uri_to_tensor():\n doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))\n doc.load_uri_to_image_tensor()\n assert isinstance(doc.tensor, np.ndarray)\n assert doc.tensor.shape == (85, 152, 3) # h,w,c\n assert doc.mime_type == 'image/png'\n\n\ndef test_datauri_to_tensor():\n doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))\n doc.convert_uri_to_datauri()\n assert not doc.tensor\n assert doc.mime_type == 'image/png'\n\n\ndef test_blob_to_tensor():\n doc = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))\n doc.load_uri_to_blob()\n doc.convert_blob_to_image_tensor()\n assert isinstance(doc.tensor, np.ndarray)\n assert doc.mime_type == 'image/png'\n assert doc.tensor.shape == (85, 152, 3) # h,w,c\n\n\ndef test_convert_blob_to_tensor():\n rand_state = np.random.RandomState(0)\n array = rand_state.random([10, 10])\n doc = Document(content=array.tobytes())\n assert doc.content_type == 'blob'\n intialiazed_blob = doc.blob\n\n doc.convert_blob_to_tensor()\n assert doc.content_type == 'tensor'\n converted_blob_in_one_of = doc.blob\n assert intialiazed_blob != converted_blob_in_one_of\n np.testing.assert_almost_equal(doc.content.reshape([10, 10]), array)\n\n\[email protected]('shape, channel_axis', [((3, 32, 32), 0), ((32, 32, 3), -1)])\ndef test_image_normalize(shape, channel_axis):\n doc = Document(content=np.random.randint(0, 255, shape, dtype=np.uint8))\n doc.set_image_tensor_normalization(channel_axis=channel_axis)\n assert doc.tensor.ndim == 3\n assert doc.tensor.shape == shape\n assert doc.tensor.dtype == np.float32\n\n\[email protected](\n 'arr_size, channel_axis, height, width',\n [\n ([32, 28, 3], -1, 32, 28), # h, w, c (rgb)\n ([3, 32, 28], 0, 32, 28), # c, h, w (rgb)\n ([1, 32, 28], 0, 32, 28), # c, h, w, (greyscale)\n ([32, 28, 1], -1, 32, 28), # h, w, c, (greyscale)\n ],\n)\ndef test_convert_image_tensor_to_uri(arr_size, channel_axis, width, height):\n doc = Document(content=np.random.randint(0, 255, arr_size))\n assert doc.tensor.any()\n assert not doc.uri\n doc.set_image_tensor_shape(channel_axis=channel_axis, shape=(width, height))\n\n doc.convert_image_tensor_to_uri(channel_axis=channel_axis)\n assert doc.uri.startswith('data:image/png;base64,')\n assert doc.mime_type == 'image/png'\n assert doc.tensor.any() # assure after conversion tensor still exist.\n\n\[email protected](\n 
condition=__windows__, reason='x-python is not detected on windows CI'\n)\[email protected](\n 'uri, mimetype',\n [\n (__file__, 'text/x-python'),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_blob(uri, mimetype):\n d = Document(uri=uri)\n assert not d.blob\n d.load_uri_to_blob()\n assert d.blob\n assert d.mime_type == mimetype\n\n\[email protected](\n 'converter', ['convert_blob_to_datauri', 'convert_content_to_datauri']\n)\ndef test_convert_blob_to_uri(converter):\n d = Document(content=open(__file__).read().encode(), mime_type='text/x-python')\n assert d.blob\n getattr(d, converter)()\n assert d.uri.startswith('data:text/x-python;')\n\n\[email protected](\n 'converter', ['convert_text_to_datauri', 'convert_content_to_datauri']\n)\ndef test_convert_text_to_uri(converter):\n d = Document(content=open(__file__).read())\n assert d.text\n getattr(d, converter)()\n assert d.uri.startswith('data:text/plain;')\n\n\[email protected](\n condition=__windows__, reason='x-python is not detected on windows CI'\n)\[email protected](\n 'uri, mimetype',\n [\n pytest.param(\n __file__,\n 'text/x-python',\n marks=pytest.mark.xfail(\n condition=__windows__, reason='x-python is not detected on windows CI'\n ),\n ),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_text(uri, mimetype):\n doc = Document(uri=uri, mime_type=mimetype)\n doc.load_uri_to_text()\n if mimetype == 'text/html':\n assert '<!doctype html>' in doc.text\n elif mimetype == 'text/x-python':\n text_from_file = open(__file__).read()\n assert doc.text == text_from_file\n\n\ndef test_convert_text_to_uri_and_back():\n text_from_file = open(__file__).read()\n doc = Document(content=text_from_file, mime_type='text/x-python')\n assert doc.text\n assert doc.mime_type == 'text/x-python'\n doc.convert_text_to_datauri()\n doc.load_uri_to_text()\n assert doc.mime_type == 'text/plain'\n assert doc.text == text_from_file\n\n\ndef test_convert_text_diff_encoding(tmpfile):\n otext = 'testä'\n text = otext.encode('iso8859')\n with open(tmpfile, 'wb') as fp:\n fp.write(text)\n with pytest.raises(UnicodeDecodeError):\n d = Document(uri=str(tmpfile)).load_uri_to_text()\n\n d = Document(uri=str(tmpfile)).load_uri_to_text(charset='iso8859')\n assert d.text == otext\n\n with open(tmpfile, 'w', encoding='iso8859') as fp:\n fp.write(otext)\n with pytest.raises(UnicodeDecodeError):\n d = Document(uri=str(tmpfile)).load_uri_to_text()\n\n d = Document(uri=str(tmpfile)).load_uri_to_text(charset='iso8859')\n assert d.text == otext\n\n\ndef test_convert_content_to_uri():\n d = Document(content=np.random.random([10, 10]))\n with pytest.raises(NotImplementedError):\n d.convert_content_to_datauri()\n\n\[email protected](\n 'uri, mimetype',\n [\n (__file__, 'text/x-python'),\n ('http://google.com/index.html', 'text/html'),\n ('https://google.com/index.html', 'text/html'),\n ],\n)\ndef test_convert_uri_to_data_uri(uri, mimetype):\n doc = Document(uri=uri, mime_type=mimetype)\n doc.convert_uri_to_datauri()\n assert doc.uri.startswith(f'data:{mimetype}')\n assert doc.mime_type == mimetype\n\n\ndef test_glb_converters():\n doc = Document(uri=os.path.join(cur_dir, 'toydata/test.glb'))\n doc.load_uri_to_point_cloud_tensor(2000)\n assert doc.tensor.shape == (2000, 3)\n\n doc.load_uri_to_point_cloud_tensor(2000, as_chunks=True)\n assert len(doc.chunks) == 1\n assert doc.chunks[0].tensor.shape == (2000, 3)\n"
] | [
[
"numpy.random.RandomState",
"numpy.random.randint",
"numpy.random.random"
]
] |
mevalle/r-DEP-Classifier | [
"c18625fe6b69ec9cc4edead7bb7f647ebf71956a"
] | [
"rDEP.py"
] | [
"import numpy as np\nimport cvxpy as cp\nimport dccp\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\nfrom sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin\nfrom sklearn.utils.multiclass import unique_labels\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom sklearn.model_selection import StratifiedKFold\nimport time\n\n# ##################################################\n# Plot the decision boundary of a classifier\n# ##################################################\n\ndef decision_boundary(self, X, y, ind=[0,1], Nh = 101, colors=\"black\", label = None): \n # Scatter plot\n sc = plt.scatter(X[:,ind[0]], X[:,ind[1]], c = y.astype(int))\n xlimits = plt.xlim()\n ylimits = plt.ylim()\n \n if X.shape[1]>2:\n print(\"Dimension larger than two! Cannot show the decision boundary!\")\n else:\n # create a mesh to plot in\n x_min, x_max = xlimits[0], xlimits[1]\n y_min, y_max = ylimits[0], ylimits[1]\n hx = (x_max-x_min)/Nh\n hy = (y_max-y_min)/Nh\n xx, yy = np.meshgrid(np.arange(x_min, x_max, hx),np.arange(y_min, y_max, hy))\n \n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n Z = np.array(self.predict(np.c_[xx.ravel(), yy.ravel()]))\n Z = Z.reshape(xx.shape)\n \n # Put the result into a color plot\n plt.contourf(xx, yy, Z, alpha = 0.1, cmap='plasma')\n plt.contour(xx, yy, Z, colors=colors, linestyles = 'dashed')\n \n plt.grid(\"True\")\n plt.xlabel(\"Variable %d\" % ind[0])\n plt.ylabel(\"Variable %d\" % ind[1])\n return sc\n\n# ##################################################\n# Ensemble (or Bagging) Transform\n# ##################################################\nclass EnsembleTransform(TransformerMixin, BaseEstimator):\n def __init__(self,ensemble):\n self.ensemble = ensemble\n \n def fit(self, X, y):\n (self.ensemble).fit(X, y)\n return self\n \n def transform(self, X):\n return np.vstack([clf.decision_function(X) for clf in (self.ensemble).estimators_]).T\n\n\n# ##################################################\n# Dilation-Erosion Perceptron with DCCP\n# ################################################## \nclass DEP(BaseEstimator, ClassifierMixin):\n \n def __init__(self, weighted = True, ref = \"maximum\", C = 1.e-2, \n beta = None, beta_loss = \"hinge\", Split2Beta = False, \n solver = cp.MOSEK, verbose = False):\n self.verbose = verbose\n self.solver = solver\n self.weighted = weighted\n self.ref = ref\n self.C = C\n self.beta = beta\n self.beta_loss = beta_loss\n self.Split2Beta = Split2Beta\n \n def fit(self, X, y):\n start_time = time.time()\n \n # Check that X and y have correct shape\n X, y = check_X_y(X, y)\n \n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n \n if len(self.classes_)>2:\n print(\"Dilation-Erosion Morphological Perceptron can be used for binary classification!\")\n return \n \n if self.Split2Beta == True:\n skf = StratifiedKFold(n_splits=3, shuffle=True)\n WM_index, beta_index = next(iter(skf.split(X,y)))\n X_WM, X_beta = X[WM_index], X[beta_index]\n y_WM, y_beta = y[WM_index], y[beta_index]\n else:\n X_WM, X_beta = X, X\n y_WM, y_beta = y, y\n \n M, N = X_beta.shape\n \n indPos = (y_WM == self.classes_[1])\n Xpos = X_WM[indPos,:]\n Xneg = X_WM[~indPos,:]\n Mpos = Xpos.shape[0]\n Mneg = Xneg.shape[0]\n\n if self.weighted == True:\n Lpos = 1/pairwise_distances(Xpos,[np.mean(Xpos,axis=0)],metric=\"euclidean\").flatten()\n Lneg = 
1/pairwise_distances(Xneg,[np.mean(Xneg,axis=0)],metric=\"euclidean\").flatten()\n nuPos = Lpos/Lpos.max()\n nuNeg = Lneg/Lneg.max()\n else:\n nuPos = np.ones((Mpos))\n nuNeg = np.ones((Mneg))\n \n # Solve DCCP problem for dilation\n if self.ref == \"mean\":\n ref = -np.mean(Xneg,axis=0).reshape((1,N))\n elif self.ref == \"maximum\":\n ref = -np.max(Xneg,axis=0).reshape((1,N))\n elif self.ref == \"minimum\":\n ref = -np.min(Xneg,axis=0).reshape((1,N))\n else:\n ref = np.zeros((1,N))\n \n w = cp.Variable((1,N))\n xiPos = cp.Variable((Mpos))\n xiNeg = cp.Variable((Mneg))\n \n lossDil = cp.sum(nuPos*cp.pos(xiPos))/Mpos+cp.sum(nuNeg*cp.pos(xiNeg))/Mneg+self.C*cp.norm(w-ref,1)\n objectiveDil = cp.Minimize(lossDil)\n \n ZposDil = cp.max(np.ones((Mpos,1))@w + Xpos, axis=1)\n ZnegDil = cp.max(np.ones((Mneg,1))@w + Xneg, axis=1) \n constraintsDil = [ZposDil >= -xiPos, ZnegDil <= xiNeg]\n\n probDil = cp.Problem(objectiveDil,constraintsDil) \n probDil.solve(solver=self.solver, method = 'dccp', verbose = self.verbose)\n self.dil_ = (w.value).flatten()\n \n # Solve DCCP problem for erosion\n if self.ref == \"mean\":\n ref = -np.mean(Xpos,axis=0).reshape((1,N))\n elif self.ref == \"maximum\":\n ref = -np.min(Xpos,axis=0).reshape((1,N))\n elif self.ref == \"minimum\":\n ref = -np.max(Xpos,axis=0).reshape((1,N))\n else:\n ref = np.zeros((1,N))\n \n m = cp.Variable((1,N))\n etaPos = cp.Variable((Mpos))\n etaNeg = cp.Variable((Mneg))\n \n lossEro = cp.sum(nuPos*cp.pos(etaPos))/Mpos+cp.sum(nuNeg*cp.pos(etaNeg))/Mneg+self.C*cp.norm(m-ref,1)\n objectiveEro = cp.Minimize(lossEro)\n \n ZposEro = cp.min(np.ones((Mpos,1))@m + Xpos, axis=1)\n ZnegEro = cp.min(np.ones((Mneg,1))@m + Xneg, axis=1) \n constraintsEro = [ZposEro >= -etaPos, ZnegEro <= etaNeg]\n\n probEro = cp.Problem(objectiveEro,constraintsEro) \n probEro.solve(solver=self.solver, method = 'dccp', verbose = self.verbose)\n self.ero_ = (m.value).flatten()\n \n # Fine tune beta\n if self.beta == None:\n beta = cp.Variable(nonneg=True)\n beta.value = 0.5\n \n if self.beta_loss == \"squared_hinge\":\n # Squared Hinge Loss\n lossBeta = cp.sum_squares(cp.pos(-cp.multiply(2*((y_beta == self.classes_[1]).astype(int))-1,\n beta*cp.max(np.ones((M,1))@w.value + X_beta, axis=1) +\n (1-beta)*cp.min(np.ones((M,1))@m.value + X_beta, axis=1))))\n else:\n # Hinge Loss\n lossBeta = cp.sum(cp.pos(-cp.multiply(2*((y_beta == self.classes_[1]).astype(int))-1,\n beta*cp.max(np.ones((M,1))@w.value + X_beta, axis=1) +\n (1-beta)*cp.min(np.ones((M,1))@m.value + X_beta, axis=1))))\n \n constraintsBeta = [beta<=1]\n probBeta = cp.Problem(cp.Minimize(lossBeta),constraintsBeta)\n probBeta.solve(solver = cp.SCS, verbose = self.verbose, warm_start=True)\n self.beta = beta.value\n \n if self.verbose == True:\n print(\"\\nTime to train: %2.2f seconds.\" % (time.time() - start_time))\n return self\n \n def decision_function(self, X):\n # Check is fit had been called\n check_is_fitted(self,attributes=\"dil_\")\n \n # Input validation\n X = check_array(X)\n \n M,N = X.shape\n Y = np.zeros((M,2))\n # Compute the dilation\n Y[:,0] = np.amax(np.ones((M,1))@self.dil_.reshape((1,N))+X,axis=1)\n # Compute the erosion\n Y[:,1] = np.amin(np.ones((M,1))@self.ero_.reshape((1,N))+X,axis=1)\n \n return np.dot(Y,np.array([self.beta,1-self.beta]))\n \n def predict(self, X):\n return np.array([self.classes_[(y>=0).astype(int)] for y in self.decision_function(X)])\n\n def show(self, X, y, ind=[0,1], show_boxes = True, decision_boundary = True, Nh = 101):\n # Check that X and y have correct shape\n X, y = 
check_X_y(X, y)\n\n # Check is fit had been called\n check_is_fitted(self,attributes=\"dil_\")\n\n plt.figure(figsize=(10, 8))\n \n # Scatter plot\n sc = plt.scatter(X[:,ind[0]], X[:,ind[1]], c = y.astype(int))\n xlimits = plt.xlim()\n ylimits = plt.ylim()\n \n if decision_boundary:\n if X.shape[1]>2:\n print(\"Dimension larger than two! Cannot show the decision boundary!\")\n else:\n # create a mesh to plot in\n x_min, x_max = xlimits[0], xlimits[1]\n y_min, y_max = ylimits[0], ylimits[1]\n hx = (x_max-x_min)/Nh\n hy = (y_max-y_min)/Nh\n xx, yy = np.meshgrid(np.arange(x_min, x_max, hx),np.arange(y_min, y_max, hy))\n \n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, m_max]x[y_min, y_max].\n Z = np.array(self.predict(np.c_[xx.ravel(), yy.ravel()]))\n Z = Z.reshape(xx.shape)\n \n # Put the result into a color plot\n plt.contourf(xx, yy, Z, alpha = 0.1, cmap='plasma')\n plt.contour(xx, yy, Z, colors='black', linestyles = 'dashed')\n \n if show_boxes:\n # Draw dilation box\n box = [-1000*np.ones((X.shape[1],)),-self.dil_]\n Vertices = np.array([box[0][ind],[box[1][ind[0]],box[0][ind[1]]],box[1][ind],[box[0][ind[0]],box[1][ind[1]]]])\n plt.gca().add_patch(Polygon(Vertices, alpha = 0.3, color=sc.to_rgba(0)))\n \n # Draw erosion box\n box = [-self.ero_,1000*np.ones((X.shape[1],))]\n Vertices = np.array([box[0][ind],[box[1][ind[0]],box[0][ind[1]]],box[1][ind],[box[0][ind[0]],box[1][ind[1]]]])\n plt.gca().add_patch(Polygon(Vertices, alpha = 0.3, color=sc.to_rgba(1)))\n \n plt.grid(True)\n plt.xlabel(\"Variable %d\" % ind[0])\n plt.ylabel(\"Variable %d\" % ind[1])\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.contour",
"numpy.mean",
"numpy.zeros",
"numpy.arange",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.ylim",
"sklearn.utils.validation.check_array",
"sklearn.utils.multiclass.unique_labels",
"sklearn.model_selection.StratifiedKFold",
"matplotlib.pyplot.grid",
"numpy.array",
"matplotlib.pyplot.contourf",
"sklearn.utils.validation.check_X_y",
"matplotlib.pyplot.xlabel"
]
] |
zzdxfei/detectron_cascade_code | [
"59706f1687ee067d1df5da60e7a4a442fb2d59f8"
] | [
"detectron/ops/distribute_cascade_proposals.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nfrom detectron.core.config import cfg\nfrom detectron.datasets import json_dataset\nimport detectron.modeling.FPN as fpn\nimport detectron.roi_data.cascade_rcnn as cascade_rcnn_roi_data\nimport detectron.utils.blob as blob_utils\n\n\nclass DistributeCascadeProposalsOp(object):\n\n def __init__(self, train, stage):\n self._train = train\n self._stage = stage\n\n def forward(self, inputs, outputs):\n \"\"\"See modeling.detector.DistributeCascadeProposals for\n inputs/outputs documentation.\n \"\"\"\n rois = inputs[0].data\n if self._train:\n # During training we reuse the data loader code. We populate roidb\n # entries on the fly using the rois generated by RPN.\n # im_info: [[im_height, im_width, im_scale], ...]\n roidb = blob_utils.deserialize(inputs[1].data)\n im_info = inputs[2].data\n im_scales = im_info[:, 2]\n\n # For historical consistency with the original Faster R-CNN\n # implementation we are *not* filtering crowd proposals.\n # This choice should be investigated in the future (it likely does\n # not matter).\n json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)\n # Compute training labels for the RPN proposals; also handles\n # distributing the proposals over FPN levels\n output_blob_names = cascade_rcnn_roi_data.get_cascade_rcnn_blob_names(\n self._stage\n )\n blobs = {k: [] for k in output_blob_names}\n\n # 进行rois映射到了合适的fpn层, 并重新进行采样构成训练数据\n cascade_rcnn_roi_data.add_cascade_rcnn_blobs(\n blobs, im_scales, roidb, self._stage\n )\n for i, k in enumerate(output_blob_names):\n blob_utils.py_op_copy_blob(blobs[k], outputs[i])\n else:\n # For inference we have a special code path that avoids some data\n # loader overhead\n distribute(rois, None, outputs, self._train)\n\n\ndef distribute(rois, label_blobs, outputs, train):\n \"\"\"To understand the output blob order see return value of\n roi_data.cascade_rcnn.get_cascade_rcnn_blob_names(is_training=False)\n \"\"\"\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n lvls = fpn.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)\n\n outputs[0].reshape(rois.shape)\n outputs[0].data[...] = rois\n\n # Create new roi blobs for each FPN level\n # (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying\n # to generalize to support this particular case.)\n rois_idx_order = np.empty((0,))\n for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):\n idx_lvl = np.where(lvls == lvl)[0]\n blob_roi_level = rois[idx_lvl, :]\n outputs[output_idx + 1].reshape(blob_roi_level.shape)\n outputs[output_idx + 1].data[...] 
= blob_roi_level\n rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))\n rois_idx_restore = np.argsort(rois_idx_order)\n blob_utils.py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])\n"
] | [
[
"numpy.where",
"numpy.concatenate",
"numpy.argsort",
"numpy.empty"
]
] |
iamchosenlee/MolDQN-pytorch | [
"bda8a74eb9e5d2f3232a6a27b6a32928a3797f6d"
] | [
"dqn.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass MolDQN(nn.Module):\n def __init__(self, input_length, output_length):\n super(MolDQN, self).__init__()\n\n self.linear_1 = nn.Linear(input_length, 1024)\n self.linear_2 = nn.Linear(1024, 512)\n self.linear_3 = nn.Linear(512, 128)\n self.linear_4 = nn.Linear(128, 32)\n self.linear_5 = nn.Linear(32, output_length)\n\n self.activation = nn.ReLU()\n\n def forward(self, x):\n x = self.activation(self.linear_1(x))\n x = self.activation(self.linear_2(x))\n x = self.activation(self.linear_3(x))\n x = self.activation(self.linear_4(x))\n x = self.linear_5(x)\n\n return x\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Linear"
]
] |
markqiu/zvt | [
"1bcfb71279f2652c3600f0f8e45d941f98ceaa10"
] | [
"zvt/recorders/joinquant/meta/china_stock_status_recorder.py"
] | [
"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom jqdatasdk import auth, logout, finance, query\nfrom zvt.recorders.joinquant.common import to_jq_entity_id\n\nfrom zvt import zvt_env\nfrom zvt.api import TIME_FORMAT_DAY, get_str_schema\nfrom zvt.contract.api import df_to_db\nfrom zvt.contract.recorder import TimeSeriesDataRecorder\nfrom zvt.domain import StockDetail,StockStatus\n\nfrom zvt.utils.pd_utils import pd_is_not_null\nfrom zvt.utils.time_utils import now_pd_timestamp, to_time_str\n\nclass JqChinaStockStatusRecorder(TimeSeriesDataRecorder):\n entity_provider = 'joinquant'\n entity_schema = StockDetail\n\n # 数据来自jq\n provider = 'joinquant'\n\n data_schema = StockStatus\n\n def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,\n force_update=False, sleeping_time=5, default_size=2000, real_time=False,\n fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,\n close_minute=0) -> None:\n self.data_schema = StockStatus\n super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,\n default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,\n close_minute)\n # 调用登录函数(激活后使用,不需要用户名密码)\n auth(zvt_env['jq_username'], zvt_env['jq_password'])\n\n def on_finish(self):\n super().on_finish()\n logout()\n\n def record(self, entity, start, end, size, timestamps):\n if not end:\n end = to_time_str(now_pd_timestamp())\n start = to_time_str(start)\n q = query(finance.STK_STATUS_CHANGE).filter(\n finance.STK_STATUS_CHANGE.code == to_jq_entity_id(entity)).filter(\n finance.STK_STATUS_CHANGE.pub_date >= to_time_str(start)).limit(10)\n df = finance.run_query(q)\n\n if pd_is_not_null(df):\n df['pub_date'] = pd.to_datetime(df['pub_date'])\n df['exchange'] = entity.exchange\n df['entity_type'] = entity.entity_type\n df['change_date'] = pd.to_datetime(df['change_date'])\n df['timestamp'] = df['change_date']\n\n df['entity_id'] = entity.id\n df['provider'] = 'joinquant'\n df['code'] = entity.code\n\n def generate_finance_id(se):\n return \"{}_{}\".format(se['entity_id'], to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY))\n\n df['id'] = df[['entity_id', 'timestamp']].apply(generate_finance_id, axis=1)\n\n df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)\n return None\n\n__all__ = ['JqChinaStockStatusRecorder']\n"
] | [
[
"pandas.to_datetime"
]
] |
TUM-AIMED/hyfed | [
"06e0ea66a8bf88ecaf09ebc0ff20cdd850d81b7f"
] | [
"hyfed-compensator/hyfed_compensator/project/hyfed_compensator_project.py"
] | [
"\"\"\"\n The main class to obtain the compensation parameters from the clients, aggregate them,\n and share the aggregation results with the server\n\n Copyright 2021 Reza NasiriGerdeh. All Rights Reserved.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom hyfed_compensator.util.hyfed_parameters import Parameter, AuthenticationParameter, SyncParameter, ConnectionParameter, MonitoringParameter\nfrom hyfed_compensator.util.status import OperationStatus\nfrom hyfed_compensator.util.endpoint import EndPoint\nfrom hyfed_compensator.util.utils import aggregate\nfrom hyfed_compensator.util.monitoring import Timer, Counter\n\nimport pickle\nimport numpy as np\nimport time\nimport hashlib\nimport requests\nfrom datetime import datetime\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass HyFedCompensatorProject:\n \"\"\"\n Provide main functions to communicate with the clients and server,\n and to aggregate the compensation parameters from the clients\n \"\"\"\n\n def __init__(self, project_id_hash, client_count):\n \"\"\" Initialize the compensator project using the hash of the project ID and the number of clients \"\"\"\n\n # for compensator to know whether it has received compensation parameters from all clients\n self.client_count = client_count\n\n # hash of the project ID, which should be the same for all clients\n self.project_id_hash = project_id_hash\n\n # authentication parameters from the clients\n self.client_token_hashes = list()\n self.client_username_hashes = list()\n\n # sync parameters from the clients\n self.client_steps = list()\n self.client_comm_rounds = list()\n\n # compensation parameters (noise values) from the clients\n self.client_compensation_parameters = list()\n\n # data type parameters from clients\n self.client_data_type_parameters = list()\n\n # clients tell compensator where to send the aggregated noise values\n self.server_urls = list()\n\n # aggregated parameters have the same parameter names as the local model parameters of the clients\n self.aggregated_compensation_parameters = dict()\n\n # to tell the server whether the aggregation of noise values have been successful\n self.operation_status = OperationStatus.DONE\n\n # monitoring timers\n self.computation_timer = Timer(name='Computation')\n self.network_send_timer = Timer(name='Network Send')\n\n # counter to track the traffic client -> compensator (in terms of bytes)\n self.client_compensator_traffic = Counter(\"client->compensator\")\n\n self.upload_parameters_timeout = 600\n\n # used for garbage collection purposes\n self.last_updated_date = datetime.now().timestamp()\n\n def add_client_parameters(self, request):\n \"\"\" Append client's authentication, sync, connection, and compensation parameters to the corresponding lists \"\"\"\n\n try:\n # new communication round starts for compensator if the parameters from the first client is received\n if len(self.client_compensation_parameters) == 0:\n self.computation_timer.new_round()\n self.network_send_timer.new_round()\n\n # add traffic size 
to client -> compensator traffic counter\n traffic_size = int(request.headers['Content-Length'])\n self.client_compensator_traffic.increment(traffic_size)\n logger.debug(f'Project {self.project_id_hash}: {traffic_size} bytes added to client -> compensator traffic.')\n\n self.computation_timer.start()\n\n # extract client parameters from the request body\n request_body = pickle.loads(request.body)\n\n authentication_parameters = request_body[Parameter.AUTHENTICATION]\n sync_parameters = request_body[Parameter.SYNCHRONIZATION]\n compensation_parameters = request_body[Parameter.COMPENSATION]\n connection_parameters = request_body[Parameter.CONNECTION]\n data_type_parameters = request_body[Parameter.DATA_TYPE]\n\n # authentication parameters\n hash_username = authentication_parameters[AuthenticationParameter.HASH_USERNAME]\n hash_token = authentication_parameters[AuthenticationParameter.HASH_TOKEN]\n\n # sync parameters\n step = sync_parameters[SyncParameter.PROJECT_STEP]\n comm_round = sync_parameters[SyncParameter.COMM_ROUND]\n\n # connection parameter\n server_url = connection_parameters[ConnectionParameter.SERVER_URL]\n\n # add the parameters to the lists\n self.client_username_hashes.append(hash_username)\n self.client_token_hashes.append(hash_token)\n self.client_steps.append(step)\n self.client_comm_rounds.append(comm_round)\n self.server_urls.append(server_url)\n self.client_compensation_parameters.append(compensation_parameters)\n self.client_data_type_parameters.append(data_type_parameters)\n\n self.computation_timer.stop()\n\n logger.debug(f'Project {self.project_id_hash}: Client parameters added!')\n\n except Exception as add_parameter_exp:\n logger.error(f'Project {self.project_id_hash}: Adding client parameters was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {add_parameter_exp}')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n\n def aggregate_client_parameters(self):\n \"\"\" Aggregate client parameters including the compensation parameters from all clients \"\"\"\n\n try:\n self.computation_timer.start()\n\n logger.debug(f\"Project {self.project_id_hash}: Aggregating client parameters ...\")\n\n # make sure all clients are in the same step and communication round\n if not self.is_client_sync_ok():\n logger.error(f'Project {self.project_id_hash}: The step/comm_round of the clients are different!')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n return\n\n # ensure all clients are coordinated by the same server\n if not self.is_server_url_same():\n logger.error(f'Project {self.project_id_hash}: Server URL is different for the clients!')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n return\n\n # make sure compensator parameter names are the same across the clients\n if not self.is_client_compensation_parameters_ok():\n logger.error(f'Project {self.project_id_hash}: Compensation parameter names are different across clients!')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n return\n\n # aggregate the compensation parameters\n for parameter_name in self.client_compensation_parameters[0].keys():\n compensation_values = self.compensation_parameter_to_list(parameter_name)\n parameter_data_type = self.client_data_type_parameters[0][parameter_name]\n aggregated_compensation_value = aggregate(compensation_values, parameter_data_type)\n self.aggregated_compensation_parameters[parameter_name] = -aggregated_compensation_value\n\n self.computation_timer.stop()\n\n 
except Exception as aggregate_exp:\n logger.error(f'Project {self.project_id_hash}: Aggregating the compensation parameters was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {aggregate_exp}')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n\n def send_to_server(self):\n \"\"\" Send aggregated authentication, sync, monitoring, and compensation parameters to the server \"\"\"\n\n # create and serialize request body\n parameters_serialized = self.prepare_server_parameters()\n\n max_tries = 10\n for _ in range(max_tries):\n try:\n\n logger.debug(f\"Project {self.project_id_hash}: Sending the aggregated parameters to the server ...\")\n\n self.network_send_timer.start()\n response = requests.post(url=f'{self.server_urls[0]}/{EndPoint.MODEL_COMPENSATION}',\n data=parameters_serialized,\n timeout=self.upload_parameters_timeout)\n\n if response.status_code == 200:\n logger.debug(f\"Project {self.project_id_hash}: Sending done!\")\n self.network_send_timer.stop()\n return\n\n logger.error(f\"Project {self.project_id_hash}: Sending failed, got {response.status_code} status code from the server!\")\n self.network_send_timer.stop()\n\n time.sleep(30)\n continue\n except Exception as send_server_exp:\n logger.error(f\"Project {self.project_id_hash}: Sending failed!\")\n logger.error(f'Project {self.project_id_hash}: The exception is: {send_server_exp}')\n self.network_send_timer.stop()\n time.sleep(30)\n\n def aggregate_and_send(self):\n \"\"\" First aggregate, and then, send aggregated parameters to the server \"\"\"\n\n # aggregate client parameters including compensation parameters\n self.aggregate_client_parameters()\n\n # send the aggregated parameters to the server\n self.send_to_server()\n\n # empty the lists/dictionaries for the next round\n self.client_token_hashes = list()\n self.client_username_hashes = list()\n self.client_steps = list()\n self.client_comm_rounds = list()\n self.client_compensation_parameters = list()\n self.client_data_type_parameters = list()\n self.server_urls = list()\n self.aggregated_compensation_parameters = dict()\n\n # ########## setter/getter functions\n def set_operation_status_done(self):\n \"\"\" If current operation is still in progress (not failed), then set it to Done \"\"\"\n\n if self.operation_status == OperationStatus.IN_PROGRESS:\n self.operation_status = OperationStatus.DONE\n\n def set_operation_status_in_progress(self):\n \"\"\" If previous operation is done (not failed), then set current operation status to In Progress \"\"\"\n\n if self.operation_status == OperationStatus.DONE:\n self.operation_status = OperationStatus.IN_PROGRESS\n\n def set_operation_status_failed(self):\n \"\"\" Regardless of the current status, set the operation status to Failed \"\"\"\n\n logger.error(\"Operation failed!\")\n self.operation_status = OperationStatus.FAILED\n\n def set_last_updated_date(self):\n self.last_updated_date = datetime.now().timestamp()\n\n def is_operation_failed(self):\n return self.operation_status == OperationStatus.FAILED\n\n def get_last_updated_date(self):\n return self.last_updated_date\n\n # ########## Helper functions\n def is_client_sync_ok(self):\n \"\"\" Ensure the project step and communication round of all clients is the same \"\"\"\n\n try:\n logger.debug(f\"Project {self.project_id_hash}: checking synchronization status of the clients ...\")\n\n return (np.all(np.array(self.client_steps) == self.client_steps[0]) and\n np.all(np.array(self.client_comm_rounds) == 
self.client_comm_rounds[0]))\n\n except Exception as sync_exp:\n logger.error(f'Project {self.project_id_hash}: Checking sync status of the clients was failed')\n logger.error(f'Project {self.project_id_hash}: The exception is: {sync_exp}')\n return False\n\n def is_server_url_same(self):\n \"\"\" Ensure the the server urls from all clients are the same \"\"\"\n\n try:\n\n logger.debug(f\"Project {self.project_id_hash}: Checking whether clients are coordinated by the same server ...\")\n\n return np.all(np.array(self.server_urls) == self.server_urls[0])\n\n except Exception as server_url_exp:\n logger.error(f'Project {self.project_id_hash}: Checking server urls was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {server_url_exp}')\n return False\n\n def is_client_compensation_parameters_ok(self):\n \"\"\" Make sure the names of the compensation parameters are consistent across clients \"\"\"\n try:\n logger.debug(f\"Project {self.project_id_hash}: checking whether compensation parameter names are consistent across all clients ...\")\n client1_compensation_parameter_names = self.client_compensation_parameters[0].keys()\n for client_parameters in self.client_compensation_parameters:\n\n if client_parameters.keys() != client1_compensation_parameter_names:\n return False\n\n return True\n except Exception as compensation_param_exp:\n logger.error(f'Project {self.project_id_hash}: Checking compensation parameter names was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {compensation_param_exp}')\n return False\n\n def is_client_data_type_parameters_ok(self):\n \"\"\" Make sure the names of the data type parameters are consistent across clients \"\"\"\n try:\n logger.debug(f\"Project {self.project_id_hash}: checking whether data type parameter names are consistent across all clients ...\")\n client1_data_type_parameter_names = self.client_data_type_parameters[0].keys()\n for client_parameters in self.client_data_type_parameters:\n if client_parameters.keys() != client1_data_type_parameter_names:\n return False\n\n return True\n except Exception as compensation_param_exp:\n logger.error(f'Project {self.project_id_hash}: Checking data type parameter names was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {compensation_param_exp}')\n return False\n\n def should_aggregate_and_send(self):\n \"\"\" Check whether compensation parameters from all clients received \"\"\"\n\n return len(self.client_username_hashes) == self.client_count\n\n def compensation_parameter_to_list(self, parameter_name):\n \"\"\"\n Extract the compensation parameter of the clients specified with parameter_name as a list\n \"\"\"\n\n compensation_parameter_list = []\n try:\n for compensation_parameter in self.client_compensation_parameters:\n compensation_parameter_list.append(compensation_parameter[parameter_name])\n except Exception as convert_exp:\n logger.error(f'Project {self.project_id_hash}: Converting compensation parameters to list was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {convert_exp}')\n self.set_operation_status_failed()\n\n return compensation_parameter_list\n\n def prepare_server_parameters(self):\n \"\"\" Prepare the parameters shared with the server \"\"\"\n\n try:\n self.computation_timer.start()\n\n # initialize authentication parameters\n authentication_parameters = dict()\n\n hash_username_hashes = 
hashlib.sha256(''.join(sorted(self.client_username_hashes)).encode('utf-8')).hexdigest()\n hash_token_hashes = hashlib.sha256(''.join(sorted(self.client_token_hashes)).encode('utf-8')).hexdigest()\n\n authentication_parameters[AuthenticationParameter.HASH_PROJECT_ID] = self.project_id_hash\n authentication_parameters[AuthenticationParameter.HASH_USERNAME_HASHES] = hash_username_hashes\n authentication_parameters[AuthenticationParameter.HASH_TOKEN_HASHES] = hash_token_hashes\n\n # initialize synchronization parameters\n sync_parameters = dict()\n sync_parameters[SyncParameter.PROJECT_STEP] = self.client_steps[0]\n sync_parameters[SyncParameter.COMM_ROUND] = self.client_comm_rounds[0]\n sync_parameters[SyncParameter.OPERATION_STATUS] = self.operation_status\n\n monitoring_parameters = dict()\n monitoring_parameters[MonitoringParameter.COMPUTATION_TIME] = self.computation_timer.get_total_duration()\n monitoring_parameters[MonitoringParameter.NETWORK_SEND_TIME] = self.network_send_timer.get_total_duration()\n monitoring_parameters[MonitoringParameter.CLIENT_COMPENSATOR_TRAFFIC] = self.client_compensator_traffic.total_count\n\n # server parameters in json\n server_parameters_json = {Parameter.AUTHENTICATION: authentication_parameters,\n Parameter.SYNCHRONIZATION: sync_parameters,\n Parameter.MONITORING: monitoring_parameters,\n Parameter.COMPENSATION: self.aggregated_compensation_parameters\n }\n server_parameters_serialized = pickle.dumps(server_parameters_json)\n\n self.computation_timer.stop()\n\n return server_parameters_serialized\n\n except Exception as prepare_exp:\n logger.error(f'Project {self.project_id_hash}: Preparing server parameters was failed!')\n logger.error(f'Project {self.project_id_hash}: The exception is: {prepare_exp}')\n self.computation_timer.stop()\n self.set_operation_status_failed()\n"
] | [
[
"numpy.array"
]
] |
muzi2045/second_TANET.pytorch | [
"3e10c93075a76684871fe0f188819c7b282671fd"
] | [
"second/core/non_max_suppression/nms_cpu.py"
] | [
"import math\r\nfrom pathlib import Path\r\nimport numba\r\nimport numpy as np\r\nfrom spconv.spconv_utils import (\r\n non_max_suppression_cpu, rotate_non_max_suppression_cpu)\r\nfrom second.core import box_np_ops\r\nfrom second.core.non_max_suppression.nms_gpu import rotate_iou_gpu\r\n\r\n\r\ndef nms_cc(dets, thresh):\r\n scores = dets[:, 4]\r\n order = scores.argsort()[::-1].astype(np.int32) # highest->lowest\r\n return non_max_suppression_cpu(dets, order, thresh, 1.0)\r\n\r\n\r\ndef rotate_nms_cc(dets, thresh):\r\n scores = dets[:, 5]\r\n order = scores.argsort()[::-1].astype(np.int32) # highest->lowest\r\n dets_corners = box_np_ops.center_to_corner_box2d(dets[:, :2], dets[:, 2:4],\r\n dets[:, 4])\r\n\r\n dets_standup = box_np_ops.corner_to_standup_nd(dets_corners)\r\n\r\n standup_iou = box_np_ops.iou_jit(dets_standup, dets_standup, eps=0.0)\r\n # print(dets_corners.shape, order.shape, standup_iou.shape)\r\n return rotate_non_max_suppression_cpu(dets_corners, order, standup_iou,\r\n thresh)\r\n\r\[email protected](nopython=True)\r\ndef nms_jit(dets, thresh, eps=0.0):\r\n x1 = dets[:, 0]\r\n y1 = dets[:, 1]\r\n x2 = dets[:, 2]\r\n y2 = dets[:, 3]\r\n scores = dets[:, 4]\r\n areas = (x2 - x1 + eps) * (y2 - y1 + eps)\r\n order = scores.argsort()[::-1].astype(np.int32) # highest->lowest\r\n ndets = dets.shape[0]\r\n suppressed = np.zeros((ndets), dtype=np.int32)\r\n keep = []\r\n for _i in range(ndets):\r\n i = order[_i] # start with highest score box\r\n if suppressed[\r\n i] == 1: # if any box have enough iou with this, remove it\r\n continue\r\n keep.append(i)\r\n for _j in range(_i + 1, ndets):\r\n j = order[_j]\r\n if suppressed[j] == 1:\r\n continue\r\n # calculate iou between i and j box\r\n w = max(min(x2[i], x2[j]) - max(x1[i], x1[j]) + eps, 0.0)\r\n h = max(min(y2[i], y2[j]) - max(y1[i], y1[j]) + eps, 0.0)\r\n inter = w * h\r\n ovr = inter / (areas[i] + areas[j] - inter)\r\n # ovr = inter / areas[j]\r\n if ovr >= thresh:\r\n suppressed[j] = 1\r\n return keep\r\n\r\n\r\[email protected]('float32[:, :], float32, float32, float32, uint32', nopython=True)\r\ndef soft_nms_jit(boxes, sigma=0.5, Nt=0.3, threshold=0.001, method=0):\r\n N = boxes.shape[0]\r\n pos = 0\r\n maxscore = 0\r\n maxpos = 0\r\n for i in range(N):\r\n maxscore = boxes[i, 4]\r\n maxpos = i\r\n\r\n tx1 = boxes[i, 0]\r\n ty1 = boxes[i, 1]\r\n tx2 = boxes[i, 2]\r\n ty2 = boxes[i, 3]\r\n ts = boxes[i, 4]\r\n pos = i + 1\r\n # get max box\r\n while pos < N:\r\n if maxscore < boxes[pos, 4]:\r\n maxscore = boxes[pos, 4]\r\n maxpos = pos\r\n pos = pos + 1\r\n\r\n # add max box as a detection\r\n boxes[i, 0] = boxes[maxpos, 0]\r\n boxes[i, 1] = boxes[maxpos, 1]\r\n boxes[i, 2] = boxes[maxpos, 2]\r\n boxes[i, 3] = boxes[maxpos, 3]\r\n boxes[i, 4] = boxes[maxpos, 4]\r\n\r\n # swap ith box with position of max box\r\n boxes[maxpos, 0] = tx1\r\n boxes[maxpos, 1] = ty1\r\n boxes[maxpos, 2] = tx2\r\n boxes[maxpos, 3] = ty2\r\n boxes[maxpos, 4] = ts\r\n\r\n tx1 = boxes[i, 0]\r\n ty1 = boxes[i, 1]\r\n tx2 = boxes[i, 2]\r\n ty2 = boxes[i, 3]\r\n ts = boxes[i, 4]\r\n\r\n pos = i + 1\r\n # NMS iterations, note that N changes if detection boxes fall below threshold\r\n while pos < N:\r\n x1 = boxes[pos, 0]\r\n y1 = boxes[pos, 1]\r\n x2 = boxes[pos, 2]\r\n y2 = boxes[pos, 3]\r\n s = boxes[pos, 4]\r\n\r\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\r\n iw = (min(tx2, x2) - max(tx1, x1) + 1)\r\n if iw > 0:\r\n ih = (min(ty2, y2) - max(ty1, y1) + 1)\r\n if ih > 0:\r\n ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area -\r\n iw * ih)\r\n ov = iw * 
ih / ua #iou between max box and detection box\r\n\r\n if method == 1: # linear\r\n if ov > Nt:\r\n weight = 1 - ov\r\n else:\r\n weight = 1\r\n elif method == 2: # gaussian\r\n weight = np.exp(-(ov * ov) / sigma)\r\n else: # original NMS\r\n if ov > Nt:\r\n weight = 0\r\n else:\r\n weight = 1\r\n\r\n boxes[pos, 4] = weight * boxes[pos, 4]\r\n\r\n # if box score falls below threshold, discard the box by swapping with last box\r\n # update N\r\n if boxes[pos, 4] < threshold:\r\n boxes[pos, 0] = boxes[N - 1, 0]\r\n boxes[pos, 1] = boxes[N - 1, 1]\r\n boxes[pos, 2] = boxes[N - 1, 2]\r\n boxes[pos, 3] = boxes[N - 1, 3]\r\n boxes[pos, 4] = boxes[N - 1, 4]\r\n N = N - 1\r\n pos = pos - 1\r\n\r\n pos = pos + 1\r\n\r\n keep = [i for i in range(N)]\r\n return keep\r\n\r\n"
] | [
[
"numpy.exp",
"numpy.zeros"
]
] |
MuhammadSYahyaS/cs-flow | [
"bef320ae7b2063f1dce41fb2f2225228cd43a589"
] | [
"train.py"
] | [
"import numpy as np\nimport torch\nfrom sklearn.metrics import roc_auc_score\nfrom tqdm import tqdm\nimport config as c\nfrom model import get_cs_flow_model, save_model, FeatureExtractor, nf_forward\nfrom utils import *\n\n\ndef train(train_loader, test_loader):\n model = get_cs_flow_model()\n optimizer = torch.optim.Adam(model.parameters(), lr=c.lr_init, eps=1e-04, weight_decay=1e-5)\n model.to(c.device)\n if not c.pre_extracted:\n fe = FeatureExtractor()\n fe.eval()\n fe.to(c.device)\n for param in fe.parameters():\n param.requires_grad = False\n\n z_obs = Score_Observer('AUROC')\n\n for epoch in range(c.meta_epochs):\n # train some epochs\n model.train()\n if c.verbose:\n print(F'\\nTrain epoch {epoch}')\n for sub_epoch in range(c.sub_epochs):\n train_loss = list()\n for i, data in enumerate(tqdm(train_loader, disable=c.hide_tqdm_bar)):\n optimizer.zero_grad()\n\n inputs, labels = preprocess_batch(data) # move to device and reshape\n if not c.pre_extracted:\n inputs = fe(inputs)\n\n z, jac = nf_forward(model, inputs)\n\n loss = get_loss(z, jac)\n train_loss.append(t2np(loss))\n\n loss.backward()\n norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.max_grad_norm)\n optimizer.step()\n\n mean_train_loss = np.mean(train_loss)\n if c.verbose and epoch == 0 and sub_epoch % 4 == 0:\n print('Epoch: {:d}.{:d} \\t train loss: {:.4f}'.format(epoch, sub_epoch, mean_train_loss))\n\n # evaluate\n model.eval()\n if c.verbose:\n print('\\nCompute loss and scores on test set:')\n test_loss = list()\n test_z = list()\n test_labels = list()\n\n with torch.no_grad():\n for i, data in enumerate(tqdm(test_loader, disable=c.hide_tqdm_bar)):\n inputs, labels = preprocess_batch(data)\n if not c.pre_extracted:\n inputs = fe(inputs)\n\n z, jac = nf_forward(model, inputs)\n loss = get_loss(z, jac)\n\n z_concat = t2np(concat_maps(z))\n score = np.mean(z_concat ** 2, axis=(1, 2))\n test_z.append(score)\n test_loss.append(t2np(loss))\n test_labels.append(t2np(labels))\n\n test_loss = np.mean(np.array(test_loss))\n if c.verbose:\n print('Epoch: {:d} \\t test_loss: {:.4f}'.format(epoch, test_loss))\n\n test_labels = np.concatenate(test_labels)\n is_anomaly = np.array([0 if l == 0 else 1 for l in test_labels])\n\n anomaly_score = np.concatenate(test_z, axis=0)\n z_obs.update(roc_auc_score(is_anomaly, anomaly_score), epoch,\n print_score=c.verbose or epoch == c.meta_epochs - 1)\n\n if c.save_model:\n model.to('cpu')\n save_model(model, c.modelname)\n\n return z_obs.max_score, z_obs.last, z_obs.min_loss_score\n"
] | [
[
"torch.no_grad",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"numpy.concatenate",
"numpy.mean"
]
] |
sungheeyun/optmlstat | [
"11d529c915bf27976da9157471a6dbf7df34d205"
] | [
"optmlstat/test/test_basic_functions.py"
] | [
"import unittest\nfrom logging import Logger, getLogger\n\nfrom numpy import ndarray, power, allclose\nfrom numpy.random import randn\nfrom freq_used.logging_utils import set_logging_basic_config\n\nfrom optmlstat.functions.function_base import FunctionBase\nfrom optmlstat.functions.example_functions import get_sum_of_square_function, get_sum_function\n\n\nlogger: Logger = getLogger()\n\n\nclass TestBasicFunctions(unittest.TestCase):\n num_inputs: int = 30\n num_data_points: int = 100\n x_array_2d: ndarray\n\n @classmethod\n def setUpClass(cls) -> None:\n set_logging_basic_config(__file__)\n cls.x_array_2d = randn(cls.num_data_points, cls.num_inputs)\n\n def test_sum_of_squares_function(self):\n y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(\n get_sum_of_square_function(TestBasicFunctions.num_inputs)\n )\n true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 2.0).sum(axis=1)\n\n logger.info(y_array_1d.shape)\n logger.info(true_y_array_1d.shape)\n logger.info(allclose(y_array_1d, true_y_array_1d))\n\n self.assertTrue(allclose(y_array_1d, true_y_array_1d))\n\n def test_sum_function(self):\n y_array_1d: ndarray = TestBasicFunctions._get_y_array_1d(get_sum_function(TestBasicFunctions.num_inputs))\n true_y_array_1d: ndarray = power(TestBasicFunctions.x_array_2d, 1.0).sum(axis=1)\n\n logger.info(y_array_1d.shape)\n logger.info(true_y_array_1d.shape)\n logger.info(allclose(y_array_1d, true_y_array_1d))\n\n self.assertTrue(allclose(y_array_1d, true_y_array_1d))\n\n @classmethod\n def _get_y_array_1d(cls, function: FunctionBase) -> ndarray:\n return function.get_y_values_2d(cls.x_array_2d).ravel()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.allclose",
"numpy.power",
"numpy.random.randn"
]
] |
mmderakhshani/ML-From-Scratch | [
"86ccc5273e0182b66c5d93c428f75dad61d8ced3"
] | [
"mlfromscratch/unsupervised_learning/gaussian_mixture_model.py"
] | [
"from __future__ import division, print_function\nimport sys\nimport os\nimport math\nimport random\nfrom sklearn import datasets\nimport numpy as np\n\n# Import helper functions\nfrom mlfromscratch.utils.data_manipulation import normalize\nfrom mlfromscratch.utils.data_operation import euclidean_distance, calculate_covariance_matrix\nfrom mlfromscratch.unsupervised_learning import PCA\nfrom mlfromscratch.utils import Plot\n\n\nclass GaussianMixtureModel():\n \"\"\"A probabilistic clustering method for determining groupings among data samples.\n\n Parameters:\n -----------\n k: int\n The number of clusters the algorithm will form.\n max_iterations: int\n The number of iterations the algorithm will run for if it does\n not converge before that. \n tolerance: float\n If the difference of the results from one iteration to the next is\n smaller than this value we will say that the algorithm has converged.\n \"\"\"\n def __init__(self, k=2, max_iterations=2000, tolerance=1e-8):\n self.k = k\n self.parameters = []\n self.max_iterations = max_iterations\n self.tolerance = tolerance\n self.responsibilities = []\n self.sample_assignments = None\n self.responsibility = None\n\n # Initialize gaussian randomly\n def _init_random_gaussians(self, X):\n n_samples = np.shape(X)[0]\n self.priors = (1 / self.k) * np.ones(self.k)\n for i in range(self.k):\n params = {}\n params[\"mean\"] = X[np.random.choice(range(n_samples))]\n params[\"cov\"] = calculate_covariance_matrix(X)\n self.parameters.append(params)\n\n # Likelihood\n def multivariate_gaussian(self, X, params):\n n_features = np.shape(X)[1]\n mean = params[\"mean\"]\n covar = params[\"cov\"]\n determinant = np.linalg.det(covar)\n likelihoods = np.zeros(np.shape(X)[0])\n for i, sample in enumerate(X):\n d = n_features # dimension\n coeff = (1.0 / (math.pow((2.0 * math.pi), d / 2)\n * math.sqrt(determinant)))\n exponent = math.exp(-0.5 * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean)))\n likelihoods[i] = coeff * exponent\n\n return likelihoods\n\n # Calculate the likelihood over all samples\n def _get_likelihoods(self, X):\n n_samples = np.shape(X)[0]\n likelihoods = np.zeros((n_samples, self.k))\n for i in range(self.k):\n likelihoods[\n :, i] = self.multivariate_gaussian(\n X, self.parameters[i])\n return likelihoods\n\n # Calculate the responsibility\n def _expectation(self, X):\n # Calculate probabilities of X belonging to the different clusters\n weighted_likelihoods = self._get_likelihoods(X) * self.priors\n sum_likelihoods = np.expand_dims(\n np.sum(weighted_likelihoods, axis=1), axis=1)\n # Determine responsibility as P(X|y)*P(y)/P(X)\n self.responsibility = weighted_likelihoods / sum_likelihoods\n # Assign samples to cluster that has largest probability\n self.sample_assignments = self.responsibility.argmax(axis=1)\n # Save value for convergence check\n self.responsibilities.append(np.max(self.responsibility, axis=1))\n\n # Update the parameters and priors\n def _maximization(self, X):\n # Iterate through clusters and recalculate mean and covariance\n for i in range(self.k):\n resp = np.expand_dims(self.responsibility[:, i], axis=1)\n mean = (resp * X).sum(axis=0) / resp.sum()\n covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()\n self.parameters[i][\"mean\"], self.parameters[\n i][\"cov\"] = mean, covariance\n\n # Update weights\n n_samples = np.shape(X)[0]\n self.priors = self.responsibility.sum(axis=0) / n_samples\n\n # Covergence if || likehood - last_likelihood || < tolerance\n def _converged(self, X):\n if 
len(self.responsibilities) < 2:\n return False\n diff = np.linalg.norm(\n self.responsibilities[-1] - self.responsibilities[-2])\n # print (\"Likelihood update: %s (tol: %s)\" % (diff, self.tolerance))\n return diff <= self.tolerance\n\n # Run GMM and return the cluster indices\n def predict(self, X):\n # Initialize the gaussians randomly\n self._init_random_gaussians(X)\n\n # Run EM until convergence or for max iterations\n for _ in range(self.max_iterations):\n self._expectation(X) # E-step\n self._maximization(X) # M-step\n\n # Check convergence\n if self._converged(X):\n break\n\n # Make new assignments and return them\n self._expectation(X)\n return self.sample_assignments\n\ndef main():\n # Load the dataset\n X, y = datasets.make_blobs()\n\n # Cluster the data\n clf = GaussianMixtureModel(k=3)\n y_pred = clf.predict(X)\n\n p = MatplotlibWrapper()\n p.plot_in_2d(X, y_pred, title=\"GMM Clustering\")\n p.plot_in_2d(X, y, title=\"Actual Clustering\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.linalg.norm",
"numpy.zeros",
"numpy.linalg.det",
"numpy.max",
"numpy.shape",
"numpy.expand_dims",
"numpy.linalg.pinv",
"sklearn.datasets.make_blobs"
]
] |
EEEGUI/Mapillary-vistas-semseg | [
"d07a107fd08a7536f09f25e426a6f15033cbb609"
] | [
"ptsemseg/loader/mapillary_vistas_loader.py"
] | [
"import os\nimport json\nimport torch\nimport numpy as np\n\nfrom torch.utils import data\nfrom PIL import Image\n\nfrom ptsemseg.utils import recursive_glob\nfrom ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate\n\nclass mapillaryVistasLoader(data.Dataset):\n def __init__(\n self,\n root,\n split=\"training\",\n img_size=(1025, 2049),\n is_transform=True,\n augmentations=None,\n test_mode=False,\n ):\n self.root = root\n self.split = split\n self.is_transform = is_transform\n self.augmentations = augmentations\n self.n_classes = 9\n\n self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)\n self.mean = np.array([80.5423, 91.3162, 81.4312])\n self.files = {}\n\n if not test_mode:\n self.images_base = os.path.join(self.root, self.split, \"images\")\n self.annotations_base = os.path.join(self.root, self.split, \"labels\")\n self.files[split] = recursive_glob(rootdir=self.images_base, suffix=\".jpg\")\n if not self.files[split]:\n raise Exception(\"No files for split=[%s] found in %s\" % (split, self.images_base))\n\n print(\"Found %d %s images\" % (len(self.files[split]), split))\n self.class_names, self.class_ids, self.class_colors, self.class_major_ids = self.parse_config()\n\n self.ignore_id = 250\n\n\n\n def parse_config(self):\n with open(os.path.join(self.root, \"config.json\")) as config_file:\n config = json.load(config_file)\n\n labels = config[\"labels\"]\n\n class_names = []\n class_ids = []\n class_colors = []\n class_major_ids = []\n\n for label_id, label in enumerate(labels):\n class_names.append(label[\"readable\"])\n class_ids.append(label_id)\n class_colors.append(label[\"color\"])\n class_major_ids.append(label['majorclass'])\n print(\"There are {} labels in the config file\".format(len(set(class_major_ids))))\n return class_names, class_ids, class_colors, class_major_ids\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n return len(self.files[self.split])\n\n def __getitem__(self, index):\n \"\"\"__getitem__\n :param index:\n \"\"\"\n img_path = self.files[self.split][index].rstrip()\n lbl_path = os.path.join(\n self.annotations_base, os.path.basename(img_path).replace(\".jpg\", \".png\")\n )\n\n img = Image.open(img_path)\n lbl = Image.open(lbl_path)\n if self.augmentations is not None:\n img, lbl = self.augmentations(img, lbl)\n if self.is_transform:\n img, lbl = self.transform(img, lbl)\n return img, lbl\n\n def transform(self, img, lbl):\n if self.img_size == (\"same\", \"same\"):\n pass\n else:\n img = img.resize(\n (self.img_size[1], self.img_size[0]), resample=Image.LANCZOS\n ) # uint8 with RGB mode\n lbl = lbl.resize((self.img_size[1], self.img_size[0]))\n img = np.array(img).astype(np.float64) / 255.0\n img = torch.from_numpy(img.transpose(2, 0, 1)).float() # From HWC to CHW\n #\n # lbl = torch.from_numpy(np.array(lbl)).long()\n # lbl[lbl == 65] = self.ignore_id\n #\n lbl = torch.from_numpy(np.array(lbl)).long()\n lbl[lbl == self.ignore_id] = 65\n lbl = self.encode_segmap(lbl)\n lbl[lbl == 0] = self.ignore_id\n return img, lbl\n\n def decode_segmap(self, temp):\n class_major_colors = [[0, 0, 0],\n [70, 70, 70],\n [180, 165, 180],\n [128, 64, 64],\n [220, 20, 60],\n [255, 255, 255],\n [70, 130, 180],\n [250, 170, 30],\n [0, 0, 142]]\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0, len(class_major_colors)):\n r[temp == l] = class_major_colors[l][0]\n g[temp == l] = class_major_colors[l][1]\n b[temp == l] = class_major_colors[l][2]\n\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n # 
rgb[:, :, 0] = r / 255.0\n # rgb[:, :, 1] = g / 255.0\n # rgb[:, :, 2] = b / 255.0\n rgb[:, :, 0] = r\n rgb[:, :, 1] = g\n rgb[:, :, 2] = b\n return rgb\n\n def encode_segmap(self, mask):\n # Put all void classes to zero\n for id in self.class_ids:\n mask[mask == id] = self.class_major_ids[id]+100\n\n mask = mask - 100\n return mask\n\n\nif __name__ == \"__main__\":\n augment = Compose([RandomHorizontallyFlip(0.5), RandomRotate(6)])\n\n local_path = \"/home/lin/Documents/dataset/mapillary\"\n dst = mapillaryVistasLoader(\n local_path, split='validation', img_size=(512, 1024), is_transform=True, augmentations=None\n )\n bs = 1\n trainloader = data.DataLoader(dst, batch_size=bs, num_workers=4, shuffle=True)\n for i, data_samples in enumerate(trainloader):\n x = dst.decode_segmap(data_samples[1][0].numpy())\n x = Image.fromarray(np.uint8(x))\n x.show()\n\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.zeros",
"numpy.uint8"
]
] |
vinay-swamy/gMVP | [
"62202baa0769dfe0e47c230e78dffa42fb1280f1"
] | [
"trainer/trainer.py"
] | [
"import time\nimport json\nimport argparse\nimport os\nimport sys\nimport logging\nimport shutil\nfrom datetime import datetime\nimport glob\nimport random\n\nfrom scipy.stats import mannwhitneyu\nfrom scipy.stats import spearmanr\n\nimport numpy as np\n\nfrom sklearn.metrics import roc_auc_score, precision_recall_curve, auc\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\n#from optimization import create_optimizer\n\nfrom model_attention import ModelAttention\n\nfrom dataset import build_dataset\nfrom loss import compute_loss\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\ntf.config.threading.set_intra_op_parallelism_threads(60)\ntf.config.threading.set_inter_op_parallelism_threads(60)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nlogging_formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\nch = logging.StreamHandler(sys.stdout)\nch.setFormatter(logging_formatter)\nlogger.addHandler(ch)\n\n\nclass LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, base_lr, end_learning_rate, warmup_steps, decay_steps):\n super(LearningRate, self).__init__()\n self.base_lr = base_lr\n self.warmup_steps = warmup_steps\n self.decay_steps = decay_steps\n if decay_steps == 0:\n self.poly_decay_fn = lambda x: self.base_lr\n else:\n self.poly_decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n base_lr,\n decay_steps,\n end_learning_rate=end_learning_rate,\n power=1.0)\n\n def __call__(self, step):\n lr = tf.cond(\n step < self.warmup_steps, lambda: self.base_lr * tf.cast(\n step + 1, tf.float32) / tf.cast(self.warmup_steps, tf.float32),\n lambda: self.poly_decay_fn(step - self.warmup_steps))\n #if step % 100 == 0:\n # tf.print('learning_rate', step, lr)\n\n return lr\n\n\nclass TestMetric(object):\n def __init__(self):\n self._targets = tf.zeros((0, ), tf.int32)\n self._preds = tf.zeros((0, ), tf.float32)\n\n def reset_states(self):\n self._targets = tf.zeros((0, ), tf.int32)\n self._preds = tf.zeros((0, ), tf.float32)\n\n def update_state(self, targets, preds):\n self._targets = tf.concat(\n [self._targets, tf.cast(targets, tf.int32)], axis=-1)\n self._preds = tf.concat(\n [self._preds, tf.cast(preds, tf.float32)], axis=-1)\n\n def result_auROC(self):\n try:\n auROC = roc_auc_score(self._targets.numpy(), self._preds.numpy())\n return auROC\n except:\n return 0.0\n\n def result_auPR(self):\n try:\n precision, recall, _ = precision_recall_curve(\n self._targets.numpy(), self._preds.numpy())\n auPR = auc(recall, precision)\n return auPR\n except:\n return 0.0\n\n def result_pvalue(self):\n all_pred = self._preds.numpy()\n all_label = self._targets.numpy()\n mtest = mannwhitneyu(all_pred[all_label == 1],\n all_pred[all_label == 0],\n alternative='two-sided')\n pvalue = mtest.pvalue\n return pvalue\n\n def result_total(self):\n res = self._targets.numpy()\n return res.shape[0]\n\n def result_neg(self):\n res = self._targets.numpy()\n return res.shape[0] - np.sum(res)\n\n def result_pos(self):\n res = self._targets.numpy()\n return np.sum(res)\n\n def result_corr(self):\n try:\n all_pred = self._preds.numpy()\n all_label = self._targets.numpy()\n corr, pvalue = spearmanr(all_pred, all_label)\n return corr, pvalue\n except:\n return 0.0\n\n def result_max(self):\n try:\n all_pred = self._preds.numpy()\n return np.max(all_pred)\n except:\n return 0.0\n\n\ndef train_single_gpu(config, args):\n #setup logger\n str_t = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n train_dir = 
f'./res/{str_t}'\n config['train']['train_dir'] = train_dir\n os.makedirs(train_dir)\n os.makedirs(train_dir + '/result')\n os.makedirs(train_dir + '/model')\n\n fh = logging.FileHandler(f'{train_dir}/train.log')\n fh.setFormatter(logging_formatter)\n logger.addHandler(fh)\n\n logger.info(json.dumps(config, indent=4))\n\n #train and validate files\n batch_size = config['train']['batch_size']\n input_config = config['input']\n input_base_dir = input_config['base_dir']\n all_files = glob.glob(input_base_dir + '/' + input_config['train'][:-1] +\n args.random + '*tfrec')\n #all_files = glob.glob('../dataset/tf/f_v1_w64_2021_v2' + '/' +\n # input_config['train'][:-1] + args.random + '*tfrec')\n random.seed(2020)\n random.shuffle(all_files)\n train_files, validate_files = [], []\n for i in range(10):\n if i == args.cv:\n validate_files.append(all_files[i])\n else:\n train_files.append(all_files[i])\n\n print(train_files)\n print(validate_files)\n\n asd = glob.glob(input_base_dir + '/' + 'ASD' + '.tfrec')\n ndd = glob.glob(input_base_dir + '/' + 'NDD' + '.tfrec')\n control = glob.glob(input_base_dir + '/' + 'Control' + '.tfrec')\n brca2 = glob.glob(input_base_dir + '/' + 'BRCA2' + '.tfrec')\n pparg = glob.glob(input_base_dir + '/' + 'PPARG' + '.tfrec')\n #train_files += pparg\n\n train_dataset = build_dataset(train_files, batch_size)\n validate_dataset = build_dataset(validate_files, batch_size)\n\n #model\n model_type = config['train']['model_type']\n if model_type == 'attention':\n model = ModelAttention(config['model'])\n else:\n raise ValueError(f'model type {model_type} does not exist.')\n #learning rate\n init_learning_rate = config['train']['learning_rate']\n end_learning_rate = config['train']['end_learning_rate']\n '''\n warmup_epochs = config['train']['warmup_epochs']\n decay_epochs = config['train']['decay_epochs']\n\n training_samples = 0\n for inputs in train_dataset:\n training_samples += inputs[0].shape[0]\n logger.info(f'training_samples= {training_samples}')\n\n batches_each_epoch = int(training_samples / batch_size)\n warmup_steps = batches_each_epoch * warmup_epochs\n decay_steps = batches_each_epoch * decay_epochs\n '''\n\n warmup_steps, decay_steps = config['train']['warmup_steps'], config[\n 'train']['decay_steps']\n\n learning_rate = LearningRate(init_learning_rate,\n end_learning_rate=end_learning_rate,\n warmup_steps=warmup_steps,\n decay_steps=decay_steps)\n\n #training algorithm\n opt = config['train'].get('opt', 'adam')\n if opt == 'adam':\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n #optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)\n elif opt == 'adamw':\n weight_decay_rate = config['train']['weight_decay_rate']\n optimizer = tfa.optimizers.AdamW(\n weight_decay=weight_decay_rate,\n learning_rate=learning_rate,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-8,\n )\n '''\n optimizer = create_optimizer(init_learning_rate,\n decay_steps + warmup_steps,\n warmup_steps,\n end_lr=end_learning_rate,\n optimizer_type='adamw')\n '''\n\n else:\n raise NotImplementedError(f\"opt {opt} not NotImplementedError\")\n\n #metrics\n metric_train_loss = tf.keras.metrics.Mean(name='train_loss')\n metric_test_loss = tf.keras.metrics.Mean(name='test_loss')\n metric_test = TestMetric()\n\n #summary\n train_log_dir = f'{train_dir}/summary/train'\n train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n\n def _update_histogram_summary():\n with train_summary_writer.as_default():\n for var in model.trainable_variables:\n if 'kernel:' in var.name or 
'gamma:' in var.name or 'beta:' in var.name:\n tf.summary.histogram(var.name,\n var,\n step=optimizer.iterations)\n\n def _update_gradient_norm_summary(var, grad):\n with train_summary_writer.as_default():\n for v, g in zip(var, grad):\n if 'kernel:' in v.name or 'gamma:' in v.name or 'beta:' in v.name:\n tf.summary.scalar(f'gradient_norm/{v.name}',\n tf.norm(g, ord='euclidean'),\n step=optimizer.iterations)\n\n @tf.function(input_signature=[validate_dataset.element_spec])\n def test_step(sample):\n var, ref_aa, alt_aa, feature, label, padding_mask = sample\n\n logit = model((ref_aa, alt_aa, feature), False, padding_mask)\n\n loss = compute_loss(label, logit)\n\n pred = model.predict_from_logit(logit)\n\n return var, label, pred, loss\n\n def _save_res(var_id, target, pred, name, epoch):\n with open(f'{train_dir}/result/epoch_{epoch}_{name}.score', 'w') as f:\n f.write('var\\ttarget\\tScore\\n')\n for a, c, d in zip(var_id, target, pred):\n f.write('{}\\t{:d}\\t{:f}\\n'.format(a.numpy().decode('utf-8'),\n int(c), d))\n return True\n\n def test(test_dataset,\n data_name,\n epoch,\n auc=False,\n pvalue=False,\n corr=False):\n metric_test_loss.reset_states()\n metric_test.reset_states()\n\n all_pred, all_label, all_var = [], [], []\n\n for step, sample in enumerate(test_dataset):\n var, label, pred, loss = test_step(sample)\n metric_test.update_state(label, pred)\n metric_test_loss.update_state(loss)\n\n all_pred.extend(list(pred))\n all_label.extend(list(label))\n all_var.extend(list(var))\n\n all_var = np.array(all_var)\n all_label = np.array(all_label)\n all_pred = np.array(all_pred)\n\n _save_res(all_var, all_label, all_pred, data_name, epoch)\n\n if auc:\n logger.info(\n f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} auPR= {metric_test.result_auPR()} auROC= {metric_test.result_auROC()} max= {metric_test.result_max()}'\n )\n if pvalue:\n logger.info(\n f'{data_name} pos= {metric_test.result_pos()} neg= {metric_test.result_neg()} loss= {metric_test_loss.result()} pvalue= {metric_test.result_pvalue()}'\n )\n\n if corr:\n corr, pvalue = metric_test.result_corr()\n logger.info(\n f'{data_name} pos= {metric_test.result_total()} corr= {corr} pvalue= {pvalue} max= {metric_test.result_max()}'\n )\n\n return metric_test_loss.result()\n\n @tf.function(input_signature=[train_dataset.element_spec])\n def train_step(sample):\n var, ref_aa, alt_aa, feature, label, padding_mask = sample\n with tf.GradientTape() as tape:\n logit = model((ref_aa, alt_aa, feature), True, padding_mask)\n loss = compute_loss(label, logit)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n metric_train_loss.update_state(loss)\n #if optimizer.iterations % 512 == 0:\n # _update_gradient_norm_summary(model.trainable_variables, gradients)\n\n return loss\n\n EPOCHS = 512\n watch_loss = 10000.0\n watch_epoch = -1\n patience_epochs = 5\n for epoch in range(EPOCHS):\n start = time.time()\n\n for step, samples in enumerate(train_dataset):\n loss = train_step(samples)\n #tf.print(\n # f'lr= {learning_rate(global_step)} wd={weight_decay(global_step)}'\n #)\n\n #model summary\n if optimizer.iterations == 1:\n model.summary(print_fn=logger.info)\n\n #logging kernel weights\n #if (optimizer.iterations + 1) % 512 == 0:\n # _update_histogram_summary()\n\n logger.info(f'Epoch {epoch} Loss {metric_train_loss.result():.4f}')\n metric_train_loss.reset_states()\n\n 
model.save_weights(f'{train_dir}/model/epoch-{epoch}.h5')\n\n #validate and test\n validate_loss = test(validate_dataset,\n 'validate',\n epoch,\n pvalue=False,\n auc=True,\n corr=False)\n if validate_loss < watch_loss:\n watch_loss = validate_loss\n watch_epoch = epoch\n\n #denovo\n if epoch - watch_epoch == patience_epochs:\n logger.info(f'best_epoch {watch_epoch} min_loss= {watch_loss}')\n break\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', type=str, required=True)\n parser.add_argument('--cv', type=int, default=0)\n parser.add_argument('--random', type=str, default='0')\n args = parser.parse_args()\n\n with open(args.config) as f:\n config = json.load(f)\n\n train_single_gpu(config, args)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sum",
"tensorflow.zeros",
"tensorflow.keras.optimizers.Adam",
"tensorflow.summary.histogram",
"tensorflow.norm",
"tensorflow.function",
"tensorflow.keras.optimizers.schedules.PolynomialDecay",
"sklearn.metrics.auc",
"tensorflow.config.threading.set_intra_op_parallelism_threads",
"tensorflow.cast",
"tensorflow.GradientTape",
"tensorflow.config.threading.set_inter_op_parallelism_threads",
"numpy.max",
"scipy.stats.mannwhitneyu",
"numpy.array",
"tensorflow.keras.metrics.Mean",
"tensorflow.summary.create_file_writer",
"scipy.stats.spearmanr"
]
] |
v1259397/cosmic-gnuradio | [
"64c149520ac6a7d44179c3f4a38f38add45dd5dc"
] | [
"gnuradio-3.7.13.4/gr-analog/python/analog/qa_fastnoise.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright 2007,2010,2012 Free Software Foundation, Inc.\n#\n# This file is part of GNU Radio\n#\n# GNU Radio is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# GNU Radio is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with GNU Radio; see the file COPYING. If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n#\n\nfrom gnuradio import gr, gr_unittest, analog, blocks\nimport numpy\n\n\nclass test_fastnoise_source(gr_unittest.TestCase):\n\n def setUp (self):\n\n self.num = 2**22\n self.num_items = 10**6\n self.default_args = {\"samples\": self.num, \"seed\": 43, \"ampl\": 1}\n\n def tearDown (self):\n pass\n\n def run_test_real(self, form):\n \"\"\" Run test case with float input/output\n \"\"\"\n tb = gr.top_block()\n src = analog.fastnoise_source_f(type=form, **self.default_args)\n head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_float)\n sink = blocks.vector_sink_f()\n tb.connect(src, head, sink)\n tb.run()\n return numpy.array(sink.data())\n\n def run_test_complex(self, form):\n \"\"\" Run test case with complex input/output\n \"\"\"\n tb = gr.top_block()\n src = analog.fastnoise_source_c(type=form, **self.default_args)\n head = blocks.head(nitems=self.num_items, sizeof_stream_item=gr.sizeof_gr_complex)\n sink = blocks.vector_sink_c()\n tb.connect(src, head, sink)\n tb.run()\n return numpy.array(sink.data())\n\n def test_001_real_uniform_moments(self):\n\n data = self.run_test_real(analog.GR_UNIFORM)\n\n self.assertAlmostEqual(min(data), -1, places=4)\n self.assertAlmostEqual(max(data), 1, places=4)\n\n # mean, variance\n self.assertAlmostEqual(data.mean(), 0, places=2)\n self.assertAlmostEqual(data.var(), (1-(-1))**2./12, places=3)\n\n def test_001_real_gaussian_moments(self):\n data = self.run_test_real(analog.GR_GAUSSIAN)\n\n # mean, variance\n self.assertAlmostEqual(data.mean(), 0, places=2)\n self.assertAlmostEqual(data.var(), 1, places=2)\n\n def test_001_real_laplacian_moments(self):\n data = self.run_test_real(analog.GR_LAPLACIAN)\n\n # mean, variance\n self.assertAlmostEqual(data.mean(), 0, places=2)\n self.assertAlmostEqual(data.var(), 2, places=2)\n\n def test_001_complex_uniform_moments(self):\n data = self.run_test_complex(analog.GR_UNIFORM)\n\n # mean, variance\n self.assertAlmostEqual(data.real.mean(), 0, places=2)\n self.assertAlmostEqual(data.real.var(), 0.5*(1-(-1))**2./12, places=3)\n\n self.assertAlmostEqual(data.imag.mean(), 0, places=2)\n self.assertAlmostEqual(data.imag.var(), 0.5*(1-(-1))**2./12, places=3)\n\n def test_001_complex_gaussian_moments(self):\n data = self.run_test_complex(analog.GR_GAUSSIAN)\n\n # mean, variance\n self.assertAlmostEqual(data.real.mean(), 0, places=2)\n self.assertAlmostEqual(data.real.var(), 0.5, places=2)\n\n self.assertAlmostEqual(data.imag.mean(), 0, places=2)\n self.assertAlmostEqual(data.imag.var(), 0.5, places=2)\n\n def test_002_real_uniform_reproducibility(self):\n data1 = self.run_test_real(analog.GR_UNIFORM)\n data2 = self.run_test_real(analog.GR_UNIFORM)\n\n # It's pseudoramdo thus must be 
equal\n self.assertTrue(numpy.array_equal(data1, data2))\n\n def test_002_real_gaussian_reproducibility(self):\n data1 = self.run_test_real(analog.GR_GAUSSIAN)\n data2 = self.run_test_real(analog.GR_GAUSSIAN)\n\n self.assertTrue(numpy.array_equal(data1, data2))\n\n def test_003_real_uniform_pool(self):\n src = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)\n src2 = analog.fastnoise_source_f(type=analog.GR_UNIFORM, **self.default_args)\n self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))\n def test_003_real_gaussian_pool(self):\n src = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)\n src2 = analog.fastnoise_source_f(type=analog.GR_GAUSSIAN, **self.default_args)\n self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))\n def test_003_cmplx_gaussian_pool(self):\n src = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)\n src2 = analog.fastnoise_source_c(type=analog.GR_GAUSSIAN, **self.default_args)\n self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))\n def test_003_cmplx_uniform_pool(self):\n src = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)\n src2 = analog.fastnoise_source_c(type=analog.GR_UNIFORM, **self.default_args)\n self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))\n def test_003_real_laplacian_pool(self):\n src = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)\n src2 = analog.fastnoise_source_f(type=analog.GR_LAPLACIAN, **self.default_args)\n self.assertTrue(numpy.array_equal(numpy.array(src.samples()), numpy.array(src2.samples())))\nif __name__ == '__main__':\n gr_unittest.run(test_fastnoise_source, \"test_fastnoise_source.xml\")\n"
] | [
[
"numpy.array_equal"
]
] |
Saiprasad16/transform | [
"774458bf0c296f8275fedf3ace303427654dace7"
] | [
"tensorflow_transform/schema_inference.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Logic associated with schema inference and propagation.\n\nThis module contains functionality to set the schema assciated with a Tensor,\nand to infer the schema for a tensor, including any information that has been\nset. This module will also contain any schema propagation logic, i.e. deducing\nthe schema of a tensor from its parents in the graph.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom typing import Callable, Dict, Mapping, Optional, Tuple\n\n# GOOGLE-INITIALIZATION\n\nimport tensorflow as tf\nfrom tensorflow_transform import common\nfrom tensorflow_transform import common_types\nfrom tensorflow_transform import graph_context\nfrom tensorflow_transform import tf2_utils\nfrom tensorflow_transform import tf_utils\nfrom tensorflow_transform.saved import saved_transform_io_v2\nfrom tensorflow_transform.tf_metadata import schema_utils\n\nfrom google.protobuf import any_pb2\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import ops\n# pylint: enable=g-direct-tensorflow-import\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\n\ndef _feature_spec_from_batched_tensors(tensors):\n \"\"\"Infer a feature spec from a dict of tensors.\n\n Args:\n tensors: A dict whose keys are strings and values are `Tensor`,\n `SparseTensor`, or `RaggedTensor`s.\n\n Returns:\n A feature spec inferred from the types and shapes of the tensors.\n\n Raises:\n ValueError: If the feature spec cannot be inferred.\n TypeError: If any of the values of `tensors` are not a `Tensor`,\n `SparseTensor`, or `RaggedTensor`.\n \"\"\"\n feature_spec = {}\n for name, tensor in tensors.items():\n if tensor.dtype not in (tf.string, tf.int64, tf.float32):\n raise ValueError('Feature {} ({}) had invalid dtype {} for feature spec'\n .format(name, tensor, tensor.dtype))\n if isinstance(tensor, tf.SparseTensor):\n shape = tensor.get_shape()\n if shape.ndims > 2:\n feature_spec[name] = tf.io.SparseFeature(\n index_key=[\n '{}$sparse_indices_{}'.format(name, idx)\n for idx in range(shape.ndims - 1)\n ],\n value_key='{}$sparse_values'.format(name),\n dtype=tensor.dtype,\n size=shape[1:],\n already_sorted=True)\n else:\n feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)\n elif isinstance(tensor, tf.Tensor):\n shape = tensor.get_shape()\n if shape.ndims in [None, 0]:\n raise ValueError(\n 'Feature {} ({}) had invalid shape {} for FixedLenFeature: must '\n 'have rank at least 1'.format(name, tensor, shape))\n if any(dim is None for dim in shape.as_list()[1:]):\n raise ValueError(\n 'Feature {} ({}) had invalid shape {} for FixedLenFeature: apart '\n 'from the batch dimension, all dimensions must have known size'\n .format(name, tensor, shape))\n feature_spec[name] = tf.io.FixedLenFeature(shape.as_list()[1:],\n tensor.dtype)\n elif isinstance(tensor, 
tf.RaggedTensor):\n tf.compat.v1.logging.warn(\n 'Feature %s was a RaggedTensor. A Schema will be generated but the '\n 'Schema cannot be used with a coder (e.g. to materialize output '\n 'data) or to generated a feature spec.', name)\n # Arbitrarily select VarLenFeature.\n feature_spec[name] = tf.io.VarLenFeature(tensor.dtype)\n else:\n raise TypeError(\n 'Expected a Tensor, SparseTensor, or RaggedTensor got {} of type {} '\n 'for feature {}'\n .format(tensor, type(tensor), name))\n\n return feature_spec\n\n\ndef infer_feature_schema(features, graph, session=None):\n \"\"\"Given a dict of tensors, creates a `Schema`.\n\n Infers a schema, in the format of a tf.Transform `Schema`, for the given\n dictionary of tensors.\n\n If there is an override specified, we override the inferred schema for the\n given feature's tensor. An override has the meaning that we should set\n is_categorical=True. If session is not provided then we just set\n is_categorical=True, and if the session is provided then was also compute\n values of the tensors representing the min and max values and set them in the\n schema.\n\n If annotations have been specified, they are added to the output schema.\n\n Args:\n features: A dict mapping column names to `Tensor` or `SparseTensor`s. The\n `Tensor` or `SparseTensor`s should have a 0'th dimension which is\n interpreted as the batch dimension.\n graph: A `tf.Graph` used to determine schema overrides.\n session: (optional) A `tf.Session` used to compute schema overrides. If\n None, schema overrides will not be computed.\n\n Returns:\n A `Schema` proto.\n \"\"\"\n tensor_ranges = _get_tensor_ranges(graph)\n if session is None:\n tensor_ranges = {hashable: (None, None) for hashable in tensor_ranges}\n tensor_annotations = {}\n global_annotations = []\n else:\n tensor_ranges = session.run(tensor_ranges)\n tensor_annotations, global_annotations = _get_schema_annotations(\n graph, session)\n modified_tensor_ranges = {}\n feature_annotations = {}\n for name, tensor in features.items():\n if isinstance(tensor, tf.SparseTensor):\n values = tensor.values\n elif isinstance(tensor, tf.RaggedTensor):\n values = tensor.flat_values\n else:\n values = tensor\n hashable_values = tf_utils.hashable_tensor_or_op(values)\n if hashable_values in tensor_ranges:\n assert values.dtype == tf.int64\n modified_tensor_ranges[name] = tensor_ranges[hashable_values]\n # tensor_annotations is a defaultdict(list) so always returns a list.\n feature_annotations[name] = tensor_annotations.get(hashable_values, [])\n\n return _infer_feature_schema_common(features, modified_tensor_ranges,\n feature_annotations, global_annotations)\n\n\ndef infer_feature_schema_v2(features, concrete_metadata_fn,\n evaluate_schema_overrides):\n \"\"\"Given a dict of tensors, creates a `Schema`.\n\n Infers a schema, in the format of a tf.Transform `Schema`, for the given\n dictionary of tensors.\n\n If there is an override specified, we override the inferred schema for the\n given feature's tensor. An override has the meaning that we should set\n is_categorical=True. If evaluate_schema_overrides is False then we just set\n is_categorical=True, and if evaluate_schema_overrides is True then we also\n compute values of the tensors representing the min and max values and set them\n in the schema.\n\n If annotations have been specified, they are added to the output schema.\n\n Args:\n features: A dict mapping column names to `Tensor` or `SparseTensor`s. 
The\n `Tensor` or `SparseTensor`s should have a 0'th dimension which is\n interpreted as the batch dimension.\n concrete_metadata_fn: A `tf.ConcreteFunction` that returns a dictionary\n containing the deferred annotations added to the graph when invoked with\n any valid input.\n evaluate_schema_overrides: A Boolean used to compute schema overrides. If\n `False`, schema overrides will not be computed.\n\n Returns:\n A `Schema` proto.\n \"\"\"\n optimized_concrete_fn = saved_transform_io_v2.optimize_concrete_function(\n concrete_metadata_fn)\n metadata = collections.defaultdict(list, optimized_concrete_fn())\n\n if not evaluate_schema_overrides:\n tensor_ranges = {\n tensor.numpy().decode(): (None, None)\n for tensor in metadata[_TF_METADATA_TENSOR_COLLECTION]\n }\n tensor_annotations = {}\n global_annotations = []\n else:\n tensor_ranges = _get_tensor_ranges_v2(metadata)\n tensor_annotations, global_annotations = _get_schema_annotations_v2(\n metadata)\n return _infer_feature_schema_common(features, tensor_ranges,\n tensor_annotations, global_annotations)\n\n\ndef _infer_feature_schema_common(features, tensor_ranges, feature_annotations,\n global_annotations):\n \"\"\"Given a dict of tensors, creates a `Schema`.\n\n Args:\n features: A dict mapping column names to `Tensor` or `SparseTensor`s. The\n `Tensor` or `SparseTensor`s should have a 0'th dimension which is\n interpreted as the batch dimension.\n tensor_ranges: A dict mapping a tensor to a tuple containing its min and max\n value.\n feature_annotations: dictionary from feature name to list of any_pb2.Any\n protos to be added as an annotation for that feature in the schema.\n global_annotations: list of any_pb2.Any protos to be added at the global\n schema level.\n\n Returns:\n A `Schema` proto.\n \"\"\"\n domains = {}\n feature_tags = collections.defaultdict(list)\n for name, tensor in features.items():\n if isinstance(tensor, tf.RaggedTensor):\n # Add the 'ragged_tensor' tag which will cause coder and\n # schema_as_feature_spec to raise an error, as currently there is no\n # feature spec for ragged tensors.\n feature_tags[name].append(schema_utils.RAGGED_TENSOR_TAG)\n if name in tensor_ranges:\n min_value, max_value = tensor_ranges[name]\n domains[name] = schema_pb2.IntDomain(\n min=min_value, max=max_value, is_categorical=True)\n feature_spec = _feature_spec_from_batched_tensors(features)\n\n schema_proto = schema_utils.schema_from_feature_spec(feature_spec, domains)\n\n # Add the annotations to the schema.\n for annotation in global_annotations:\n schema_proto.annotation.extra_metadata.add().CopyFrom(annotation)\n # Build a map from logical feature names to Feature protos\n feature_protos_by_name = {}\n for feature in schema_proto.feature:\n feature_protos_by_name[feature.name] = feature\n for sparse_feature in schema_proto.sparse_feature:\n for index_feature in sparse_feature.index_feature:\n feature_protos_by_name.pop(index_feature.name)\n value_feature = feature_protos_by_name.pop(\n sparse_feature.value_feature.name)\n feature_protos_by_name[sparse_feature.name] = value_feature\n # Update annotations\n for feature_name, annotations in feature_annotations.items():\n feature_proto = feature_protos_by_name[feature_name]\n for annotation in annotations:\n feature_proto.annotation.extra_metadata.add().CopyFrom(annotation)\n for feature_name, tags in feature_tags.items():\n feature_proto = feature_protos_by_name[feature_name]\n for tag in tags:\n feature_proto.annotation.tag.append(tag)\n return schema_proto\n\n\n# Names of 
collections, which should all be the same length and contain tensors.\n# Each tensor in the first collection should have its min/max described by the\n# tensors in the other two collections.\n_TF_METADATA_TENSOR_COLLECTION = 'tft_schema_override_tensor'\n_TF_METADATA_TENSOR_MIN_COLLECTION = 'tft_schema_override_min'\n_TF_METADATA_TENSOR_MAX_COLLECTION = 'tft_schema_override_max'\n# Collections for adding to annotation.extra_metadata on the schema. Each\n# tensor in the first collection should have a proto type and proto message in\n# the other two collections\n_TF_METADATA_EXTRA_ANNOTATION = 'tft_schema_override_annotation_tensor'\n_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL = 'tft_schema_override_annotation_type'\n_TF_METADATA_EXTRA_ANNOTATION_PROTO = 'tft_schema_override_annotation_proto'\n# Used to indicate that an annotation should be applied at the schema level.\n_TF_METADATA_EXTRA_ANNOTATION_GLOBAL = 'tft_schema_override_global_sentinel'\n\n\ndef set_tensor_schema_override(tensor, min_value, max_value):\n \"\"\"Override parts of the schema of a `Tensor`.\n\n Args:\n tensor: The `Tensor` whose range is being set. Must have dtype int64.\n min_value: A `Tensor` representing the min value of `tensor`.\n max_value: A `Tensor` representing the max value of `tensor`.\n\n Raises:\n ValueError: If any arguments are invalid.\n \"\"\"\n if not isinstance(tensor, tf.Tensor):\n raise ValueError('tensor {} was not a Tensor'.format(tensor))\n if tensor.dtype != tf.int64:\n raise ValueError(\n 'Range can only be set for feature of type tf.int64, got {}'.format(\n tensor.dtype))\n if not isinstance(min_value, tf.Tensor):\n raise ValueError('min_value {} was not a Tensor'.format(min_value))\n if not isinstance(max_value, tf.Tensor):\n raise ValueError('max_value {} was not a Tensor'.format(max_value))\n tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_COLLECTION, tensor)\n tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MIN_COLLECTION, min_value)\n tf.compat.v1.add_to_collection(_TF_METADATA_TENSOR_MAX_COLLECTION, max_value)\n\n\ndef _get_tensor_ranges(graph):\n \"\"\"Lookup overrides for `Tensor`s or `SparseTensor`s.\"\"\"\n tensors = graph.get_collection(_TF_METADATA_TENSOR_COLLECTION)\n min_values = graph.get_collection(_TF_METADATA_TENSOR_MIN_COLLECTION)\n max_values = graph.get_collection(_TF_METADATA_TENSOR_MAX_COLLECTION)\n assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)\n assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)\n return dict(zip(map(tf_utils.hashable_tensor_or_op, tensors),\n zip(min_values, max_values)))\n\n\ndef _get_tensor_ranges_v2(metadata):\n \"\"\"Lookup overrides for `Tensor`s or `SparseTensor`s.\"\"\"\n tensors = metadata[_TF_METADATA_TENSOR_COLLECTION]\n min_values = metadata[_TF_METADATA_TENSOR_MIN_COLLECTION]\n max_values = metadata[_TF_METADATA_TENSOR_MAX_COLLECTION]\n assert len(tensors) == len(min_values), '{} != {}'.format(tensors, min_values)\n assert len(tensors) == len(max_values), '{} != {}'.format(tensors, max_values)\n return {\n tensor.numpy().decode(): (min_value.numpy(), max_value.numpy())\n for (tensor, min_value, max_value) in zip(tensors, min_values, max_values)\n }\n\n\ndef get_tensor_schema_override(\n tensor: common_types.TensorType) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"Lookup schema overrides for a `Tensor` or `SparseTensor`.\"\"\"\n if isinstance(tensor, tf.SparseTensor):\n tensor = tensor.values\n overrides = _get_tensor_ranges(tensor.graph)\n min_max = 
overrides.get(tf_utils.hashable_tensor_or_op(tensor), None)\n if min_max is None:\n raise ValueError('Requested tensor does not have recorded min/max values')\n return min_max\n\n\ndef annotate(type_url, proto_message, tensor=None):\n \"\"\"Adds a deferred annotation to the schema.\n\n Experimental: This API is subject to change.\n\n This function allows analyzers or end users to annotate the post-transform\n schema with additional information based on analyzer output. These annotations\n are stored in the annotation.extra_metadata field of the tf.metadata schema:\n https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/schema.proto#L193\n\n Args:\n type_url: A string or string `Tensor` containing the type url which uniquely\n identifies the type of the serialized proto message. See\n https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto#L151\n proto_message: A deferred string tensor containing the serialized proto to\n write to the feature schema.\n tensor: (optional) If provided, the annotation will be written to the\n Feature proto that is created for this tensor in the schema. If None,\n the annotation is assumed to be global. Note: if the tensor is not present\n in the output signature of `preprocessing_fn`, this will be a no-op.\n \"\"\"\n if tensor is None:\n tensor = tf.constant('unused', name=_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)\n\n if not isinstance(tensor, (tf.Tensor, tf.SparseTensor)):\n raise ValueError('tensor {} was not a Tensor'.format(tensor))\n if not isinstance(proto_message, tf.Tensor):\n raise ValueError('proto_message {} was not a Tensor'.format(proto_message))\n\n # If the type_url is passed as a plain string, create a string tensor.\n if not isinstance(type_url, tf.Tensor):\n type_url = tf.constant(type_url, dtype=tf.string)\n # Note: The tensors, types, and messages are stored in separate collections\n # because SavedModel only supports primitive types in collections.\n tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION, tensor)\n tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,\n type_url)\n tf.compat.v1.add_to_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO,\n proto_message)\n\n\ndef _get_schema_annotations(graph, session):\n \"\"\"Fetch extra_metadata annotations to be applied to the schema.\n\n Extracts any deferred annotations that have been added to the graph and\n evaluates them to obtain any_pb2.Any proto messages.\n\n Args:\n graph: A `tf.Graph` used to determine schema overrides.\n session: (optional) A `tf.Session` used to compute schema annotations. 
If\n None, schema annotations will not be computed.\n\n Returns:\n tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to\n be added as an annotation for that tensor's feature in the schema.\n global_annotations: list of any_pb2.Any protos to be added at the global\n schema level.\n \"\"\"\n tensors = graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION)\n type_urls = session.run(\n graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL))\n proto_values = session.run(\n graph.get_collection(_TF_METADATA_EXTRA_ANNOTATION_PROTO))\n tensor_annotation_keys = []\n for tensor in tensors:\n # Entries meant for the global schema annotation will have names like\n # tft_schema_override_global_sentinel:0 or\n # transform/tft_schema_override_global_sentinel_1:0\n tensor_name = tensor.name.split('/')[-1]\n if tensor_name.startswith(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL):\n tensor_annotation_keys.append(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL)\n else:\n tensor_annotation_keys.append(tf_utils.hashable_tensor_or_op(tensor))\n return _get_schema_annotations_common(tensor_annotation_keys, type_urls,\n proto_values)\n\n\ndef _get_schema_annotations_v2(metadata):\n \"\"\"Fetch extra_metadata annotations to be applied to the schema.\n\n Extracts any deferred annotations that have been added to the graph and\n evaluates them to obtain any_pb2.Any proto messages.\n\n Args:\n metadata: A dictionary containing the deferred annotations added to the\n graph.\n\n Returns:\n tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to\n be added as an annotation for that tensor's feature in the schema.\n global_annotations: list of any_pb2.Any protos to be added at the global\n schema level.\n \"\"\"\n type_urls = [\n type_url.numpy()\n for type_url in metadata[_TF_METADATA_EXTRA_ANNOTATION_TYPE_URL]\n ]\n proto_values = [\n proto_value.numpy()\n for proto_value in metadata[_TF_METADATA_EXTRA_ANNOTATION_PROTO]\n ]\n tensor_annotation_keys = [\n tensor.numpy().decode()\n for tensor in metadata[_TF_METADATA_EXTRA_ANNOTATION]\n ]\n return _get_schema_annotations_common(tensor_annotation_keys, type_urls,\n proto_values)\n\n\ndef _get_schema_annotations_common(tensor_annotation_keys, type_urls,\n proto_values):\n \"\"\"Fetch extra_metadata annotations to be applied to the schema.\n\n Args:\n tensor_annotation_keys: A list containing either\n `_TF_METADATA_EXTRA_ANNOTATION_GLOBAL` or a hashed tensor representation\n corresponding to each entry in `proto_values`. If an entry\n is`_TF_METADATA_EXTRA_ANNOTATION_GLOBAL`, the corresponding any_pb2.Any\n proto in `proto_values` is returned in `global_annotations`. 
Otherwise, it\n is returned in `feature_annotations`.\n type_urls: A list of type urls corresponding to the serialized protos in\n `proto_values`.\n proto_values: A list of serialized any_pb2.Any protos.\n\n Returns:\n A tuple of:\n tensor_annotations: dictionary from tensor to list of any_pb2.Any protos to\n be added as an annotation for that tensor's feature in the schema.\n global_annotations: list of any_pb2.Any protos to be added at the global\n schema level.\n \"\"\"\n tensor_annotations = collections.defaultdict(list)\n global_annotations = []\n if not common.IS_ANNOTATIONS_PB_AVAILABLE:\n return tensor_annotations, global_annotations\n assert len(tensor_annotation_keys) == len(type_urls) == len(proto_values)\n for (tensor_annotation_key, type_url,\n proto_value) in zip(tensor_annotation_keys, type_urls, proto_values):\n annotation = any_pb2.Any(type_url=type_url, value=proto_value)\n if (isinstance(_TF_METADATA_EXTRA_ANNOTATION_GLOBAL,\n type(tensor_annotation_key)) and\n tensor_annotation_key == _TF_METADATA_EXTRA_ANNOTATION_GLOBAL):\n global_annotations.append(annotation)\n else:\n tensor_annotations[tensor_annotation_key].append(annotation)\n return tensor_annotations, global_annotations\n\n\ndef _get_tensor_value_to_key_map(features_dict):\n \"\"\"Get reverse map from name of tensor values to key in `features_dict`.\"\"\"\n result = {}\n for key, tensor in features_dict.items():\n if isinstance(tensor, tf.SparseTensor):\n values = tensor.values\n elif isinstance(tensor, tf.RaggedTensor):\n values = tensor.flat_values\n else:\n values = tensor\n result[values.name] = key\n return result\n\n\ndef _get_schema_overrides(graph,\n tensor_name_to_key_map,\n tensor_collection_key,\n overrides_keys,\n default_tensor_name=None):\n \"\"\"Obtain schema overrides from graph collections.\n\n For every tensor in the `tensor_collection_key` collection, the corresponding\n feature name is in `tensor_name_to_key_map` and its schema overrides are in\n the graph collections defined by keys in `overrides_keys`.\n If a tensor does not exist in `tensor_name_to_key_map` but its name starts\n with `default_tensor_name` (if provided), the overrides are returned with this\n key.\n\n Args:\n graph: A `FuncGraph`.\n tensor_name_to_key_map: A dictionary from tensor name to output feature key.\n tensor_collection_key: Key for the graph collection that contains list of\n tensors to annotate.\n overrides_keys: A list of graph collection keys that contain schema\n overrides/annotations.\n default_tensor_name: (Optional) A String. 
If provided, use as feature key if\n a tensor in the graph collections is not in `tensor_name_to_key_map`.\n\n Returns:\n A dictionary from graph collection keys to lists of features and their\n schema overrides/annotations.\n\n \"\"\"\n tensors = graph.get_collection(tensor_collection_key)\n overrides_list = [graph.get_collection(k) for k in overrides_keys]\n\n result = collections.defaultdict(list)\n assert (len(tensors) == len(overrides_list[0]) and\n all(len(lst) == len(overrides_list[0]) for lst in overrides_list))\n for tensor, overrides_tuple in zip(tensors, zip(*overrides_list)):\n if tensor.name in tensor_name_to_key_map:\n result[tensor_collection_key].append(tensor_name_to_key_map[tensor.name])\n else:\n if default_tensor_name is None:\n continue\n tensor_name = tensor.name.split('/')[-1]\n if tensor.dtype == tf.string and tensor_name.startswith(\n default_tensor_name):\n result[tensor_collection_key].append(default_tensor_name)\n else:\n continue\n\n # If a feature name was added to the result list for tensor_collection_key,\n # add its annotations as well.\n assert len(overrides_keys) == len(overrides_tuple)\n for overrides_key, override in zip(overrides_keys, overrides_tuple):\n result[overrides_key].append(override)\n return result\n\n\ndef get_traced_metadata_fn(\n tensor_replacement_map: Optional[Dict[str, tf.Tensor]],\n preprocessing_fn: Callable[[Mapping[str, common_types.InputTensorType]],\n Mapping[str, common_types.InputTensorType]],\n structured_inputs: Mapping[str, common_types.InputTensorType],\n base_temp_dir: str, evaluate_schema_overrides: bool) -> function.Function:\n \"\"\"Get a tf.function that returns a dictionary of annotations.\n\n Annotations are added to graph collections keyed by graph tensor names when\n `preprocessing_fn` is being traced. The metadata fn defined by this method\n converts the graph tensor names to output feature keys.\n\n If `evaluate_schema_overrides` is True, tracing the `preprocessing_fn` will\n add overrides for feature ranges (min/max) and/or feature protos to the graph\n collection, if applicable. These overrides are returned when the function\n returned by this method is invoked.\n\n Args:\n tensor_replacement_map: A map from placeholder tensor names to their\n evaluated replacement tensors.\n preprocessing_fn: A user defined python function to be traced.\n structured_inputs: A dictionary of placeholder inputs to `preprocessing_fn`.\n base_temp_dir: Base path to write any dummy assets to during tracing.\n evaluate_schema_overrides: If `False`, the returned dictionary contains a\n single key `_TF_METADATA_TENSOR_COLLECTION` as all other annotations are\n deferred. Else, the returned dictionary contains several deferred\n annotations.\n\n Returns:\n A dictionary whose keys represent the types of annotations and the values\n are collections of feature keys/annotations.\n \"\"\"\n\n # Since this is a TFT-internal function with constant outputs, autograph will\n # not affect its behavior. 
It will only increase tracing time, if enabled.\n # Hence, trace with `autograph=False` here.\n @tf.function(input_signature=[], autograph=False)\n def metadata_fn():\n graph = ops.get_default_graph()\n inputs = tf2_utils.supply_missing_inputs(structured_inputs, batch_size=1)\n with graph_context.TFGraphContext(\n temp_dir=base_temp_dir, evaluated_replacements=tensor_replacement_map):\n transformed_features = preprocessing_fn(inputs)\n\n # Get a map from tensor value names to feature keys.\n reversed_features = _get_tensor_value_to_key_map(transformed_features)\n\n result = collections.defaultdict(list)\n if not evaluate_schema_overrides:\n schema_override_tensors = graph.get_collection(\n _TF_METADATA_TENSOR_COLLECTION)\n for tensor in schema_override_tensors:\n if tensor.name in reversed_features:\n result[_TF_METADATA_TENSOR_COLLECTION].append(\n reversed_features[tensor.name])\n else:\n # Obtain schema overrides for feature tensor ranges.\n result.update(\n _get_schema_overrides(graph, reversed_features,\n _TF_METADATA_TENSOR_COLLECTION, [\n _TF_METADATA_TENSOR_MIN_COLLECTION,\n _TF_METADATA_TENSOR_MAX_COLLECTION\n ]))\n # Obtain schema overrides for feature protos. If no feature tensor is in\n # the `_TF_METADATA_EXTRA_ANNOTATION` collection for a specified\n # annotation, `_TF_METADATA_EXTRA_ANNOTATION_GLOBAL` is used as the\n # feature name to indicate that this annotation should be added to the\n # global schema.\n result.update(\n _get_schema_overrides(graph, reversed_features,\n _TF_METADATA_EXTRA_ANNOTATION, [\n _TF_METADATA_EXTRA_ANNOTATION_TYPE_URL,\n _TF_METADATA_EXTRA_ANNOTATION_PROTO\n ], _TF_METADATA_EXTRA_ANNOTATION_GLOBAL))\n return result\n\n return metadata_fn\n"
] | [
[
"tensorflow.compat.v1.logging.warn",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.function",
"tensorflow.io.VarLenFeature",
"tensorflow.constant",
"tensorflow.compat.v1.add_to_collection"
]
] |
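The schema_inference code in the row above works by writing a feature tensor and its min/max tensors into three dedicated graph collections, which _get_tensor_ranges later reads back and _infer_feature_schema_common turns into an IntDomain on the output schema. A minimal sketch of how those collections get populated, assuming the module is importable as tensorflow_transform.schema_inference (the import path and tensor names here are illustrative, not taken from the row):

import tensorflow as tf
from tensorflow_transform.schema_inference import set_tensor_schema_override  # assumed path

g = tf.Graph()
with g.as_default():
    # The overridden feature must be an int64 graph tensor; min and max must be tensors too.
    feature = tf.constant([3, 7], dtype=tf.int64, name='bucketized_feature')
    set_tensor_schema_override(feature,
                               min_value=tf.constant(0, dtype=tf.int64),
                               max_value=tf.constant(9, dtype=tf.int64))
    # _get_tensor_ranges(g) now maps this tensor to its (min, max) override tensors, which
    # end up as an is_categorical IntDomain for the corresponding feature in the schema.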
ZzhKlaus/2018-SURF-Trajectory-Estimation | [
"71c62c816d1531f6806bfa9455fec9affe36496c"
] | [
"Data_Processing/BoxPlot_error.py"
] | [
"#By Zhenghang(Klaus) Zhong\n\n#Box Plot of error distribution\n\nfrom pandas import DataFrame\nfrom pandas import read_csv\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot\n# load results into a dataframe\nfilenames_128 = ['dis_diff_128.csv']\nfilenames_256 = ['dis_diff_256.csv']\nfilenames_512 = ['dis_diff_512.csv']\nresults = DataFrame()\n\nfor name in filenames_128:\n\tresults_128 = read_csv(name, header=0,usecols = [1])\n# describe all results, as 1 unit = 10cm, we want to transfer to meters, /10\nresults_128 = results_128.div(10, axis = 0)\n\nfor name in filenames_256:\n\tresults_256 = read_csv(name, header=0,usecols = [1])\n# describe all results\nresults_256 = results_256.div(10, axis = 0)\n\nfor name in filenames_512:\n\tresults_512 = read_csv(name, header=0,usecols = [1])\n# describe all results\nresults_512 = results_512.div(10, axis = 0)\n\nprint(results_128.describe())\nprint(results_256.describe())\nprint(results_512.describe())\n\n# box and whisker plot\ndf = pd.DataFrame(np.concatenate((results_128,results_512),axis = 1),\ncolumns=['128', '512'])\n\ndf.boxplot(sym='k',showmeans = True,showfliers = False,return_type='dict')\n#results_256.boxplot(sym='k',showmeans = True,whis = [0,8],showfliers = False,return_type='dict')\n\npyplot.xlabel('Hidden node')\npyplot.ylabel('Error (m)')\npyplot.show()"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlabel"
]
] |
python3f/spectral_clustering | [
"bd5900dfa7ada69bd77080b905ef08ea62b420e9"
] | [
"main.py"
] | [
"from sklearn.cluster import KMeans\nfrom sklearn.neighbors import kneighbors_graph\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.sparse.csgraph import laplacian\nimport numpy as np\n\n\n\"\"\"Args:\nX: input samples, array (num, dim)\nn_clusters: no. of clusters\nn_neighbours: neighborhood size\n\nReturns:\nY: labels for samples, array (num,)\n\"\"\"\ndef spectral_clustering(X, n_clusters=2, n_neighbors=10):\n n, d = X.shape\n A = kneighbors_graph(X, n_neighbors, mode='connectivity').toarray()\n L = laplacian(A, normed=True)\n w, v = np.linalg.eig(L)\n w, v = w.real, v.real\n i = np.argsort(w)\n w, v = w[i], v[:,i]\n Y = KMeans(n_clusters).fit_predict(v[:,:2])\n return Y\n"
] | [
[
"numpy.argsort",
"sklearn.neighbors.kneighbors_graph",
"scipy.sparse.csgraph.laplacian",
"sklearn.cluster.KMeans",
"numpy.linalg.eig"
]
] |
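The main.py row above implements spectral clustering by building a k-nearest-neighbour connectivity graph, taking the normalized graph Laplacian, and running k-means on the eigenvectors associated with the smallest eigenvalues. A minimal usage sketch, assuming that file is importable as main and using scikit-learn's make_circles purely as toy data (neither the import nor the dataset appears in the row itself):

import numpy as np
from sklearn.datasets import make_circles

from main import spectral_clustering  # the file listed in the row above

# Two concentric rings: plain k-means in input space mixes them, while the
# spectral embedding separates them.
X, _ = make_circles(n_samples=300, factor=0.5, noise=0.05, random_state=0)
labels = spectral_clustering(X, n_clusters=2, n_neighbors=10)
print(np.bincount(labels))  # expect two clusters of roughly 150 points each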
sinzlab/orbit_transfer | [
"812d89af5c7ab26d9ea26766a4250ae023bb20b8"
] | [
"orbit_transfer/models/__init__.py"
] | [
"import torch\nimport numpy as np\nfrom torch.hub import load_state_dict_from_url\n\nfrom nnfabrik.utility.nn_helpers import load_state_dict\n\n\nfrom nntransfer.models.resnet import resnet_builder\nfrom nntransfer.models.utils import get_model_parameters\nfrom nntransfer.models.vgg import vgg_builder\nfrom nntransfer.models.lenet import lenet_builder\nfrom nntransfer.models.wrappers import *\n\nfrom ..configs.model import (\n ClassificationModel,\n)\nfrom .cnn import cnn_builder\nfrom .group_cnn import gcnn_builder\nfrom .learned_equiv import equiv_builder\nfrom .mlp import mlp_builder\nfrom .vit import vit_builder\n\n\ndef classification_model_builder(data_loader, seed: int, **config):\n config = ClassificationModel.from_dict(config)\n torch.manual_seed(seed)\n np.random.seed(seed)\n if \"vgg\" in config.type:\n model = vgg_builder(seed, config)\n from torchvision.models.vgg import model_urls\n elif \"resnet\" in config.type:\n model = resnet_builder(seed, config)\n from torchvision.models.resnet import model_urls\n elif \"lenet\" in config.type:\n model = lenet_builder(seed, config)\n elif \"mlp\" in config.type:\n model = mlp_builder(seed, config)\n elif \"vit\" in config.type:\n model = vit_builder(seed, config)\n elif \"gcnn\" in config.type:\n model = gcnn_builder(seed, config)\n elif \"cnn\" in config.type:\n model = cnn_builder(seed, config)\n elif \"equiv_transfer\" in config.type:\n model = equiv_builder(seed, config)\n else:\n raise Exception(\"Unknown type {}\".format(config.type))\n\n if config.pretrained:\n print(\"Downloading pretrained model:\", flush=True)\n url = (\n model_urls[config.type]\n if not config.pretrained_url\n else config.pretrained_url\n )\n state_dict = load_state_dict_from_url(url, progress=True)\n try:\n load_state_dict(model, state_dict)\n except:\n load_state_dict(model, state_dict[\"model_state_dict\"])\n\n print(\"Model with {} parameters.\".format(get_model_parameters(model)))\n if config.add_buffer:\n for n, p in model.named_parameters():\n if p.requires_grad:\n n = n.replace(\".\", \"__\")\n for b in config.add_buffer:\n if isinstance(b, str):\n model.register_buffer(\n f\"{n}_{b}\",\n p.detach().clone().zero_(),\n )\n else:\n k = b[1]\n b = b[0]\n model.register_buffer(\n f\"{n}_{b}\",\n torch.zeros(k, *p.data.shape),\n )\n if config.add_custom_buffer:\n for key, size in config.add_custom_buffer.items():\n model.register_buffer(\n key,\n torch.zeros(size),\n )\n # Add wrappers\n if config.get_intermediate_rep:\n model = IntermediateLayerGetter(\n model, return_layers=config.get_intermediate_rep, keep_output=True\n )\n if config.noise_adv_regression or config.noise_adv_classification:\n assert not config.self_attention\n model = NoiseAdvWrapper(\n model,\n input_size=model.fc.in_features\n if \"resnet\" in config.type\n else model.n_features,\n hidden_size=model.fc.in_features if \"resnet\" in config.type else 4096,\n classification=config.noise_adv_classification,\n num_noise_readout_layers=config.num_noise_readout_layers,\n sigmoid_output=config.noise_sigmoid_output,\n )\n return model\n"
] | [
[
"torch.zeros",
"torch.hub.load_state_dict_from_url",
"torch.manual_seed",
"numpy.random.seed"
]
] |
inamori/DeepLearningImplementations | [
"8bbd3c5a4a7d24b2c098ba47cfd45fe2c152771d"
] | [
"GAN_tf/src/model/flags.py"
] | [
"\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef define_flags():\n\n ############\n # Run mode\n ############\n tf.app.flags.DEFINE_string('run', None, \"Which operation to run. [train|inference]\")\n\n ##########################\n # Training parameters\n ###########################\n tf.app.flags.DEFINE_integer('nb_epoch', 400, \"Number of epochs\")\n tf.app.flags.DEFINE_integer('batch_size', 64, \"Number of samples per batch.\")\n tf.app.flags.DEFINE_integer('nb_batch_per_epoch', 500, \"Number of batches per epoch\")\n tf.app.flags.DEFINE_float('learning_rate', 2E-4, \"Learning rate used for AdamOptimizer\")\n tf.app.flags.DEFINE_integer('noise_dim', 100, \"Noise dimension for GAN generation\")\n tf.app.flags.DEFINE_integer('random_seed', 0, \"Seed used to initialize rng.\")\n\n ############################################\n # General tensorflow parameters parameters\n #############################################\n tf.app.flags.DEFINE_bool('use_XLA', False, \"Whether to use XLA compiler.\")\n tf.app.flags.DEFINE_integer('num_threads', 2, \"Number of threads to fetch the data\")\n tf.app.flags.DEFINE_float('capacity_factor', 32, \"Nuumber of batches to store in queue\")\n\n ##########\n # Datasets\n ##########\n tf.app.flags.DEFINE_string('data_format', \"NCHW\", \"Tensorflow image data format.\")\n tf.app.flags.DEFINE_string('celebA_path', \"../../data/raw/img_align_celeba\", \"Path to celebA images\")\n tf.app.flags.DEFINE_integer('channels', 3, \"Number of channels\")\n tf.app.flags.DEFINE_float('central_fraction', 0.8, \"Central crop as a fraction of total image\")\n tf.app.flags.DEFINE_integer('img_size', 64, \"Image size\")\n\n ##############\n # Directories\n ##############\n tf.app.flags.DEFINE_string('model_dir', '../../models', \"Output folder where checkpoints are dumped.\")\n tf.app.flags.DEFINE_string('log_dir', '../../logs', \"Logs for tensorboard.\")\n tf.app.flags.DEFINE_string('fig_dir', '../../figures', \"Where to save figures.\")\n tf.app.flags.DEFINE_string('raw_dir', '../../data/raw', \"Where raw data is saved\")\n tf.app.flags.DEFINE_string('data_dir', '../../data/processed', \"Where processed data is saved\")\n"
] | [
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.app.flags.DEFINE_bool",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_integer"
]
] |
kwangsungjun/lrbandit | [
"2f1f7ca4bbefe2bfd3e0bc50c4423a9791bfcde8"
] | [
"matrixrecovery/matrixrecovery.py"
] | [
"import myutils_cython\nimport numpy as np, numpy.random as ra, scipy.linalg as sla\nfrom tqdm import tqdm\n\ndef rankone(X,Z,y,r,R=.1, C=.1, tolPred=0.01, tolTh=0.01, maxIter=400, verbose=False):\n \"\"\"\n matrix recovery with rank-one measurements using Burer-Monteiro approach \n measurement model: (X[i,:] @ Theta) @ Z[i,:] == y[i]\n (IN)\n X, Z: N by d matrix\n y: N-dim vector\n r: the deemed rank of Theta\n R: noise level (subgaussian parameter)\n C: regularization parameter (larger => more regularization)\n tol: stopping condition\n maxIter: maximum number of iterations\n (OUT)\n (U,V,out_nIter,stat) so that [email protected] ≈ Theta;\n stat['objs'] has the objective values over time\n stat['stoppingPredList'], stat['stoppingThetaList'] has stopping conditions over time\n \"\"\"\n N,d = X.shape\n initU = ra.randn(d,r)\n U = initU\n V = initU # just a placeholder\n M = np.zeros( (d*r,d*r) )\n hatTh = initU @ initU.T # very bad initial hatTh\n if (verbose):\n my_tqdm = tqdm\n else:\n my_tqdm = lambda x: x\n\n objs = []; stoppingPredList = []; stoppingThetaList = []\n myeye = R*C*np.eye(d*r) \n for iIter in my_tqdm(range(1,1+maxIter)):\n D = np.zeros((N,d*r))\n if iIter % 2 == 0: # update U\n ZV = Z @ V\n myutils_cython.calcRowwiseKron(D, X, ZV) #- note D will be written!\n else: # update V\n XU = X @ U\n myutils_cython.calcRowwiseKron(D, Z, XU)\n\n M[:,:] = myeye + D.T@D\n b = D.T @ y\n sol = sla.solve(M,b, assume_a='pos', overwrite_a=True).reshape(d,r)\n if iIter % 2 == 0:\n prevU = U\n U = sol\n else:\n prevV = V\n V = sol\n prev_hatTh = hatTh\n hatTh = [email protected]\n #- compute residual\n predy = ((X@hatTh)*Z).sum(1)\n obj = sla.norm(predy - y, 2)**2 + R*C*(sla.norm(U, 'fro')**2 + sla.norm(V, 'fro')**2)\n objs.append( obj )\n stoppingPred = sla.norm(predy - y, 2) / sla.norm(y,2)\n stoppingPredList.append( stoppingPred )\n stoppingTheta = sla.norm(hatTh - prev_hatTh, 'fro')\n stoppingThetaList.append( stoppingTheta )\n if (stoppingPred < tolPred):\n break\n if (stoppingTheta < tolTh):\n break\n out_nIter = iIter\n stat = {'objs': objs, 'stoppingPredList': stoppingPredList, 'stoppingThetaList': stoppingThetaList}\n return U,V,out_nIter,stat\n\n"
] | [
[
"numpy.eye",
"scipy.linalg.norm",
"numpy.zeros",
"numpy.random.randn",
"scipy.linalg.solve"
]
] |
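The rankone routine in the row above alternates regularized least-squares updates of U and V (a Burer-Monteiro factorization) under the rank-one measurement model y[i] = X[i,:] @ Theta @ Z[i,:] documented in its docstring. Its inner loop needs the repo's compiled myutils_cython helper, so the sketch below only generates synthetic measurements that match that model and shows, commented out, how recovery would then be invoked; the sizes and noise level are arbitrary illustrative choices:

import numpy as np

rng = np.random.default_rng(0)
N, d, r = 2000, 10, 2

U_true = rng.standard_normal((d, r))
Theta = U_true @ U_true.T                   # ground-truth rank-r matrix to recover
X = rng.standard_normal((N, d))
Z = rng.standard_normal((N, d))
y = np.einsum('ni,ij,nj->n', X, Theta, Z)   # y[i] = X[i,:] @ Theta @ Z[i,:]
y = y + 0.1 * rng.standard_normal(N)        # subgaussian noise, roughly R = 0.1

# With the cython extension built:
# U, V, n_iter, stat = rankone(X, Z, y, r, R=0.1, C=0.1)
# np.linalg.norm(U @ V.T - Theta, 'fro') should shrink as stat['objs'] decreases.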
kassyray/NeuroKit | [
"b84d110a71d5d17c0d1efde0d60d00446fda16cb"
] | [
"tests/tests_complexity.py"
] | [
"import numpy as np\nimport pandas as pd\nimport neurokit2 as nk\nimport nolds\n\nfrom pyentrp import entropy as pyentrp\n\n\"\"\"\nFor the testing of complexity, we test our implementations against existing and established ones.\nHowever, some of these other implementations are not really packaged in a way\nSO THAT we can easily import them. Thus, we directly copied their content in this file\n(below the tests).\n\"\"\"\n\n\n# =============================================================================\n# Some sanity checks\n# =============================================================================\ndef test_complexity_sanity():\n\n signal = np.cos(np.linspace(start=0, stop=30, num=1000))\n\n # Entropy\n assert np.allclose(nk.entropy_fuzzy(signal), nk.entropy_sample(signal, fuzzy=True), atol=0.000001)\n\n # Fractal\n assert np.allclose(nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])), 2.1009048365682133, atol=0.000001)\n assert np.allclose(nk.fractal_dfa(signal), 1.957966586191164, atol=0.000001)\n assert np.allclose(nk.fractal_dfa(signal, multifractal=True), 1.957966586191164, atol=0.000001)\n\n assert np.allclose(nk.fractal_correlation(signal), 0.7884473170763334, atol=0.000001)\n assert np.allclose(nk.fractal_correlation(signal, r=\"nolds\"), nolds.corr_dim(signal, 2), atol=0.0001)\n\n\n# =============================================================================\n# Comparison against R\n# =============================================================================\n\"\"\"\nR code:\n\nlibrary(TSEntropies)\nlibrary(pracma)\n\nsignal <- read.csv(\"https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv\")$RSP\nr <- 0.2 * sd(signal)\n\n# ApEn --------------------------------------------------------------------\n\nTSEntropies::ApEn(signal, dim=2, lag=1, r=r)\n0.04383386\nTSEntropies::ApEn(signal, dim=3, lag=2, r=1)\n0.0004269369\npracma::approx_entropy(signal[1:200], edim=2, r=r, elag=1)\n0.03632554\n\n# SampEn ------------------------------------------------------------------\n\nTSEntropies::SampEn(signal[1:300], dim=2, lag=1, r=r)\n0.04777648\nTSEntropies::FastSampEn(signal[1:300], dim=2, lag=1, r=r)\n0.003490405\npracma::sample_entropy(signal[1:300], edim=2, tau=1, r=r)\n0.03784376\npracma::sample_entropy(signal[1:300], edim=3, tau=2, r=r)\n0.09185509\n\"\"\"\n\n\ndef test_complexity_vs_R():\n\n signal = pd.read_csv(\"https://raw.githubusercontent.com/neuropsychology/NeuroKit/master/data/bio_eventrelated_100hz.csv\")[\"RSP\"].values\n r = 0.2 * np.std(signal, ddof=1)\n\n # ApEn\n apen = nk.entropy_approximate(signal, dimension=2, r=r)\n assert np.allclose(apen, 0.04383386, atol=0.0001)\n apen = nk.entropy_approximate(signal, dimension=3, delay=2, r=1)\n assert np.allclose(apen, 0.0004269369, atol=0.0001)\n apen = nk.entropy_approximate(signal[0:200], dimension=2, delay=1, r=r)\n assert np.allclose(apen, 0.03632554, atol=0.0001)\n\n # SampEn\n sampen = nk.entropy_sample(signal[0:300], dimension=2, r=r)\n assert np.allclose(sampen, nk.entropy_sample(signal[0:300], dimension=2, r=r, distance=\"infinity\"), atol=0.001)\n assert np.allclose(sampen, 0.03784376, atol=0.001)\n sampen = nk.entropy_sample(signal[0:300], dimension=3, delay=2, r=r)\n assert np.allclose(sampen, 0.09185509, atol=0.01)\n\n\n# =============================================================================\n# Comparison against Python implementations\n# =============================================================================\n\n\ndef 
test_complexity_vs_Python():\n\n signal = np.cos(np.linspace(start=0, stop=30, num=100))\n\n # Shannon\n shannon = nk.entropy_shannon(signal)\n# assert scipy.stats.entropy(shannon, pd.Series(signal).value_counts())\n assert np.allclose(shannon - pyentrp.shannon_entropy(signal), 0)\n\n\n # Approximate\n assert np.allclose(nk.entropy_approximate(signal), 0.17364897858477146)\n assert np.allclose(nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_app_entropy(signal, 2), 0)\n\n assert nk.entropy_approximate(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) != pyeeg_ap_entropy(signal, 2, 0.2*np.std(signal, ddof=1))\n\n\n # Sample\n assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2*np.std(signal, ddof=1)) - entropy_sample_entropy(signal, 2), 0)\n assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - nolds.sampen(signal, 2, 0.2), 0)\n assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - entro_py_sampen(signal, 2, 0.2, scale=False), 0)\n assert np.allclose(nk.entropy_sample(signal, dimension=2, r=0.2) - pyeeg_samp_entropy(signal, 2, 0.2), 0)\n\n# import sampen\n# sampen.sampen2(signal[0:300], mm=2, r=r)\n\n assert nk.entropy_sample(signal, dimension=2, r=0.2) != pyentrp.sample_entropy(signal, 2, 0.2)[1]\n assert nk.entropy_sample(signal, dimension=2, r=0.2*np.sqrt(np.var(signal))) != MultiscaleEntropy_sample_entropy(signal, 2, 0.2)[0.2][2]\n\n # MSE\n# assert nk.entropy_multiscale(signal, 2, 0.2*np.sqrt(np.var(signal))) != np.trapz(MultiscaleEntropy_mse(signal, [i+1 for i in range(10)], 2, 0.2, return_type=\"list\"))\n# assert nk.entropy_multiscale(signal, 2, 0.2*np.std(signal, ddof=1)) != np.trapz(pyentrp.multiscale_entropy(signal, 2, 0.2, 10))\n\n # Fuzzy\n assert np.allclose(nk.entropy_fuzzy(signal, dimension=2, r=0.2, delay=1) - entro_py_fuzzyen(signal, 2, 0.2, 1, scale=False), 0)\n\n # DFA\n assert nk.fractal_dfa(signal, windows=np.array([4, 8, 12, 20])) != nolds.dfa(signal, nvals=[4, 8, 12, 20], fit_exp=\"poly\")\n\n\n# =============================================================================\n# Wikipedia\n# =============================================================================\ndef wikipedia_sampen(signal, m=2, r=1):\n N = len(signal)\n B = 0.0\n A = 0.0\n\n # Split time series and save all templates of length m\n xmi = np.array([signal[i : i + m] for i in range(N - m)])\n xmj = np.array([signal[i : i + m] for i in range(N - m + 1)])\n\n # Save all matches minus the self-match, compute B\n B = np.sum([np.sum(np.abs(xmii - xmj).max(axis=1) <= r) - 1 for xmii in xmi])\n\n # Similar for computing A\n m += 1\n xm = np.array([signal[i : i + m] for i in range(N - m + 1)])\n\n A = np.sum([np.sum(np.abs(xmi - xm).max(axis=1) <= r) - 1 for xmi in xm])\n\n # Return SampEn\n return -np.log(A / B)\n\n\n# =============================================================================\n# Pyeeg\n# =============================================================================\n\n\ndef pyeeg_embed_seq(time_series, tau, embedding_dimension):\n if not type(time_series) == np.ndarray:\n typed_time_series = np.asarray(time_series)\n else:\n typed_time_series = time_series\n\n shape = (\n typed_time_series.size - tau * (embedding_dimension - 1),\n embedding_dimension\n )\n\n strides = (typed_time_series.itemsize, tau * typed_time_series.itemsize)\n\n return np.lib.stride_tricks.as_strided(\n typed_time_series,\n shape=shape,\n strides=strides\n )\n\n\n\n\ndef pyeeg_bin_power(X, Band, Fs):\n C = np.fft.fft(X)\n C = abs(C)\n Power = 
np.zeros(len(Band) - 1)\n for Freq_Index in range(0, len(Band) - 1):\n Freq = float(Band[Freq_Index])\n Next_Freq = float(Band[Freq_Index + 1])\n Power[Freq_Index] = sum(\n C[int(np.floor(Freq / Fs * len(X))):\n int(np.floor(Next_Freq / Fs * len(X)))]\n )\n Power_Ratio = Power / sum(Power)\n return Power, Power_Ratio\n\n\n\n\n\ndef pyeeg_ap_entropy(X, M, R):\n N = len(X)\n\n Em = pyeeg_embed_seq(X, 1, M)\n A = np.tile(Em, (len(Em), 1, 1))\n B = np.transpose(A, [1, 0, 2])\n D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|\n InRange = np.max(D, axis=2) <= R\n\n # Probability that random M-sequences are in range\n Cm = InRange.mean(axis=0)\n\n # M+1-sequences in range if M-sequences are in range & last values are close\n Dp = np.abs(\n np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T\n )\n\n Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)\n\n Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))\n\n Ap_En = (Phi_m - Phi_mp) / (N - M)\n\n return Ap_En\n\n\ndef pyeeg_samp_entropy(X, M, R):\n N = len(X)\n\n Em = pyeeg_embed_seq(X, 1, M)[:-1]\n A = np.tile(Em, (len(Em), 1, 1))\n B = np.transpose(A, [1, 0, 2])\n D = np.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|\n InRange = np.max(D, axis=2) <= R\n np.fill_diagonal(InRange, 0) # Don't count self-matches\n\n Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range\n Dp = np.abs(\n np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T\n )\n\n Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)\n\n # Avoid taking log(0)\n Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))\n\n return Samp_En\n\n# =============================================================================\n# Entropy\n# =============================================================================\n\n\nfrom sklearn.neighbors import KDTree\n\ndef entropy_embed(x, order=3, delay=1):\n N = len(x)\n if order * delay > N:\n raise ValueError(\"Error: order * delay should be lower than x.size\")\n if delay < 1:\n raise ValueError(\"Delay has to be at least 1.\")\n if order < 2:\n raise ValueError(\"Order has to be at least 2.\")\n Y = np.zeros((order, N - (order - 1) * delay))\n for i in range(order):\n Y[i] = x[i * delay:i * delay + Y.shape[1]]\n return Y.T\n\n\n\ndef entropy_app_samp_entropy(x, order, metric='chebyshev', approximate=True):\n _all_metrics = KDTree.valid_metrics\n if metric not in _all_metrics:\n raise ValueError('The given metric (%s) is not valid. 
The valid '\n 'metric names are: %s' % (metric, _all_metrics))\n phi = np.zeros(2)\n r = 0.2 * np.std(x, axis=-1, ddof=1)\n\n # compute phi(order, r)\n _emb_data1 = entropy_embed(x, order, 1)\n if approximate:\n emb_data1 = _emb_data1\n else:\n emb_data1 = _emb_data1[:-1]\n count1 = KDTree(emb_data1, metric=metric).query_radius(emb_data1, r,\n count_only=True\n ).astype(np.float64)\n # compute phi(order + 1, r)\n emb_data2 = entropy_embed(x, order + 1, 1)\n count2 = KDTree(emb_data2, metric=metric).query_radius(emb_data2, r,\n count_only=True\n ).astype(np.float64)\n if approximate:\n phi[0] = np.mean(np.log(count1 / emb_data1.shape[0]))\n phi[1] = np.mean(np.log(count2 / emb_data2.shape[0]))\n else:\n phi[0] = np.mean((count1 - 1) / (emb_data1.shape[0] - 1))\n phi[1] = np.mean((count2 - 1) / (emb_data2.shape[0] - 1))\n return phi\n\n\n\n\ndef entropy_app_entropy(x, order=2, metric='chebyshev'):\n phi = entropy_app_samp_entropy(x, order=order, metric=metric, approximate=True)\n return np.subtract(phi[0], phi[1])\n\n\n\ndef entropy_sample_entropy(x, order=2, metric='chebyshev'):\n x = np.asarray(x, dtype=np.float64)\n phi = entropy_app_samp_entropy(x, order=order, metric=metric,\n approximate=False)\n return -np.log(np.divide(phi[1], phi[0]))\n\n\n\n# =============================================================================\n# entro-py\n# =============================================================================\n\ndef entro_py_sampen(x, dim, r, scale=True):\n return entro_py_entropy(x, dim, r, scale=scale)\n\n\ndef entro_py_cross_sampen(x1, x2, dim, r, scale=True):\n return entro_py_entropy([x1, x2], dim, r, scale)\n\n\ndef entro_py_fuzzyen(x, dim, r, n, scale=True):\n return entro_py_entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)\n\n\ndef entro_py_cross_fuzzyen(x1, x2, dim, r, n, scale=True):\n return entro_py_entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)\n\n\ndef entro_py_pattern_mat(x, m):\n x = np.asarray(x).ravel()\n if m == 1:\n return x\n else:\n N = len(x)\n patterns = np.zeros((m, N-m+1))\n for i in range(m):\n patterns[i, :] = x[i:N-m+i+1]\n return patterns\n\n\ndef entro_py_entropy(x, dim, r, n=1, scale=True, remove_baseline=False):\n fuzzy = True if remove_baseline else False\n cross = True if type(x) == list else False\n N = len(x[0]) if cross else len(x)\n\n if scale:\n if cross:\n x = [entro_py_scale(np.copy(x[0])), entro_py_scale(np.copy(x[1]))]\n else:\n x = entro_py_scale(np.copy(x))\n\n phi = [0, 0] # phi(m), phi(m+1)\n for j in [0, 1]:\n m = dim + j\n npat = N-dim # https://github.com/ixjlyons/entro-py/pull/2/files\n if cross:\n# patterns = [entro_py_pattern_mat(x[0], m), entro_py_pattern_mat(x[1], m)]\n patterns = [entro_py_pattern_mat(x[0], m)[:, :npat], entro_py_pattern_mat(x[1], m)[:, :npat]] # https://github.com/ixjlyons/entro-py/pull/2/files\n else:\n# patterns = entro_py_pattern_mat(x, m)\n patterns = entro_py_pattern_mat(x, m)[:, :npat]\n\n if remove_baseline:\n if cross:\n patterns[0] = entro_py_remove_baseline(patterns[0], axis=0)\n patterns[1] = entro_py_remove_baseline(patterns[1], axis=0)\n else:\n patterns = entro_py_remove_baseline(patterns, axis=0)\n\n# count = np.zeros(N-m) # https://github.com/ixjlyons/entro-py/pull/2/files\n# for i in range(N-m): # https://github.com/ixjlyons/entro-py/pull/2/files\n count = np.zeros(npat)\n for i in range(npat):\n if cross:\n if m == 1:\n sub = patterns[1][i]\n else:\n sub = patterns[1][:, [i]]\n dist = np.max(np.abs(patterns[0] - sub), axis=0)\n else:\n if m == 1:\n sub = 
patterns[i]\n else:\n sub = patterns[:, [i]]\n dist = np.max(np.abs(patterns - sub), axis=0)\n\n if fuzzy:\n sim = np.exp(-np.power(dist, n) / r)\n else:\n sim = dist < r\n\n count[i] = np.sum(sim) - 1\n\n# phi[j] = np.mean(count) / (N-m-1)\n phi[j] = np.mean(count) / (N-dim-1) # https://github.com/ixjlyons/entro-py/pull/2/files\n\n return np.log(phi[0] / phi[1])\n\n\ndef entro_py_scale(x, axis=None):\n x = entro_py_remove_baseline(x, axis=axis)\n x /= np.std(x, ddof=1, axis=axis, keepdims=True)\n return x\n\n\ndef entro_py_remove_baseline(x, axis=None):\n x -= np.mean(x, axis=axis, keepdims=True)\n return x\n\n\n\n\n\n\n\n\n\n\n\n\n\n# =============================================================================\n# MultiscaleEntropy https://github.com/reatank/MultiscaleEntropy/blob/master/MultiscaleEntropy/mse.py\n# =============================================================================\n\nimport math\nfrom collections.abc import Iterable\n\ndef MultiscaleEntropy_init_return_type(return_type):\n if return_type == 'dict':\n return {}\n else:\n return []\n\ndef MultiscaleEntropy_check_type(x, num_type, name):\n if isinstance(x, num_type):\n tmp = [x]\n elif not isinstance(x, Iterable):\n raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)\n else:\n tmp = []\n for i in x:\n tmp.append(i)\n if not isinstance(i, num_type):\n raise ValueError(name + ' should be a ' + num_type.__name__ + ' or an iterator of ' + num_type.__name__)\n return tmp\n\n# sum of seperate intervals of x\ndef MultiscaleEntropy_coarse_grain(x, scale_factor):\n x = np.array(x)\n x_len = len(x)\n if x_len % scale_factor:\n padded_len = (1+int(x_len/scale_factor))*scale_factor\n else:\n padded_len = x_len\n tmp_x = np.zeros(padded_len)\n tmp_x[:x_len] = x\n tmp_x = np.reshape(tmp_x, (int(padded_len/scale_factor), scale_factor))\n ans = np.reshape(np.sum(tmp_x, axis=1), (-1))/scale_factor\n\n return ans\n\ndef MultiscaleEntropy_sample_entropy(x, m=[2], r=[0.15], sd=None, return_type='dict', safe_mode=False):\n '''[Sample Entropy, the threshold will be r*sd]\n\n Arguments:\n x {[input signal]} -- [an iterator of numbers]\n\n Keyword Arguments:\n m {list} -- [m in sample entropy] (default: {[2]})\n r {list} -- [r in sample entropy] (default: {[0.15]})\n sd {number} -- [standard derivation of x, if None, will be calculated] (default: {None})\n return_type {str} -- [can be dict or list] (default: {'dict'})\n safe_mode {bool} -- [if set True, type checking will be skipped] (default: {False})\n\n Raises:\n ValueError -- [some values too big]\n\n Returns:\n [dict or list as return_type indicates] -- [if dict, nest as [scale_factor][m][r] for each value of m, r; if list, nest as [i][j] for lengths of m, r]\n '''\n # type checking\n if not safe_mode:\n m = MultiscaleEntropy_check_type(m, int, 'm')\n r = MultiscaleEntropy_check_type(r, float, 'r')\n if not (sd == None) and not (isinstance(sd, float) or isinstance(sd, int)):\n raise ValueError('sd should be a number')\n try:\n x = np.array(x)\n except:\n raise ValueError('x should be a sequence of numbers')\n # value checking\n if len(x) < max(m):\n raise ValueError('the max m is bigger than x\\'s length')\n\n # initialization\n if sd == None:\n sd = np.sqrt(np.var(x))\n ans = MultiscaleEntropy_init_return_type(return_type)\n\n # calculation\n for i, rr in enumerate(r):\n threshold = rr * sd\n if return_type == 'dict':\n ans[rr] = MultiscaleEntropy_init_return_type(return_type)\n else:\n 
ans.append(MultiscaleEntropy_init_return_type(return_type))\n count = {}\n tmp_m = []\n for mm in m:\n tmp_m.append(mm)\n tmp_m.append(mm+1)\n tmp_m = list(set(tmp_m))\n for mm in tmp_m:\n count[mm] = 0\n\n for j in range(1, len(x)-min(m)+1):\n cont = 0\n for inc in range(0, len(x)-j):\n if abs(x[inc]-x[j+inc]) < threshold:\n cont += 1\n elif cont > 0:\n for mm in tmp_m:\n tmp = cont - mm + 1\n count[mm] += tmp if tmp > 0 else 0\n cont = 0\n if cont > 0:\n for mm in tmp_m:\n tmp = cont - mm + 1\n count[mm] += tmp if tmp > 0 else 0\n for mm in m:\n if count[mm+1] == 0 or count[mm] == 0:\n t = len(x)-mm+1\n tmp = -math.log(1/(t*(t-1)))\n else:\n tmp = -math.log(count[mm+1]/count[mm])\n if return_type == 'dict':\n ans[rr][mm] = tmp\n else:\n ans[i].append(tmp)\n return ans\n\ndef MultiscaleEntropy_mse(x, scale_factor=[i for i in range(1,21)], m=[2], r=[0.15], return_type='dict', safe_mode=False):\n '''[Multiscale Entropy]\n\n Arguments:\n x {[input signal]} -- [an iterator of numbers]\n\n Keyword Arguments:\n scale_factor {list} -- [scale factors of coarse graining] (default: {[i for i in range(1,21)]})\n m {list} -- [m in sample entropy] (default: {[2]})\n r {list} -- [r in sample entropy] (default: {[0.15]})\n return_type {str} -- [can be dict or list] (default: {'dict'})\n safe_mode {bool} -- [if set True, type checking will be skipped] (default: {False})\n\n Raises:\n ValueError -- [some values too big]\n\n Returns:\n [dict or list as return_type indicates] -- [if dict, nest as [scale_factor][m][r] for each value of scale_factor, m, r; if list nest as [i][j][k] for lengths of scale_factor, m, r]\n '''\n # type checking\n if not safe_mode:\n m = MultiscaleEntropy_check_type(m, int, 'm')\n r = MultiscaleEntropy_check_type(r, float, 'r')\n scale_factor = MultiscaleEntropy_check_type(scale_factor, int, 'scale_factor')\n try:\n x = np.array(x)\n except:\n print('x should be a sequence of numbers')\n # value checking\n if max(scale_factor) > len(x):\n raise ValueError('the max scale_factor is bigger than x\\'s length')\n\n # calculation\n sd = np.sqrt(np.var(x))\n ms_en = MultiscaleEntropy_init_return_type(return_type)\n for s_f in scale_factor:\n y = MultiscaleEntropy_coarse_grain(x, s_f)\n if return_type == 'dict':\n ms_en[s_f] = MultiscaleEntropy_sample_entropy(y, m, r, sd, 'dict', True)\n else:\n ms_en.append(MultiscaleEntropy_sample_entropy(y, m, r, sd, 'list', True))\n\n if return_type == \"list\":\n ms_en = [i[0] for i in ms_en]\n ms_en = [i[0] for i in ms_en]\n return ms_en\n"
] | [
[
"numpy.sum",
"numpy.subtract",
"numpy.var",
"numpy.asarray",
"numpy.copy",
"numpy.log",
"numpy.fill_diagonal",
"numpy.allclose",
"numpy.transpose",
"numpy.logical_and",
"numpy.abs",
"numpy.linspace",
"numpy.mean",
"numpy.tile",
"numpy.zeros",
"pandas.read_csv",
"numpy.lib.stride_tricks.as_strided",
"numpy.max",
"numpy.power",
"numpy.std",
"numpy.fft.fft",
"numpy.divide",
"sklearn.neighbors.KDTree",
"numpy.array"
]
] |
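tests_complexity.py in the row above checks NeuroKit's entropy and fractal estimators against established implementations such as nolds, pyentrp and ported pyeeg/entro-py code. A minimal sketch of one of those cross-checks run outside the test harness, assuming neurokit2 and nolds are installed at versions compatible with that commit (newer NeuroKit releases return a (value, info) tuple rather than a bare float, which would break the comparison as written):

import nolds
import numpy as np
import neurokit2 as nk

# Same toy signal the test file uses.
signal = np.cos(np.linspace(start=0, stop=30, num=100))

# Sample entropy from NeuroKit versus the nolds reference, as in the row's assert.
nk_value = nk.entropy_sample(signal, dimension=2, r=0.2)
nolds_value = nolds.sampen(signal, 2, 0.2)
print(nk_value, nolds_value, np.allclose(nk_value - nolds_value, 0))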
shangz-ai/transformers | [
"75259b44bf2e2b98b5a4d431fb400b7190342a01",
"75259b44bf2e2b98b5a4d431fb400b7190342a01"
] | [
"tests/models/tapas/test_tokenization_tapas.py",
"tests/models/big_bird/test_modeling_big_bird.py"
] | [
"# coding=utf-8\n# Copyright 2020 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport inspect\nimport os\nimport shutil\nimport tempfile\nimport unittest\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\n\nfrom transformers import AddedToken\nfrom transformers.models.tapas.tokenization_tapas import (\n VOCAB_FILES_NAMES,\n BasicTokenizer,\n TapasTokenizer,\n WordpieceTokenizer,\n _is_control,\n _is_punctuation,\n _is_whitespace,\n)\nfrom transformers.testing_utils import (\n is_pt_tf_cross_test,\n require_pandas,\n require_scatter,\n require_tensorflow_probability,\n require_tokenizers,\n require_torch,\n slow,\n)\n\nfrom ...test_tokenization_common import TokenizerTesterMixin, filter_non_english, merge_model_tokenizer_mappings\n\n\n@require_tokenizers\n@require_pandas\nclass TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):\n tokenizer_class = TapasTokenizer\n test_rust_tokenizer = False\n space_between_special_tokens = True\n from_pretrained_filter = filter_non_english\n test_seq2seq = False\n\n def get_table(\n self,\n tokenizer: TapasTokenizer,\n length=5,\n ):\n toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]\n\n if length == 0:\n data = {}\n else:\n data = {toks[0]: [toks[tok] for tok in range(1, length)]}\n\n table = pd.DataFrame.from_dict(data)\n\n return table\n\n def get_table_and_query(\n self,\n tokenizer: TapasTokenizer,\n length=5,\n ):\n toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]\n table = self.get_table(tokenizer, length=length - 3)\n query = \" \".join(toks[:3])\n\n return table, query\n\n def get_clean_sequence(\n self,\n tokenizer: TapasTokenizer,\n with_prefix_space=False,\n max_length=20,\n min_length=5,\n empty_table: bool = False,\n add_special_tokens: bool = True,\n return_table_and_query: bool = False,\n ):\n\n toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]\n\n if empty_table:\n table = pd.DataFrame.from_dict({})\n query = \" \".join(toks[:min_length])\n else:\n data = {toks[0]: [toks[tok] for tok in range(1, min_length - 3)]}\n table = pd.DataFrame.from_dict(data)\n query = \" \".join(toks[:3])\n\n output_ids = tokenizer.encode(table, query, add_special_tokens=add_special_tokens)\n output_txt = tokenizer.decode(output_ids)\n\n assert len(output_ids) >= min_length, \"Update the code to generate the sequences so that they are larger\"\n assert len(output_ids) <= max_length, \"Update the code to generate the sequences so that they are smaller\"\n\n if return_table_and_query:\n return output_txt, output_ids, table, query\n\n return output_txt, output_ids\n\n def setUp(self):\n super().setUp()\n\n vocab_tokens = [\n \"[UNK]\",\n \"[CLS]\",\n \"[SEP]\",\n \"[PAD]\",\n \"[MASK]\",\n \"want\",\n \"##want\",\n \"##ed\",\n \"wa\",\n \"un\",\n \"runn\",\n \"##ing\",\n \",\",\n \"low\",\n \"lowest\",\n ]\n self.vocab_file = os.path.join(self.tmpdirname, 
VOCAB_FILES_NAMES[\"vocab_file\"])\n with open(self.vocab_file, \"w\", encoding=\"utf-8\") as vocab_writer:\n vocab_writer.write(\"\".join([x + \"\\n\" for x in vocab_tokens]))\n\n def get_input_output_texts(self, tokenizer):\n input_text = \"UNwant\\u00E9d,running\"\n output_text = \"unwanted, running\"\n return input_text, output_text\n\n @require_tensorflow_probability\n def test_tf_encode_plus_sent_to_model(self):\n super().test_tf_encode_plus_sent_to_model()\n\n def test_rust_and_python_full_tokenizers(self):\n if not self.test_rust_tokenizer:\n return\n\n tokenizer = self.get_tokenizer()\n rust_tokenizer = self.get_rust_tokenizer()\n\n sequence = \"UNwant\\u00E9d,running\"\n\n tokens = tokenizer.tokenize(sequence)\n rust_tokens = rust_tokenizer.tokenize(sequence)\n self.assertListEqual(tokens, rust_tokens)\n\n ids = tokenizer.encode(sequence, add_special_tokens=False)\n rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)\n self.assertListEqual(ids, rust_ids)\n\n rust_tokenizer = self.get_rust_tokenizer()\n ids = tokenizer.encode(sequence)\n rust_ids = rust_tokenizer.encode(sequence)\n self.assertListEqual(ids, rust_ids)\n\n # With lower casing\n tokenizer = self.get_tokenizer(do_lower_case=True)\n rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)\n\n sequence = \"UNwant\\u00E9d,running\"\n\n tokens = tokenizer.tokenize(sequence)\n rust_tokens = rust_tokenizer.tokenize(sequence)\n self.assertListEqual(tokens, rust_tokens)\n\n ids = tokenizer.encode(sequence, add_special_tokens=False)\n rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)\n self.assertListEqual(ids, rust_ids)\n\n rust_tokenizer = self.get_rust_tokenizer()\n ids = tokenizer.encode(sequence)\n rust_ids = rust_tokenizer.encode(sequence)\n self.assertListEqual(ids, rust_ids)\n\n def test_chinese(self):\n tokenizer = BasicTokenizer()\n\n self.assertListEqual(tokenizer.tokenize(\"ah\\u535A\\u63A8zz\"), [\"ah\", \"\\u535A\", \"\\u63A8\", \"zz\"])\n\n def test_basic_tokenizer_lower(self):\n tokenizer = BasicTokenizer(do_lower_case=True)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHeLLo!how \\n Are yoU? \"), [\"hello\", \"!\", \"how\", \"are\", \"you\", \"?\"]\n )\n self.assertListEqual(tokenizer.tokenize(\"H\\u00E9llo\"), [\"hello\"])\n\n def test_basic_tokenizer_lower_strip_accents_false(self):\n tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHäLLo!how \\n Are yoU? \"), [\"hällo\", \"!\", \"how\", \"are\", \"you\", \"?\"]\n )\n self.assertListEqual(tokenizer.tokenize(\"H\\u00E9llo\"), [\"h\\u00E9llo\"])\n\n def test_basic_tokenizer_lower_strip_accents_true(self):\n tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHäLLo!how \\n Are yoU? \"), [\"hallo\", \"!\", \"how\", \"are\", \"you\", \"?\"]\n )\n self.assertListEqual(tokenizer.tokenize(\"H\\u00E9llo\"), [\"hello\"])\n\n def test_basic_tokenizer_lower_strip_accents_default(self):\n tokenizer = BasicTokenizer(do_lower_case=True)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHäLLo!how \\n Are yoU? \"), [\"hallo\", \"!\", \"how\", \"are\", \"you\", \"?\"]\n )\n self.assertListEqual(tokenizer.tokenize(\"H\\u00E9llo\"), [\"hello\"])\n\n def test_basic_tokenizer_no_lower(self):\n tokenizer = BasicTokenizer(do_lower_case=False)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHeLLo!how \\n Are yoU? 
\"), [\"HeLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\"]\n )\n\n def test_basic_tokenizer_no_lower_strip_accents_false(self):\n tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHäLLo!how \\n Are yoU? \"), [\"HäLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\"]\n )\n\n def test_basic_tokenizer_no_lower_strip_accents_true(self):\n tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHäLLo!how \\n Are yoU? \"), [\"HaLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\"]\n )\n\n def test_basic_tokenizer_respects_never_split_tokens(self):\n tokenizer = BasicTokenizer(do_lower_case=False, never_split=[\"[UNK]\"])\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHeLLo!how \\n Are yoU? [UNK]\"), [\"HeLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\", \"[UNK]\"]\n )\n\n def test_wordpiece_tokenizer(self):\n vocab_tokens = [\"[UNK]\", \"[CLS]\", \"[SEP]\", \"want\", \"##want\", \"##ed\", \"wa\", \"un\", \"runn\", \"##ing\"]\n\n vocab = {}\n for i, token in enumerate(vocab_tokens):\n vocab[token] = i\n tokenizer = WordpieceTokenizer(vocab=vocab, unk_token=\"[UNK]\")\n\n self.assertListEqual(tokenizer.tokenize(\"\"), [])\n\n self.assertListEqual(tokenizer.tokenize(\"unwanted running\"), [\"un\", \"##want\", \"##ed\", \"runn\", \"##ing\"])\n\n self.assertListEqual(tokenizer.tokenize(\"unwantedX running\"), [\"[UNK]\", \"runn\", \"##ing\"])\n\n def test_is_whitespace(self):\n self.assertTrue(_is_whitespace(\" \"))\n self.assertTrue(_is_whitespace(\"\\t\"))\n self.assertTrue(_is_whitespace(\"\\r\"))\n self.assertTrue(_is_whitespace(\"\\n\"))\n self.assertTrue(_is_whitespace(\"\\u00A0\"))\n\n self.assertFalse(_is_whitespace(\"A\"))\n self.assertFalse(_is_whitespace(\"-\"))\n\n def test_is_control(self):\n self.assertTrue(_is_control(\"\\u0005\"))\n\n self.assertFalse(_is_control(\"A\"))\n self.assertFalse(_is_control(\" \"))\n self.assertFalse(_is_control(\"\\t\"))\n self.assertFalse(_is_control(\"\\r\"))\n\n def test_is_punctuation(self):\n self.assertTrue(_is_punctuation(\"-\"))\n self.assertTrue(_is_punctuation(\"$\"))\n self.assertTrue(_is_punctuation(\"`\"))\n self.assertTrue(_is_punctuation(\".\"))\n\n self.assertFalse(_is_punctuation(\"A\"))\n self.assertFalse(_is_punctuation(\" \"))\n\n def test_clean_text(self):\n tokenizer = self.get_tokenizer()\n\n # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340\n self.assertListEqual(\n [tokenizer.tokenize(t) for t in [\"Test\", \"\\xad\", \"test\"]], [[\"[UNK]\"], [\"[EMPTY]\"], [\"[UNK]\"]]\n )\n\n @slow\n def test_sequence_builders(self):\n tokenizer = self.tokenizer_class.from_pretrained(\"google/tapas-base-finetuned-wtq\")\n\n empty_table = self.get_table(tokenizer, length=0)\n table = self.get_table(tokenizer, length=10)\n\n text = tokenizer.encode(table, add_special_tokens=False)\n text_2 = tokenizer.encode(empty_table, \"multi-sequence build\", add_special_tokens=False)\n\n encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)\n\n assert encoded_pair == [101] + text + [102] + text_2\n\n def test_offsets_with_special_characters(self):\n for tokenizer, pretrained_name, kwargs in self.tokenizers_list:\n with self.subTest(f\"{tokenizer.__class__.__name__} ({pretrained_name})\"):\n tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)\n\n sentence = f\"A, naïve {tokenizer_r.mask_token} AllenNLP sentence.\"\n tokens = tokenizer_r.encode_plus(\n 
sentence,\n return_attention_mask=False,\n return_token_type_ids=False,\n return_offsets_mapping=True,\n add_special_tokens=True,\n )\n\n do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, \"do_lower_case\") else False\n expected_results = (\n [\n ((0, 0), tokenizer_r.cls_token),\n ((0, 1), \"A\"),\n ((1, 2), \",\"),\n ((3, 5), \"na\"),\n ((5, 6), \"##ï\"),\n ((6, 8), \"##ve\"),\n ((9, 15), tokenizer_r.mask_token),\n ((16, 21), \"Allen\"),\n ((21, 23), \"##NL\"),\n ((23, 24), \"##P\"),\n ((25, 33), \"sentence\"),\n ((33, 34), \".\"),\n ((0, 0), tokenizer_r.sep_token),\n ]\n if not do_lower_case\n else [\n ((0, 0), tokenizer_r.cls_token),\n ((0, 1), \"a\"),\n ((1, 2), \",\"),\n ((3, 8), \"naive\"),\n ((9, 15), tokenizer_r.mask_token),\n ((16, 21), \"allen\"),\n ((21, 23), \"##nl\"),\n ((23, 24), \"##p\"),\n ((25, 33), \"sentence\"),\n ((33, 34), \".\"),\n ((0, 0), tokenizer_r.sep_token),\n ]\n )\n\n self.assertEqual(\n [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens[\"input_ids\"])\n )\n self.assertEqual([e[0] for e in expected_results], tokens[\"offset_mapping\"])\n\n def test_add_special_tokens(self):\n tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n input_table = self.get_table(tokenizer, length=0)\n\n special_token = \"[SPECIAL_TOKEN]\"\n\n tokenizer.add_special_tokens({\"cls_token\": special_token})\n encoded_special_token = tokenizer.encode(input_table, special_token, add_special_tokens=False)\n self.assertEqual(len(encoded_special_token), 1)\n\n decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)\n self.assertTrue(special_token not in decoded)\n\n def test_add_tokens_tokenizer(self):\n tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n vocab_size = tokenizer.vocab_size\n all_size = len(tokenizer)\n\n self.assertNotEqual(vocab_size, 0)\n\n # We usually have added tokens from the start in tests because our vocab fixtures are\n # smaller than the original vocabs - let's not assert this\n # self.assertEqual(vocab_size, all_size)\n\n new_toks = [\"aaaaa bbbbbb\", \"cccccccccdddddddd\"]\n added_toks = tokenizer.add_tokens(new_toks)\n vocab_size_2 = tokenizer.vocab_size\n all_size_2 = len(tokenizer)\n\n self.assertNotEqual(vocab_size_2, 0)\n self.assertEqual(vocab_size, vocab_size_2)\n self.assertEqual(added_toks, len(new_toks))\n self.assertEqual(all_size_2, all_size + len(new_toks))\n\n tokens = tokenizer.encode(table, \"aaaaa bbbbbb low cccccccccdddddddd l\", add_special_tokens=False)\n\n self.assertGreaterEqual(len(tokens), 4)\n self.assertGreater(tokens[0], tokenizer.vocab_size - 1)\n self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)\n\n new_toks_2 = {\"eos_token\": \">>>>|||<||<<|<<\", \"pad_token\": \"<<<<<|||>|>>>>|>\"}\n added_toks_2 = tokenizer.add_special_tokens(new_toks_2)\n vocab_size_3 = tokenizer.vocab_size\n all_size_3 = len(tokenizer)\n\n self.assertNotEqual(vocab_size_3, 0)\n self.assertEqual(vocab_size, vocab_size_3)\n self.assertEqual(added_toks_2, len(new_toks_2))\n self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))\n\n tokens = tokenizer.encode(\n table,\n \">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l\",\n add_special_tokens=False,\n )\n\n self.assertGreaterEqual(len(tokens), 6)\n 
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)\n self.assertGreater(tokens[0], tokens[1])\n self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)\n self.assertGreater(tokens[-2], tokens[-3])\n self.assertEqual(tokens[0], tokenizer.eos_token_id)\n self.assertEqual(tokens[-2], tokenizer.pad_token_id)\n\n @require_tokenizers\n def test_encode_decode_with_spaces(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n\n new_toks = [AddedToken(\"[ABC]\", normalized=False), AddedToken(\"[DEF]\", normalized=False)]\n tokenizer.add_tokens(new_toks)\n input = \"[ABC][DEF][ABC][DEF]\"\n if self.space_between_special_tokens:\n output = \"[ABC] [DEF] [ABC] [DEF]\"\n else:\n output = input\n encoded = tokenizer.encode(table, input, add_special_tokens=False)\n decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)\n self.assertIn(decoded, [output, output.lower()])\n\n def test_encode_plus_with_padding(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n sequence = \"Sequence\"\n\n # check correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequence)\n\n padding_size = 10\n padding_idx = tokenizer.pad_token_id\n token_type_padding_idx = tokenizer.pad_token_type_id\n\n encoded_sequence = tokenizer.encode_plus(table, sequence, return_special_tokens_mask=True)\n input_ids = encoded_sequence[\"input_ids\"]\n special_tokens_mask = encoded_sequence[\"special_tokens_mask\"]\n sequence_length = len(input_ids)\n\n # Test 'longest' and 'no_padding' don't do anything\n tokenizer.padding_side = \"right\"\n\n not_padded_sequence = tokenizer.encode_plus(\n table,\n sequence,\n padding=False,\n return_special_tokens_mask=True,\n )\n not_padded_input_ids = not_padded_sequence[\"input_ids\"]\n\n not_padded_special_tokens_mask = not_padded_sequence[\"special_tokens_mask\"]\n not_padded_sequence_length = len(not_padded_input_ids)\n\n assert sequence_length == not_padded_sequence_length\n assert input_ids == not_padded_input_ids\n assert special_tokens_mask == not_padded_special_tokens_mask\n\n not_padded_sequence = tokenizer.encode_plus(\n table,\n sequence,\n padding=False,\n return_special_tokens_mask=True,\n )\n not_padded_input_ids = not_padded_sequence[\"input_ids\"]\n\n not_padded_special_tokens_mask = not_padded_sequence[\"special_tokens_mask\"]\n not_padded_sequence_length = len(not_padded_input_ids)\n\n assert sequence_length == not_padded_sequence_length\n assert input_ids == not_padded_input_ids\n assert special_tokens_mask == not_padded_special_tokens_mask\n\n # Test right padding\n tokenizer.padding_side = \"right\"\n\n right_padded_sequence = tokenizer.encode_plus(\n table,\n sequence,\n max_length=sequence_length + padding_size,\n padding=\"max_length\",\n return_special_tokens_mask=True,\n )\n right_padded_input_ids = right_padded_sequence[\"input_ids\"]\n\n right_padded_special_tokens_mask = right_padded_sequence[\"special_tokens_mask\"]\n right_padded_sequence_length = len(right_padded_input_ids)\n\n assert sequence_length + padding_size == right_padded_sequence_length\n assert input_ids + [padding_idx] * padding_size == right_padded_input_ids\n assert special_tokens_mask + [1] * padding_size == 
right_padded_special_tokens_mask\n\n # Test left padding\n tokenizer.padding_side = \"left\"\n left_padded_sequence = tokenizer.encode_plus(\n table,\n sequence,\n max_length=sequence_length + padding_size,\n padding=\"max_length\",\n return_special_tokens_mask=True,\n )\n left_padded_input_ids = left_padded_sequence[\"input_ids\"]\n left_padded_special_tokens_mask = left_padded_sequence[\"special_tokens_mask\"]\n left_padded_sequence_length = len(left_padded_input_ids)\n\n assert sequence_length + padding_size == left_padded_sequence_length\n assert [padding_idx] * padding_size + input_ids == left_padded_input_ids\n assert [1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask\n\n if \"token_type_ids\" in tokenizer.model_input_names:\n token_type_ids = encoded_sequence[\"token_type_ids\"]\n left_padded_token_type_ids = left_padded_sequence[\"token_type_ids\"]\n right_padded_token_type_ids = right_padded_sequence[\"token_type_ids\"]\n\n assert (\n token_type_ids + [[token_type_padding_idx] * 7] * padding_size == right_padded_token_type_ids\n )\n assert [[token_type_padding_idx] * 7] * padding_size + token_type_ids == left_padded_token_type_ids\n\n if \"attention_mask\" in tokenizer.model_input_names:\n attention_mask = encoded_sequence[\"attention_mask\"]\n right_padded_attention_mask = right_padded_sequence[\"attention_mask\"]\n left_padded_attention_mask = left_padded_sequence[\"attention_mask\"]\n\n assert attention_mask + [0] * padding_size == right_padded_attention_mask\n assert [0] * padding_size + attention_mask == left_padded_attention_mask\n\n def test_internal_consistency(self):\n tokenizers = self.get_tokenizers()\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n input_text, output_text = self.get_input_output_texts(tokenizer)\n\n tokens = tokenizer.tokenize(input_text)\n ids = tokenizer.convert_tokens_to_ids(tokens)\n ids_2 = tokenizer.encode(table, input_text, add_special_tokens=False)\n self.assertListEqual(ids, ids_2)\n\n tokens_2 = tokenizer.convert_ids_to_tokens(ids)\n self.assertNotEqual(len(tokens_2), 0)\n text_2 = tokenizer.decode(ids)\n self.assertIsInstance(text_2, str)\n\n self.assertEqual(text_2, output_text)\n\n def test_mask_output(self):\n tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table, query = self.get_table_and_query(tokenizer)\n\n if (\n tokenizer.build_inputs_with_special_tokens.__qualname__.split(\".\")[0] != \"PreTrainedTokenizer\"\n and \"token_type_ids\" in tokenizer.model_input_names\n ):\n information = tokenizer.encode_plus(table, query, add_special_tokens=True)\n sequences, mask = information[\"input_ids\"], information[\"token_type_ids\"]\n self.assertEqual(len(sequences), len(mask))\n\n @unittest.skip(\"TAPAS tokenizer only handles two sequences.\")\n def test_maximum_encoding_length_pair_input(self):\n pass\n\n @unittest.skip(\"TAPAS tokenizer only handles two sequences.\")\n def test_maximum_encoding_length_single_input(self):\n pass\n\n def test_number_of_added_tokens(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n\n table, query = self.get_table_and_query(tokenizer)\n\n sequences = tokenizer.encode(table, query, add_special_tokens=False)\n attached_sequences = tokenizer.encode(table, query, add_special_tokens=True)\n\n # 
Method is implemented (e.g. not GPT-2)\n if len(attached_sequences) != 2:\n self.assertEqual(\n tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)\n )\n\n def test_padding_to_max_length(self):\n \"\"\"We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated\"\"\"\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer)\n sequence = \"Sequence\"\n padding_size = 10\n\n # check correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequence)\n\n padding_idx = tokenizer.pad_token_id\n\n # Check that it correctly pads when a maximum length is specified along with the padding flag set to True\n tokenizer.padding_side = \"right\"\n encoded_sequence = tokenizer.encode(table, sequence)\n sequence_length = len(encoded_sequence)\n # FIXME: the next line should be padding(max_length) to avoid warning\n padded_sequence = tokenizer.encode(\n table, sequence, max_length=sequence_length + padding_size, padding=True\n )\n padded_sequence_length = len(padded_sequence)\n assert sequence_length + padding_size == padded_sequence_length\n assert encoded_sequence + [padding_idx] * padding_size == padded_sequence\n\n # Check that nothing is done when a maximum length is not specified\n encoded_sequence = tokenizer.encode(table, sequence)\n sequence_length = len(encoded_sequence)\n\n tokenizer.padding_side = \"right\"\n padded_sequence_right = tokenizer.encode(table, sequence, pad_to_max_length=True)\n padded_sequence_right_length = len(padded_sequence_right)\n assert sequence_length == padded_sequence_right_length\n assert encoded_sequence == padded_sequence_right\n\n def test_call(self):\n # Tests that all call wrap to encode_plus and batch_encode_plus\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n sequences = [\n \"Testing batch encode plus\",\n \"Testing batch encode plus with different sequence lengths\",\n \"Testing batch encode plus with different sequence lengths correctly pads\",\n ]\n\n # Test not batched\n table = self.get_table(tokenizer, length=0)\n encoded_sequences_1 = tokenizer.encode_plus(table, sequences[0])\n encoded_sequences_2 = tokenizer(table, sequences[0])\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n\n # Test not batched pairs\n table = self.get_table(tokenizer, length=10)\n encoded_sequences_1 = tokenizer.encode_plus(table, sequences[1])\n encoded_sequences_2 = tokenizer(table, sequences[1])\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n\n # Test batched\n table = self.get_table(tokenizer, length=0)\n encoded_sequences_1 = tokenizer.batch_encode_plus(table, sequences)\n encoded_sequences_2 = tokenizer(table, sequences)\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n\n def test_batch_encode_plus_batch_sequence_length(self):\n # Tests that all encoded values have the correct size\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n sequences = [\n \"Testing batch encode plus\",\n \"Testing batch encode plus with different sequence lengths\",\n \"Testing batch encode plus with different sequence lengths correctly pads\",\n ]\n\n 
encoded_sequences = [tokenizer.encode_plus(table, sequence) for sequence in sequences]\n encoded_sequences_batch = tokenizer.batch_encode_plus(table, sequences, padding=False)\n self.assertListEqual(\n encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)\n )\n\n maximum_length = len(\n max([encoded_sequence[\"input_ids\"] for encoded_sequence in encoded_sequences], key=len)\n )\n\n # check correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequences)\n\n encoded_sequences_padded = [\n tokenizer.encode_plus(table, sequence, max_length=maximum_length, padding=\"max_length\")\n for sequence in sequences\n ]\n\n encoded_sequences_batch_padded = tokenizer.batch_encode_plus(table, sequences, padding=True)\n self.assertListEqual(\n encoded_sequences_padded,\n self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),\n )\n\n # check 'longest' is unsensitive to a max length\n encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=True)\n encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(\n table, sequences, max_length=maximum_length + 10, padding=\"longest\"\n )\n for key in encoded_sequences_batch_padded_1.keys():\n self.assertListEqual(\n encoded_sequences_batch_padded_1[key],\n encoded_sequences_batch_padded_2[key],\n )\n\n # check 'no_padding' is unsensitive to a max length\n encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=False)\n encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(\n table, sequences, max_length=maximum_length + 10, padding=False\n )\n for key in encoded_sequences_batch_padded_1.keys():\n self.assertListEqual(\n encoded_sequences_batch_padded_1[key],\n encoded_sequences_batch_padded_2[key],\n )\n\n @unittest.skip(\"batch_encode_plus does not handle overflowing tokens.\")\n def test_batch_encode_plus_overflowing_tokens(self):\n pass\n\n def test_batch_encode_plus_padding(self):\n # Test that padded sequences are equivalent between batch_encode_plus and encode_plus\n\n # Right padding tests\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n sequences = [\n \"Testing batch encode plus\",\n \"Testing batch encode plus with different sequence lengths\",\n \"Testing batch encode plus with different sequence lengths correctly pads\",\n ]\n\n max_length = 100\n\n # check correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequences)\n\n encoded_sequences = [\n tokenizer.encode_plus(table, sequence, max_length=max_length, padding=\"max_length\")\n for sequence in sequences\n ]\n encoded_sequences_batch = tokenizer.batch_encode_plus(\n table, sequences, max_length=max_length, padding=\"max_length\"\n )\n self.assertListEqual(\n encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)\n )\n\n # Left padding tests\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n tokenizer.padding_side = \"left\"\n sequences = [\n \"Testing batch encode plus\",\n \"Testing batch encode plus with different sequence lengths\",\n \"Testing batch encode plus with different sequence lengths correctly pads\",\n ]\n\n max_length = 100\n\n # check 
correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequences)\n\n encoded_sequences = [\n tokenizer.encode_plus(table, sequence, max_length=max_length, padding=\"max_length\")\n for sequence in sequences\n ]\n encoded_sequences_batch = tokenizer.batch_encode_plus(\n table, sequences, max_length=max_length, padding=\"max_length\"\n )\n self.assertListEqual(\n encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)\n )\n\n def test_padding_to_multiple_of(self):\n tokenizers = self.get_tokenizers()\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n if tokenizer.pad_token is None:\n self.skipTest(\"No padding token.\")\n else:\n empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)\n normal_tokens = tokenizer(table, \"This is a sample input\", padding=True, pad_to_multiple_of=8)\n for key, value in empty_tokens.items():\n self.assertEqual(len(value) % 8, 0, f\"BatchEncoding.{key} is not multiple of 8\")\n for key, value in normal_tokens.items():\n self.assertEqual(len(value) % 8, 0, f\"BatchEncoding.{key} is not multiple of 8\")\n\n normal_tokens = tokenizer(table, \"This\", pad_to_multiple_of=8)\n for key, value in normal_tokens.items():\n self.assertNotEqual(len(value) % 8, 0, f\"BatchEncoding.{key} is not multiple of 8\")\n\n # Should also work with truncation\n normal_tokens = tokenizer(table, \"This\", padding=True, truncation=True, pad_to_multiple_of=8)\n for key, value in normal_tokens.items():\n self.assertEqual(len(value) % 8, 0, f\"BatchEncoding.{key} is not multiple of 8\")\n\n @unittest.skip(\"TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`\")\n def test_prepare_for_model(self):\n pass\n\n def test_tokenizer_slow_store_full_signature(self):\n signature = inspect.signature(self.tokenizer_class.__init__)\n tokenizer = self.get_tokenizer()\n\n for parameter_name, parameter in signature.parameters.items():\n if parameter.default != inspect.Parameter.empty:\n self.assertIn(parameter_name, tokenizer.init_kwargs)\n\n def test_special_tokens_mask_input_pairs(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n sequence_0 = \"Encode this.\"\n empty_table = self.get_table(tokenizer, length=0)\n table = self.get_table(tokenizer, length=10)\n encoded_sequence = tokenizer.encode(empty_table, sequence_0, add_special_tokens=False)\n encoded_sequence += tokenizer.encode(table, \"\", add_special_tokens=False)\n encoded_sequence_dict = tokenizer.encode_plus(\n table,\n sequence_0,\n add_special_tokens=True,\n return_special_tokens_mask=True,\n # add_prefix_space=False,\n )\n encoded_sequence_w_special = encoded_sequence_dict[\"input_ids\"]\n special_tokens_mask = encoded_sequence_dict[\"special_tokens_mask\"]\n self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))\n\n filtered_sequence = [\n (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)\n ]\n filtered_sequence = [x for x in filtered_sequence if x is not None]\n self.assertEqual(encoded_sequence, filtered_sequence)\n\n def test_special_tokens_mask(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, 
length=0)\n sequence_0 = \"Encode this.\"\n # Testing single inputs\n encoded_sequence = tokenizer.encode(table, sequence_0, add_special_tokens=False)\n encoded_sequence_dict = tokenizer.encode_plus(\n table, sequence_0, add_special_tokens=True, return_special_tokens_mask=True\n )\n encoded_sequence_w_special = encoded_sequence_dict[\"input_ids\"]\n special_tokens_mask = encoded_sequence_dict[\"special_tokens_mask\"]\n self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))\n\n filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]\n self.assertEqual(encoded_sequence, filtered_sequence)\n\n def test_save_and_load_tokenizer(self):\n # safety check on max_len default value so we are sure the test works\n tokenizers = self.get_tokenizers()\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n self.assertNotEqual(tokenizer.model_max_length, 42)\n\n # Now let's start the test\n tokenizers = self.get_tokenizers()\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n # Isolate this from the other tests because we save additional tokens/etc\n table = self.get_table(tokenizer, length=0)\n tmpdirname = tempfile.mkdtemp()\n\n sample_text = \" He is very happy, UNwant\\u00E9d,running\"\n before_tokens = tokenizer.encode(table, sample_text, add_special_tokens=False)\n before_vocab = tokenizer.get_vocab()\n tokenizer.save_pretrained(tmpdirname)\n\n after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)\n after_tokens = after_tokenizer.encode(table, sample_text, add_special_tokens=False)\n after_vocab = after_tokenizer.get_vocab()\n self.assertListEqual(before_tokens, after_tokens)\n self.assertDictEqual(before_vocab, after_vocab)\n\n shutil.rmtree(tmpdirname)\n\n @unittest.skip(\"Not implemented\")\n def test_right_and_left_truncation(self):\n pass\n\n def test_right_and_left_padding(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n table = self.get_table(tokenizer, length=0)\n sequence = \"Sequence\"\n padding_size = 10\n\n # check correct behaviour if no pad_token_id exists and add it eventually\n self._check_no_pad_token_padding(tokenizer, sequence)\n\n padding_idx = tokenizer.pad_token_id\n\n # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True\n tokenizer.padding_side = \"right\"\n encoded_sequence = tokenizer.encode(table, sequence)\n sequence_length = len(encoded_sequence)\n padded_sequence = tokenizer.encode(\n table, sequence, max_length=sequence_length + padding_size, padding=\"max_length\"\n )\n padded_sequence_length = len(padded_sequence)\n assert sequence_length + padding_size == padded_sequence_length\n assert encoded_sequence + [padding_idx] * padding_size == padded_sequence\n\n # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True\n tokenizer.padding_side = \"left\"\n encoded_sequence = tokenizer.encode(table, sequence)\n sequence_length = len(encoded_sequence)\n padded_sequence = tokenizer.encode(\n table, sequence, max_length=sequence_length + padding_size, padding=\"max_length\"\n )\n padded_sequence_length = len(padded_sequence)\n assert sequence_length + padding_size == padded_sequence_length\n assert [padding_idx] * padding_size + encoded_sequence == padded_sequence\n\n # RIGHT & LEFT PADDING - 
Check that nothing is done for 'longest' and 'no_padding'\n encoded_sequence = tokenizer.encode(table, sequence)\n sequence_length = len(encoded_sequence)\n\n tokenizer.padding_side = \"right\"\n padded_sequence_right = tokenizer.encode(table, sequence, padding=True)\n padded_sequence_right_length = len(padded_sequence_right)\n assert sequence_length == padded_sequence_right_length\n assert encoded_sequence == padded_sequence_right\n\n tokenizer.padding_side = \"left\"\n padded_sequence_left = tokenizer.encode(table, sequence, padding=\"longest\")\n padded_sequence_left_length = len(padded_sequence_left)\n assert sequence_length == padded_sequence_left_length\n assert encoded_sequence == padded_sequence_left\n\n tokenizer.padding_side = \"right\"\n padded_sequence_right = tokenizer.encode(table, sequence)\n padded_sequence_right_length = len(padded_sequence_right)\n assert sequence_length == padded_sequence_right_length\n assert encoded_sequence == padded_sequence_right\n\n tokenizer.padding_side = \"left\"\n padded_sequence_left = tokenizer.encode(table, sequence, padding=False)\n padded_sequence_left_length = len(padded_sequence_left)\n assert sequence_length == padded_sequence_left_length\n assert encoded_sequence == padded_sequence_left\n\n def test_token_type_ids(self):\n tokenizers = self.get_tokenizers()\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n empty_table = self.get_table(tokenizer, length=0)\n seq_0 = \"Test this method.\"\n\n # We want to have sequence 0 and sequence 1 are tagged\n # respectively with 0 and 1 token_ids\n # (regardless of whether the model use token type ids)\n # We use this assumption in the QA pipeline among other place\n output = tokenizer(empty_table, seq_0, return_token_type_ids=True)\n\n # Assert that the token type IDs have the same length as the input IDs\n self.assertEqual(len(output[\"token_type_ids\"]), len(output[\"input_ids\"]))\n\n # Assert that each token type ID has 7 values\n self.assertTrue(all(len(token_type_ids) == 7 for token_type_ids in output[\"token_type_ids\"]))\n\n # Do the same test as modeling common.\n self.assertIn(0, output[\"token_type_ids\"][0])\n\n @require_torch\n @slow\n @require_scatter\n def test_torch_encode_plus_sent_to_model(self):\n import torch\n\n from transformers import MODEL_MAPPING, TOKENIZER_MAPPING\n\n MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)\n\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n\n if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:\n return\n\n config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]\n config = config_class()\n\n if config.is_encoder_decoder or config.pad_token_id is None:\n return\n\n model = model_class(config)\n\n # Make sure the model contains at least the full vocabulary size in its embedding matrix\n is_using_common_embeddings = hasattr(model.get_input_embeddings(), \"weight\")\n assert (\n (model.get_input_embeddings().weight.shape[0] >= len(tokenizer))\n if is_using_common_embeddings\n else True\n )\n\n # Build sequence\n first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]\n sequence = \" \".join(first_ten_tokens)\n table = self.get_table(tokenizer, length=0)\n encoded_sequence = tokenizer.encode_plus(table, sequence, return_tensors=\"pt\")\n batch_encoded_sequence = tokenizer.batch_encode_plus(table, [sequence, sequence], return_tensors=\"pt\")\n # This 
should not fail\n\n with torch.no_grad(): # saves some time\n model(**encoded_sequence)\n model(**batch_encoded_sequence)\n\n @unittest.skip(\"TAPAS doesn't handle pre-tokenized inputs.\")\n def test_pretokenized_inputs(self):\n pass\n\n @slow\n def test_tapas_truncation_integration_test(self):\n data = {\n \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n \"Age\": [\"56\", \"45\", \"59\"],\n \"Number of movies\": [\"87\", \"53\", \"69\"],\n \"Date of birth\": [\"18 december 1963\", \"11 november 1974\", \"6 may 1961\"],\n }\n queries = [\n \"When was Brad Pitt born?\",\n \"Which actor appeared in the least number of movies?\",\n \"What is the average number of movies?\",\n ]\n table = pd.DataFrame.from_dict(data)\n\n tokenizer = TapasTokenizer.from_pretrained(\"lysandre/tapas-temporary-repo\", model_max_length=512)\n\n for i in range(12):\n # The table cannot even encode the headers, so raise an error\n with self.assertRaises(ValueError):\n tokenizer.encode(table=table, query=queries[0], max_length=i, truncation=\"drop_rows_to_fit\")\n\n for i in range(12, 512):\n new_encoded_inputs = tokenizer.encode(\n table=table, query=queries[0], max_length=i, truncation=\"drop_rows_to_fit\"\n )\n\n # Ensure that the input IDs are less than the max length defined.\n self.assertLessEqual(len(new_encoded_inputs), i)\n\n tokenizer.model_max_length = 20\n new_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation=True)\n dropped_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation=\"drop_rows_to_fit\")\n\n # Ensure that the input IDs are still truncated when no max_length is specified\n self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)\n self.assertLessEqual(len(new_encoded_inputs), 20)\n\n @slow\n def test_min_max_question_length(self):\n data = {\n \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n \"Age\": [\"56\", \"45\", \"59\"],\n \"Number of movies\": [\"87\", \"53\", \"69\"],\n \"Date of birth\": [\"18 december 1963\", \"11 november 1974\", \"6 may 1961\"],\n }\n queries = \"When was Brad Pitt born?\"\n table = pd.DataFrame.from_dict(data)\n\n # test max_question_length\n tokenizer = TapasTokenizer.from_pretrained(\"lysandre/tapas-temporary-repo\", max_question_length=2)\n\n encoding = tokenizer(table=table, queries=queries)\n\n # query should not be tokenized as it's longer than the specified max_question_length\n expected_results = [101, 102]\n\n self.assertListEqual(encoding.input_ids[:2], expected_results)\n\n # test min_question_length\n tokenizer = TapasTokenizer.from_pretrained(\"lysandre/tapas-temporary-repo\", min_question_length=30)\n\n encoding = tokenizer(table=table, queries=queries)\n\n # query should not be tokenized as it's shorter than the specified min_question_length\n expected_results = [101, 102]\n\n self.assertListEqual(encoding.input_ids[:2], expected_results)\n\n @is_pt_tf_cross_test\n def test_batch_encode_plus_tensors(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n sequences = [\n \"Testing batch encode plus\",\n \"Testing batch encode plus with different sequence lengths\",\n \"Testing batch encode plus with different sequence lengths correctly pads\",\n ]\n\n table = self.get_table(tokenizer, length=0)\n\n # A Tensor cannot be build by sequences which are not the same size\n self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, 
return_tensors=\"pt\")\n self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors=\"tf\")\n\n if tokenizer.pad_token_id is None:\n self.assertRaises(\n ValueError,\n tokenizer.batch_encode_plus,\n table,\n sequences,\n padding=True,\n return_tensors=\"pt\",\n )\n self.assertRaises(\n ValueError,\n tokenizer.batch_encode_plus,\n table,\n sequences,\n padding=\"longest\",\n return_tensors=\"tf\",\n )\n else:\n pytorch_tensor = tokenizer.batch_encode_plus(table, sequences, padding=True, return_tensors=\"pt\")\n tensorflow_tensor = tokenizer.batch_encode_plus(\n table, sequences, padding=\"longest\", return_tensors=\"tf\"\n )\n encoded_sequences = tokenizer.batch_encode_plus(table, sequences, padding=True)\n\n for key in encoded_sequences.keys():\n pytorch_value = pytorch_tensor[key].tolist()\n tensorflow_value = tensorflow_tensor[key].numpy().tolist()\n encoded_value = encoded_sequences[key]\n\n self.assertEqual(pytorch_value, tensorflow_value, encoded_value)\n\n @slow\n def test_tapas_integration_test(self):\n data = {\n \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n \"Age\": [\"56\", \"45\", \"59\"],\n \"Number of movies\": [\"87\", \"53\", \"69\"],\n \"Date of birth\": [\"18 december 1963\", \"11 november 1974\", \"6 may 1961\"],\n }\n queries = [\n \"When was Brad Pitt born?\",\n \"Which actor appeared in the least number of movies?\",\n \"What is the average number of movies?\",\n ]\n table = pd.DataFrame.from_dict(data)\n\n tokenizer = TapasTokenizer.from_pretrained(\"google/tapas-base-finetuned-wtq\", model_max_length=512)\n\n # fmt: off\n expected_results = {'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]} # noqa: E231\n # fmt: on\n\n new_encoded_inputs = tokenizer.encode_plus(table=table, query=queries[0])\n\n self.assertDictEqual(dict(new_encoded_inputs), expected_results)\n\n @slow\n def test_full_tokenizer(self):\n data = [\n [\"Pos\", \"No\", \"Driver\", \"Team\", \"Laps\", \"Time/Retired\", \"Grid\", \"Points\"],\n [\"1\", \"32\", \"Patrick Carpentier\", \"Team Player's\", \"87\", \"1:48:11.023\", \"1\", \"22\"],\n [\"2\", \"1\", \"Bruno Junqueira\", \"Newman/Haas Racing\", \"87\", \"+0.8 secs\", \"2\", \"17\"],\n [\"3\", \"3\", \"Paul Tracy\", \"Team Player's\", \"87\", \"+28.6 secs\", \"3\", \"14\"],\n [\"4\", \"9\", \"Michel Jourdain, Jr.\", \"Team Rahal\", \"87\", \"+40.8 secs\", \"13\", \"12\"],\n [\"5\", \"34\", \"Mario Haberfeld\", \"Mi-Jack Conquest Racing\", \"87\", \"+42.1 secs\", \"6\", \"10\"],\n [\"6\", \"20\", \"Oriol Servia\", \"Patrick Racing\", \"87\", \"+1:00.2\", 
\"10\", \"8\"],\n [\"7\", \"51\", \"Adrian Fernandez\", \"Fernandez Racing\", \"87\", \"+1:01.4\", \"5\", \"6\"],\n [\"8\", \"12\", \"Jimmy Vasser\", \"American Spirit Team Johansson\", \"87\", \"+1:01.8\", \"8\", \"5\"],\n [\"9\", \"7\", \"Tiago Monteiro\", \"Fittipaldi-Dingman Racing\", \"86\", \"+ 1 Lap\", \"15\", \"4\"],\n [\"10\", \"55\", \"Mario Dominguez\", \"Herdez Competition\", \"86\", \"+ 1 Lap\", \"11\", \"3\"],\n [\"11\", \"27\", \"Bryan Herta\", \"PK Racing\", \"86\", \"+ 1 Lap\", \"12\", \"2\"],\n [\"12\", \"31\", \"Ryan Hunter-Reay\", \"American Spirit Team Johansson\", \"86\", \"+ 1 Lap\", \"17\", \"1\"],\n [\"13\", \"19\", \"Joel Camathias\", \"Dale Coyne Racing\", \"85\", \"+ 2 Laps\", \"18\", \"0\"],\n [\"14\", \"33\", \"Alex Tagliani\", \"Rocketsports Racing\", \"85\", \"+ 2 Laps\", \"14\", \"0\"],\n [\"15\", \"4\", \"Roberto Moreno\", \"Herdez Competition\", \"85\", \"+ 2 Laps\", \"9\", \"0\"],\n [\"16\", \"11\", \"Geoff Boss\", \"Dale Coyne Racing\", \"83\", \"Mechanical\", \"19\", \"0\"],\n [\"17\", \"2\", \"Sebastien Bourdais\", \"Newman/Haas Racing\", \"77\", \"Mechanical\", \"4\", \"0\"],\n [\"18\", \"15\", \"Darren Manning\", \"Walker Racing\", \"12\", \"Mechanical\", \"7\", \"0\"],\n [\"19\", \"5\", \"Rodolfo Lavin\", \"Walker Racing\", \"10\", \"Mechanical\", \"16\", \"0\"],\n ]\n query = \"what were the drivers names?\"\n table = pd.DataFrame.from_records(data[1:], columns=data[0])\n\n tokenizer = TapasTokenizer.from_pretrained(\"google/tapas-base-finetuned-wtq\", model_max_length=512)\n model_inputs = tokenizer(table, query, padding=\"max_length\")\n\n input_ids = model_inputs[\"input_ids\"]\n token_type_ids = np.array(model_inputs[\"token_type_ids\"])\n segment_ids = token_type_ids[:, 0]\n column_ids = token_type_ids[:, 1]\n row_ids = token_type_ids[:, 2]\n\n # fmt: off\n expected_results = 
{'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,1
1,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]} # noqa: E231\n # fmt: on\n\n self.assertListEqual(input_ids, expected_results[\"input_ids\"])\n self.assertListEqual(segment_ids.tolist(), expected_results[\"segment_ids\"])\n self.assertListEqual(column_ids.tolist(), expected_results[\"column_ids\"])\n self.assertListEqual(row_ids.tolist(), expected_results[\"row_ids\"])\n\n @unittest.skip(\"Skip this test while all models are still to be uploaded.\")\n def test_pretrained_model_lists(self):\n pass\n\n @unittest.skip(\"Doesn't support another framework than PyTorch\")\n def test_np_encode_plus_sent_to_model(self):\n pass\n",
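The quoted string ending above is the TAPAS tokenizer test suite; the quoted string that follows is the BigBird model test suite. As a hedged, minimal sketch of the TapasTokenizer usage pattern those tests exercise — the checkpoint name, the table data, and the query are copied from the tests themselves, while the surrounding script is illustrative only and not part of the dataset:

# Sketch only: demonstrates the table + query encoding pattern used throughout
# the TAPAS tokenizer tests above. Requires `transformers` and `pandas`.
import pandas as pd
from transformers import TapasTokenizer

tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)

data = {
    "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
    "Age": ["56", "45", "59"],
    "Number of movies": ["87", "53", "69"],
}
table = pd.DataFrame.from_dict(data)

# Rows that do not fit within model_max_length are dropped, mirroring the
# truncation="drop_rows_to_fit" behaviour checked in the truncation test above.
encoding = tokenizer(
    table=table,
    queries=["When was Brad Pitt born?"],
    padding="max_length",
    truncation="drop_rows_to_fit",
    return_tensors="pt",
)

# TAPAS attaches 7 token type ids to every position (segment, column and row
# ids among them), which is the property the token_type_ids assertions rely on.
print(encoding["input_ids"].shape)       # (1, 512)
print(encoding["token_type_ids"].shape)  # (1, 512, 7)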
"# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the PyTorch BigBird model. \"\"\"\n\n\nimport unittest\n\nfrom transformers import BigBirdConfig, is_torch_available\nfrom transformers.models.auto import get_values\nfrom transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n MODEL_FOR_PRETRAINING_MAPPING,\n BigBirdForCausalLM,\n BigBirdForMaskedLM,\n BigBirdForMultipleChoice,\n BigBirdForPreTraining,\n BigBirdForQuestionAnswering,\n BigBirdForSequenceClassification,\n BigBirdForTokenClassification,\n BigBirdModel,\n )\n from transformers.models.big_bird.modeling_big_bird import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nclass BigBirdModelTester:\n def __init__(\n self,\n parent,\n batch_size=7,\n seq_length=128,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=2,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu_new\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=256,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n attention_type=\"block_sparse\",\n use_bias=True,\n rescale_embeddings=False,\n block_size=8,\n num_rand_blocks=3,\n position_embedding_type=\"absolute\",\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = scope\n\n self.attention_type = attention_type\n self.use_bias = use_bias\n self.rescale_embeddings = rescale_embeddings\n self.block_size = block_size\n self.num_rand_blocks = num_rand_blocks\n self.position_embedding_type = position_embedding_type\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if 
self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = self.get_config()\n\n return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n\n def get_config(self):\n return BigBirdConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n is_encoder_decoder=False,\n initializer_range=self.initializer_range,\n attention_type=self.attention_type,\n use_bias=self.use_bias,\n rescale_embeddings=self.rescale_embeddings,\n block_size=self.block_size,\n num_random_blocks=self.num_rand_blocks,\n position_embedding_type=self.position_embedding_type,\n )\n\n def prepare_config_and_inputs_for_decoder(self):\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = self.prepare_config_and_inputs()\n\n config.is_decoder = True\n encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])\n encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n return (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def create_and_check_model(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BigBirdModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n result = model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_for_pretraining(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BigBirdForPreTraining(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n labels=token_labels,\n next_sentence_label=sequence_labels,\n )\n self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, config.num_labels))\n\n def create_and_check_model_as_decoder(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.add_cross_attention = True\n model = BigBirdModel(config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n 
attention_mask=input_mask,\n token_type_ids=token_type_ids,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n )\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n encoder_hidden_states=encoder_hidden_states,\n )\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_for_causal_lm(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n model = BigBirdForCausalLM(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_for_masked_lm(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BigBirdForMaskedLM(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_decoder_model_past_large_inputs(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.is_decoder = True\n config.add_cross_attention = True\n model = BigBirdForCausalLM(config=config)\n model.to(torch_device)\n model.eval()\n\n # first forward pass\n outputs = model(\n input_ids,\n attention_mask=input_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=True,\n )\n past_key_values = outputs.past_key_values\n\n # create hypothetical multiple next token and extent to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)\n\n # append to next input_ids and\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)\n\n output_from_no_past = model(\n next_input_ids,\n attention_mask=next_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_hidden_states=True,\n )[\"hidden_states\"][0]\n output_from_past = model(\n next_tokens,\n attention_mask=next_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n output_hidden_states=True,\n )[\"hidden_states\"][0]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()\n\n self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_for_question_answering(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels\n ):\n model = BigBirdForQuestionAnswering(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n start_positions=sequence_labels,\n end_positions=sequence_labels,\n )\n self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))\n\n def create_and_check_for_sequence_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = BigBirdForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def create_and_check_for_token_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = BigBirdForTokenClassification(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))\n\n def create_and_check_for_multiple_choice(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_choices = self.num_choices\n model = BigBirdForMultipleChoice(config=config)\n model.to(torch_device)\n model.eval()\n multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n result = model(\n multiple_choice_inputs_ids,\n attention_mask=multiple_choice_input_mask,\n token_type_ids=multiple_choice_token_type_ids,\n labels=choice_labels,\n )\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n def create_and_check_for_auto_padding(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ):\n model = BigBirdModel(config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_for_change_to_full_attn(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ):\n model = BigBirdModel(config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n # the config should not be changed\n self.parent.assertTrue(model.config.attention_type == 
\"block_sparse\")\n\n\n@require_torch\nclass BigBirdModelTest(ModelTesterMixin, unittest.TestCase):\n\n # head masking & pruning is currently not supported for big bird\n test_head_masking = False\n test_pruning = False\n\n # torchscript should be possible, but takes prohibitively long to test.\n # Also torchscript is not an important feature to have in the beginning.\n test_torchscript = False\n\n all_model_classes = (\n (\n BigBirdModel,\n BigBirdForPreTraining,\n BigBirdForMaskedLM,\n BigBirdForCausalLM,\n BigBirdForMultipleChoice,\n BigBirdForQuestionAnswering,\n BigBirdForSequenceClassification,\n BigBirdForTokenClassification,\n )\n if is_torch_available()\n else ()\n )\n all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else ()\n\n # special case for ForPreTraining model\n def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):\n inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)\n\n if return_labels:\n if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):\n inputs_dict[\"labels\"] = torch.zeros(\n (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device\n )\n inputs_dict[\"next_sentence_label\"] = torch.zeros(\n self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n return inputs_dict\n\n def setUp(self):\n self.model_tester = BigBirdModelTester(self)\n self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_for_pretraining(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_pretraining(*config_and_inputs)\n\n def test_for_masked_lm(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)\n\n def test_for_multiple_choice(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)\n\n def test_decoder_model_past_with_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)\n\n def test_for_question_answering(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_question_answering(*config_and_inputs)\n\n def test_for_sequence_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)\n\n def test_for_token_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_token_classification(*config_and_inputs)\n\n def test_model_as_decoder(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)\n\n def test_model_as_decoder_with_default_input_mask(self):\n # This regression test was failing with PyTorch < 1.3\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n 
encoder_attention_mask,\n ) = self.model_tester.prepare_config_and_inputs_for_decoder()\n\n input_mask = None\n\n self.model_tester.create_and_check_model_as_decoder(\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def test_retain_grad_hidden_states_attentions(self):\n # bigbird cannot keep gradients in attentions when `attention_type=block_sparse`\n\n if self.model_tester.attention_type == \"original_full\":\n super().test_retain_grad_hidden_states_attentions()\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = BigBirdForPreTraining.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n def test_model_various_attn_type(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n for type in [\"original_full\", \"block_sparse\"]:\n config_and_inputs[0].attention_type = type\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_fast_integration(self):\n # fmt: off\n input_ids = torch.tensor(\n [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231\n dtype=torch.long,\n device=torch_device,\n )\n # fmt: on\n input_ids = input_ids % self.model_tester.vocab_size\n input_ids[1] = input_ids[1] - 1\n\n attention_mask = torch.ones((input_ids.shape), device=torch_device)\n attention_mask[:, :-10] = 0\n\n config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs()\n torch.manual_seed(0)\n model = BigBirdModel(config).eval().to(torch_device)\n\n with torch.no_grad():\n hidden_states = model(input_ids, attention_mask=attention_mask).last_hidden_state\n self.assertTrue(\n torch.allclose(\n hidden_states[0, 0, :5],\n torch.tensor([1.4825, 0.0774, 0.8226, -0.2962, -0.9593], device=torch_device),\n atol=1e-3,\n )\n )\n\n def test_auto_padding(self):\n self.model_tester.seq_length = 241\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_auto_padding(*config_and_inputs)\n\n def test_for_change_to_full_attn(self):\n self.model_tester.seq_length = 9\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs)\n\n # overwrite from common in order to skip the check on `attentions`\n # also use `5e-5` to avoid flaky test failure\n def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name=\"outputs\", 
attributes=None):\n # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,\n # an effort was done to return `attention_probs` (yet to be verified).\n if name.startswith(\"outputs.attentions\"):\n return\n else:\n super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)\n\n\n@require_torch\n@slow\nclass BigBirdModelIntegrationTest(unittest.TestCase):\n # we can have this true once block_sparse attn_probs works accurately\n test_attention_probs = False\n\n def _get_dummy_input_ids(self):\n # fmt: off\n ids = torch.tensor(\n [[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231\n dtype=torch.long,\n device=torch_device,\n )\n # fmt: on\n return ids\n\n def test_inference_block_sparse_pretraining(self):\n model = BigBirdForPreTraining.from_pretrained(\"google/bigbird-roberta-base\", attention_type=\"block_sparse\")\n model.to(torch_device)\n\n input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device)\n outputs = model(input_ids)\n prediction_logits = outputs.prediction_logits\n seq_relationship_logits = outputs.seq_relationship_logits\n\n self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358)))\n self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))\n\n expected_prediction_logits_slice = torch.tensor(\n [\n [-0.2420, -0.6048, -0.0614, 7.8422],\n [-0.0596, -0.0104, -1.8408, 9.3352],\n [1.0588, 0.7999, 5.0770, 8.7555],\n [-0.1385, -1.7199, -1.7613, 6.1094],\n ],\n device=torch_device,\n )\n self.assertTrue(\n torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)\n )\n\n expected_seq_relationship_logits = torch.tensor([[58.8196, 56.3629]], device=torch_device)\n self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))\n\n def test_inference_full_pretraining(self):\n model = BigBirdForPreTraining.from_pretrained(\"google/bigbird-roberta-base\", attention_type=\"original_full\")\n model.to(torch_device)\n\n input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device)\n outputs = model(input_ids)\n prediction_logits = outputs.prediction_logits\n seq_relationship_logits = outputs.seq_relationship_logits\n\n self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358)))\n self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))\n\n expected_prediction_logits_slice = torch.tensor(\n [\n [0.1499, -1.1217, 0.1990, 8.4499],\n [-2.7757, -3.0687, -4.8577, 7.5156],\n [1.5446, 0.1982, 4.3016, 10.4281],\n [-1.3705, -4.0130, -3.9629, 5.1526],\n ],\n device=torch_device,\n )\n self.assertTrue(\n torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)\n )\n\n expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)\n self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))\n\n def 
test_block_sparse_attention_probs(self):\n \"\"\"\n Asserting if outputted attention matrix is similar to hard coded attention matrix\n \"\"\"\n\n if not self.test_attention_probs:\n return\n\n model = BigBirdModel.from_pretrained(\n \"google/bigbird-roberta-base\", attention_type=\"block_sparse\", num_random_blocks=3, block_size=16\n )\n model.to(torch_device)\n model.eval()\n config = model.config\n\n input_ids = self._get_dummy_input_ids()\n\n hidden_states = model.embeddings(input_ids)\n\n batch_size, seqlen, _ = hidden_states.size()\n attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float)\n to_seq_length = from_seq_length = seqlen\n from_block_size = to_block_size = config.block_size\n\n blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(\n attn_mask, config.block_size\n )\n from_blocked_mask = to_blocked_mask = blocked_mask\n\n for i in range(config.num_hidden_layers):\n pointer = model.encoder.layer[i].attention.self\n\n query_layer = pointer.transpose_for_scores(pointer.query(hidden_states))\n key_layer = pointer.transpose_for_scores(pointer.key(hidden_states))\n value_layer = pointer.transpose_for_scores(pointer.value(hidden_states))\n\n context_layer, attention_probs = pointer.bigbird_block_sparse_attention(\n query_layer,\n key_layer,\n value_layer,\n band_mask,\n from_mask,\n to_mask,\n from_blocked_mask,\n to_blocked_mask,\n pointer.num_attention_heads,\n pointer.num_random_blocks,\n pointer.attention_head_size,\n from_block_size,\n to_block_size,\n batch_size,\n from_seq_length,\n to_seq_length,\n seed=pointer.seed,\n plan_from_length=None,\n plan_num_rand_blocks=None,\n output_attentions=True,\n )\n\n context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)\n cl = torch.einsum(\"bhqk,bhkd->bhqd\", attention_probs, value_layer)\n cl = cl.view(context_layer.size())\n\n self.assertTrue(torch.allclose(context_layer, cl, atol=0.001))\n\n def test_block_sparse_context_layer(self):\n model = BigBirdModel.from_pretrained(\n \"google/bigbird-roberta-base\", attention_type=\"block_sparse\", num_random_blocks=3, block_size=16\n )\n model.to(torch_device)\n model.eval()\n config = model.config\n\n input_ids = self._get_dummy_input_ids()\n dummy_hidden_states = model.embeddings(input_ids)\n\n attn_mask = torch.ones_like(input_ids, device=torch_device)\n blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(\n attn_mask, config.block_size\n )\n targeted_cl = torch.tensor(\n [\n [0.1874, 1.5260, 0.2335, -0.0473, -0.0961, 1.8384, -0.0141, 0.1250, 0.0085, -0.0048],\n [-0.0554, 0.0728, 0.1683, -0.1332, 0.1741, 0.1337, -0.2380, -0.1849, -0.0390, -0.0259],\n [-0.0419, 0.0767, 0.1591, -0.1399, 0.1789, 0.1257, -0.2406, -0.1772, -0.0261, -0.0079],\n [0.1860, 1.5172, 0.2326, -0.0473, -0.0953, 1.8291, -0.0147, 0.1245, 0.0082, -0.0046],\n [0.1879, 1.5296, 0.2335, -0.0471, -0.0975, 1.8433, -0.0136, 0.1260, 0.0086, -0.0054],\n [0.1854, 1.5147, 0.2334, -0.0480, -0.0956, 1.8250, -0.0149, 0.1222, 0.0082, -0.0060],\n [0.1859, 1.5184, 0.2334, -0.0474, -0.0955, 1.8297, -0.0143, 0.1234, 0.0079, -0.0054],\n [0.1885, 1.5336, 0.2335, -0.0467, -0.0979, 1.8481, -0.0130, 0.1269, 0.0085, -0.0049],\n [0.1881, 1.5305, 0.2335, -0.0471, -0.0976, 1.8445, -0.0135, 0.1262, 0.0086, -0.0053],\n [0.1852, 1.5148, 0.2333, -0.0480, -0.0949, 1.8254, -0.0151, 0.1225, 0.0079, -0.0055],\n [0.1877, 1.5292, 0.2335, -0.0470, -0.0972, 1.8431, -0.0135, 0.1259, 0.0084, -0.0052],\n [0.1874, 1.5261, 0.2334, -0.0472, 
-0.0968, 1.8393, -0.0140, 0.1251, 0.0084, -0.0052],\n [0.1853, 1.5151, 0.2331, -0.0478, -0.0948, 1.8256, -0.0154, 0.1228, 0.0086, -0.0052],\n [0.1867, 1.5233, 0.2334, -0.0475, -0.0965, 1.8361, -0.0139, 0.1247, 0.0084, -0.0054],\n ],\n device=torch_device,\n )\n\n context_layer = model.encoder.layer[0].attention.self(\n dummy_hidden_states,\n band_mask=band_mask,\n from_mask=from_mask,\n to_mask=to_mask,\n from_blocked_mask=blocked_mask,\n to_blocked_mask=blocked_mask,\n )\n context_layer = context_layer[0]\n\n self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))\n self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001))\n\n def test_tokenizer_inference(self):\n tokenizer = BigBirdTokenizer.from_pretrained(\"google/bigbird-roberta-base\")\n model = BigBirdModel.from_pretrained(\n \"google/bigbird-roberta-base\", attention_type=\"block_sparse\", num_random_blocks=3, block_size=16\n )\n model.to(torch_device)\n\n text = [\n \"Transformer-based models are unable to process long sequences due to their self-attention operation,\"\n \" which scales quadratically with the sequence length. To address this limitation, we introduce the\"\n \" Longformer with an attention mechanism that scales linearly with sequence length, making it easy to\"\n \" process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in\"\n \" replacement for the standard self-attention and combines a local windowed attention with a task\"\n \" motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer\"\n \" on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In\"\n \" contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream\"\n \" tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new\"\n \" state-of-the-art results on WikiHop and TriviaQA.\"\n ]\n inputs = tokenizer(text)\n\n for k in inputs:\n inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long)\n\n prediction = model(**inputs)\n prediction = prediction[0]\n\n self.assertEqual(prediction.shape, torch.Size((1, 199, 768)))\n\n expected_prediction = torch.tensor(\n [\n [-0.0213, -0.2213, -0.0061, 0.0687],\n [0.0977, 0.1858, 0.2374, 0.0483],\n [0.2112, -0.2524, 0.5793, 0.0967],\n [0.2473, -0.5070, -0.0630, 0.2174],\n [0.2885, 0.1139, 0.6071, 0.2991],\n [0.2328, -0.2373, 0.3648, 0.1058],\n [0.2517, -0.0689, 0.0555, 0.0880],\n [0.1021, -0.1495, -0.0635, 0.1891],\n [0.0591, -0.0722, 0.2243, 0.2432],\n [-0.2059, -0.2679, 0.3225, 0.6183],\n [0.2280, -0.2618, 0.1693, 0.0103],\n [0.0183, -0.1375, 0.2284, -0.1707],\n ],\n device=torch_device,\n )\n self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4))\n\n def test_inference_question_answering(self):\n tokenizer = BigBirdTokenizer.from_pretrained(\"google/bigbird-base-trivia-itc\")\n model = BigBirdForQuestionAnswering.from_pretrained(\n \"google/bigbird-base-trivia-itc\", attention_type=\"block_sparse\", block_size=16, num_random_blocks=3\n )\n model.to(torch_device)\n\n context = (\n \"The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and\"\n \" Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago\"\n \" and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. 
BigBird, is a\"\n \" sparse-attention based transformer which extends Transformer based models, such as BERT to much longer\"\n \" sequences. In addition to sparse attention, BigBird also applies global attention as well as random\"\n \" attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and\"\n \" random attention approximates full attention, while being computationally much more efficient for longer\"\n \" sequences. As a consequence of the capability to handle longer context, BigBird has shown improved\"\n \" performance on various long document NLP tasks, such as question answering and summarization, compared\"\n \" to BERT or RoBERTa.\"\n )\n\n question = [\n \"Which is better for longer sequences- BigBird or BERT?\",\n \"What is the benefit of using BigBird over BERT?\",\n ]\n inputs = tokenizer(\n question,\n [context, context],\n padding=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n max_length=256,\n truncation=True,\n )\n\n inputs = {k: v.to(torch_device) for k, v in inputs.items()}\n\n start_logits, end_logits = model(**inputs).to_tuple()\n\n # fmt: off\n target_start_logits = torch.tensor(\n [[-8.9304, -10.3849, -14.4997, -9.6497, -13.9469, -7.8134, -8.9687, -13.3585, -9.7987, -13.8869, -9.2632, -8.9294, -13.6721, -7.3198, -9.5434, -11.2641, -14.3245, -9.5705, -12.7367, -8.6168, -11.083, -13.7573, -8.1151, -14.5329, -7.6876, -15.706, -12.8558, -9.1135, 8.0909, -3.1925, -11.5812, -9.4822], [-11.5595, -14.5591, -10.2978, -14.8445, -10.2092, -11.1899, -13.8356, -10.5644, -14.7706, -9.9841, -11.0052, -14.1862, -8.8173, -11.1098, -12.4686, -15.0531, -11.0196, -13.6614, -10.0236, -11.8151, -14.8744, -9.5123, -15.1605, -8.6472, -15.4184, -8.898, -9.6328, -7.0258, -11.3365, -14.4065, -10.2587, -8.9103]], # noqa: E231\n device=torch_device,\n )\n target_end_logits = torch.tensor(\n [[-12.4131, -8.5959, -15.7163, -11.1524, -15.9913, -12.2038, -7.8902, -16.0296, -12.164, -16.5017, -13.3332, -6.9488, -15.7756, -13.8506, -11.0779, -9.2893, -15.0426, -10.1963, -17.3292, -12.2945, -11.5337, -16.4514, -9.1564, -17.5001, -9.1562, -16.2971, -13.3199, -7.5724, -5.1175, 7.2168, -10.3804, -11.9873], [-10.8654, -14.9967, -11.4144, -16.9189, -14.2673, -9.7068, -15.0182, -12.8846, -16.8716, -13.665, -10.3113, -15.1436, -14.9069, -13.3364, -11.2339, -16.0118, -11.8331, -17.0613, -13.8852, -12.4163, -16.8978, -10.7772, -17.2324, -10.6979, -16.9811, -10.3427, -9.497, -13.7104, -11.1107, -13.2936, -13.855, -14.1264]], # noqa: E231\n device=torch_device,\n )\n # fmt: on\n\n self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4))\n self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4))\n\n input_ids = inputs[\"input_ids\"].tolist()\n answer = [\n input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1]\n for i in range(len(input_ids))\n ]\n answer = tokenizer.batch_decode(answer)\n\n self.assertTrue(answer == [\"BigBird\", \"global attention\"])\n\n def test_fill_mask(self):\n tokenizer = BigBirdTokenizer.from_pretrained(\"google/bigbird-roberta-base\")\n model = BigBirdForMaskedLM.from_pretrained(\"google/bigbird-roberta-base\")\n model.to(torch_device)\n\n input_ids = tokenizer(\"The goal of life is [MASK] .\", return_tensors=\"pt\").input_ids.to(torch_device)\n logits = model(input_ids).logits\n\n # [MASK] is token at 6th position\n pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1))\n self.assertEqual(pred_token, \"happiness\")\n\n 
def test_auto_padding(self):\n model = BigBirdModel.from_pretrained(\n \"google/bigbird-roberta-base\", attention_type=\"block_sparse\", num_random_blocks=3, block_size=16\n )\n model.to(torch_device)\n model.eval()\n\n input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long)\n output = model(input_ids).to_tuple()[0]\n\n # fmt: off\n target = torch.tensor(\n [[-0.045136, -0.068013, 0.12246, -0.01356, 0.018386, 0.025333, -0.0044439, -0.0030996, -0.064031, 0.0006439], [-0.045018, -0.067638, 0.12317, -0.013998, 0.019216, 0.025695, -0.0043705, -0.0031895, -0.063153, 0.00088899], [-0.045042, -0.067305, 0.1234, -0.014512, 0.020057, 0.026084, -0.004615, -0.0031728, -0.062442, 0.0010263], [-0.044589, -0.067655, 0.12416, -0.014287, 0.019416, 0.026065, -0.0050958, -0.002702, -0.063158, 0.0004827], [-0.044627, -0.067535, 0.1239, -0.014319, 0.019491, 0.026213, -0.0059482, -0.0025906, -0.063116, 0.00014669], [-0.044899, -0.067704, 0.12337, -0.014231, 0.019256, 0.026345, -0.0065565, -0.0022938, -0.063433, -0.00011409], [-0.045599, -0.067764, 0.12235, -0.014151, 0.019206, 0.026417, -0.0068965, -0.0024494, -0.063313, -4.4499e-06], [-0.045557, -0.068372, 0.12199, -0.013747, 0.017962, 0.026103, -0.0070607, -0.0023552, -0.06447, -0.00048756], [-0.045334, -0.068913, 0.1217, -0.013566, 0.01693, 0.025745, -0.006311, -0.0024903, -0.065575, -0.0006719], [-0.045171, -0.068726, 0.12164, -0.013688, 0.017139, 0.025629, -0.005213, -0.0029412, -0.065237, -0.00020669], [-0.044411, -0.069267, 0.12206, -0.013645, 0.016212, 0.025589, -0.0044121, -0.002972, -0.066277, -0.00067963], [-0.043487, -0.069792, 0.1232, -0.013663, 0.015303, 0.02613, -0.0036294, -0.0030616, -0.067483, -0.0012642], [-0.042622, -0.069287, 0.12469, -0.013936, 0.016204, 0.026474, -0.0040534, -0.0027365, -0.066994, -0.0014148], [-0.041879, -0.070031, 0.12593, -0.014047, 0.015082, 0.027751, -0.0040683, -0.0027189, -0.068985, -0.0027146]], # noqa: E231\n device=torch_device,\n )\n # fmt: on\n\n self.assertEqual(output.shape, torch.Size((1, 241, 768)))\n self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))\n"
] | [
[
"numpy.array",
"pandas.DataFrame.from_records",
"torch.no_grad",
"pandas.DataFrame.from_dict"
],
[
"torch.ones_like",
"torch.ones",
"torch.Size",
"torch.argmax",
"torch.manual_seed",
"torch.no_grad",
"torch.tensor",
"torch.zeros",
"torch.einsum",
"torch.cat",
"torch.allclose"
]
] |
Haiper-ai/kubric | [
"d096ba178b8a78ea2c840ae121646d36281d31d9"
] | [
"challenges/multiview_matting/worker.py"
] | [
"# Copyright 2021 The Kubric Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nWorker file for the Multi-View Background removal dataset.\n\nThis dataset creates a scene where a foreground object is to be distinguished\nfrom the background. Foreground objects are borrowed from shapnet. Backgrounds\nare from indoor scenes of polyhaven. All foreground objects are situated on top\nof a \"table\" which is gernated to be random in color. Instead of background\nremoval with a single image. This dataset is special in that multiple images of\nthe foreground object (taken from different camera poses) are given. This\n\"multi-view\" persepctive should be very helpful for background removal but is\ncurrently underexplored in the literature.\n\"\"\"\nimport logging\nimport numpy as np\n\nimport kubric as kb\nfrom kubric.renderer import Blender as KubricRenderer\n\n# --- WARNING: this path is not yet public\nsource_path = (\n \"gs://tensorflow-graphics/public/60c9de9c410be30098c297ac/ShapeNetCore.v2\")\n\n# --- CLI arguments (and modified defaults)\nparser = kb.ArgumentParser()\nparser.set_defaults(\n seed=1,\n frame_start=1,\n frame_end=10,\n width=128,\n height=128,\n)\n\nparser.add_argument(\"--backgrounds_split\",\n choices=[\"train\", \"test\"], default=\"train\")\nparser.add_argument(\"--dataset_mode\",\n choices=[\"easy\", \"hard\"], default=\"hard\")\nparser.add_argument(\"--hdri_dir\",\n type=str, default=\"gs://mv_bckgr_removal/hdri_haven/4k/\")\n # \"/mnt/mydata/images/\"\nFLAGS = parser.parse_args()\n\n\nif FLAGS.dataset_mode == \"hard\":\n add_distractors = False\n\ndef add_hdri_dome(hdri_source, scene, background_hdri=None):\n dome_path = hdri_source.fetch(\"dome.blend\")\n dome = kb.FileBasedObject(\n name=\"BackgroundDome\",\n position=(0, 0, 0),\n static=True, background=True,\n simulation_filename=None,\n render_filename=str(dome_path),\n render_import_kwargs={\n \"filepath\": str(dome_path / \"Object\" / \"Dome\"),\n \"directory\": str(dome_path / \"Object\"),\n \"filename\": \"Dome\",\n })\n scene.add(dome)\n # pylint: disable=import-outside-toplevel\n from kubric.renderer import Blender\n import bpy\n blender_renderer = [v for v in scene.views if isinstance(v, Blender)]\n if blender_renderer:\n dome_blender = dome.linked_objects[blender_renderer[0]]\n dome_blender.cycles_visibility.shadow = False\n if background_hdri is not None:\n dome_mat = dome_blender.data.materials[0]\n texture_node = dome_mat.node_tree.nodes[\"Image Texture\"]\n texture_node.image = bpy.data.images.load(background_hdri.filename)\n return dome\n\n# --- Common setups\nkb.utils.setup_logging(FLAGS.logging_level)\nkb.utils.log_my_flags(FLAGS)\njob_dir = kb.as_path(FLAGS.job_dir)\nrng = np.random.RandomState(FLAGS.seed)\nscene = kb.Scene.from_flags(FLAGS)\n\n# --- Add a renderer\nrenderer = KubricRenderer(scene,\n use_denoising=True,\n adaptive_sampling=False,\n background_transparency=True)\n\n# --- Fetch a random asset\nasset_source = kb.AssetSource(source_path)\nall_ids = 
list(asset_source.db['id'])\nfraction = 0.1\nheld_out_obj_ids = list(asset_source.db.sample(\n frac=fraction, replace=False, random_state=42)[\"id\"])\ntrain_obj_ids = [i for i in asset_source.db[\"id\"] if\n i not in held_out_obj_ids]\n\nif FLAGS.backgrounds_split == \"train\":\n asset_id = rng.choice(train_obj_ids)\nelse:\n asset_id = rng.choice(held_out_obj_ids)\n\nobj = asset_source.create(asset_id=asset_id)\nlogging.info(f\"selected '{asset_id}'\")\n\n# --- make object flat on X/Y and not penetrate floor\nobj.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)\nobj.position = obj.position - (0, 0, obj.aabbox[0][2])\n\nobj_size = np.linalg.norm(obj.aabbox[1] - obj.aabbox[0])\nif add_distractors:\n obj_radius = np.linalg.norm(obj.aabbox[1][:2] - obj.aabbox[0][:2])\nobj_height = obj.aabbox[1][2] - obj.aabbox[0][2]\nobj.metadata = {\n \"asset_id\": obj.asset_id,\n \"category\": asset_source.db[\n asset_source.db[\"id\"] == obj.asset_id].iloc[0][\"category_name\"],\n}\nscene.add(obj)\n\nsize_multiple = 1.\nif add_distractors:\n distractor_locs = []\n for i in range(4):\n asset_id_2 = rng.choice(train_obj_ids)\n obj2 = asset_source.create(asset_id=asset_id_2)\n logging.info(f\"selected '{asset_id}'\")\n\n # --- make object flat on X/Y and not penetrate floor\n obj2.quaternion = kb.Quaternion(axis=[1,0,0], degrees=90)\n obj_2_radius = np.linalg.norm(obj2.aabbox[1][:2] - obj2.aabbox[0][:2])\n\n position = rng.rand((2)) * 2 - 1\n position /= np.linalg.norm(position)\n position *= (obj_radius + obj_2_radius) / 2.\n\n distractor_locs.append(-position)\n obj2.position = obj2.position - (position[0], position[1], obj2.aabbox[0][2])\n\n obj_size_2 = np.linalg.norm(obj2.aabbox[1] - obj2.aabbox[0])\n\n obj_height_2 = obj2.aabbox[1][2] - obj2.aabbox[0][2]\n obj2.metadata = {\n \"asset_id\": obj.asset_id,\n \"category\": asset_source.db[\n asset_source.db[\"id\"] == obj2.asset_id].iloc[0][\"category_name\"],\n }\n scene.add(obj2)\n\n distractor_dir = np.vstack(distractor_locs)\n distractor_dir /= np.linalg.norm(distractor_dir, axis=-1, keepdims=True)\n\n size_multiple = 1.5\n\nmaterial = kb.PrincipledBSDFMaterial(\n color=kb.Color.from_hsv(rng.uniform(), 1, 1),\n metallic=1.0, roughness=0.2, ior=2.5)\n\ntable = kb.Cube(name=\"floor\", scale=(obj_size*size_multiple, obj_size*size_multiple, 0.02),\n position=(0, 0, -0.02), material=material)\nscene += table\n\nlogging.info(\"Loading background HDRIs from %s\", FLAGS.hdri_dir)\n\nhdri_source = kb.TextureSource(FLAGS.hdri_dir)\ntrain_backgrounds, held_out_backgrounds = hdri_source.get_test_split(\n fraction=0.1)\nif FLAGS.backgrounds_split == \"train\":\n logging.info(\"Choosing one of the %d training backgrounds...\",\n len(train_backgrounds))\n background_hdri = hdri_source.create(texture_name=rng.choice(train_backgrounds))\nelse:\n logging.info(\"Choosing one of the %d held-out backgrounds...\",\n len(held_out_backgrounds))\n background_hdri = hdri_source.create(\n texture_name=rng.choice(held_out_backgrounds))\ndome = kb.assets.utils.add_hdri_dome(hdri_source, scene, background_hdri)\n\ndome = add_hdri_dome(hdri_source, scene, background_hdri)\nrenderer._set_ambient_light_hdri(background_hdri.filename)\n# table = add_table(hdri_source, scene, background_hdri)\n\n# --- Add Klevr-like lights to the scene\nscene += kb.assets.utils.get_clevr_lights(rng=rng)\n# scene.ambient_illumination = kb.Color.from_hsv(np.random.uniform(), 1, 1)\n# scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)\n\ndef sample_point_in_half_sphere_shell(\n inner_radius: 
float,\n outer_radius: float,\n rng: np.random.RandomState\n ):\n \"\"\"Uniformly sample points that are in a given distance\n range from the origin and with z >= 0.\"\"\"\n\n while True:\n v = rng.uniform((-outer_radius, -outer_radius, obj_height/1.2),\n (outer_radius, outer_radius, obj_height))\n len_v = np.linalg.norm(v)\n correct_angle = True\n if add_distractors:\n cam_dir = v[:2] / np.linalg.norm(v[:2])\n correct_angle = np.all(np.dot(distractor_dir, cam_dir) < np.cos(np.pi / 9.))\n if inner_radius <= len_v <= outer_radius and correct_angle:\n return tuple(v)\n\n# --- Keyframe the camera\nscene.camera = kb.PerspectiveCamera()\nfor frame in range(FLAGS.frame_start, FLAGS.frame_end + 1):\n # scene.camera.position = (1, 1, 1) #< frozen camera\n scene.camera.position = sample_point_in_half_sphere_shell(\n obj_size*1.7, obj_size*2, rng)\n scene.camera.look_at((0, 0, obj_height/2))\n scene.camera.keyframe_insert(\"position\", frame)\n scene.camera.keyframe_insert(\"quaternion\", frame)\n\n# --- Rendering\nlogging.info(\"Rendering the scene ...\")\nrenderer.save_state(job_dir / \"scene.blend\")\ndata_stack = renderer.render()\n\n# --- Postprocessing\nkb.compute_visibility(data_stack[\"segmentation\"], scene.assets)\ndata_stack[\"segmentation\"] = kb.adjust_segmentation_idxs(\n data_stack[\"segmentation\"],\n scene.assets,\n [obj]).astype(np.uint8)\n\n# --- Discard non-used information\ndel data_stack[\"uv\"]\ndel data_stack[\"forward_flow\"]\ndel data_stack[\"backward_flow\"]\ndel data_stack[\"depth\"]\ndel data_stack[\"normal\"]\n\n# --- Save to image files\nkb.file_io.write_image_dict(data_stack, job_dir)\n\n# --- Collect metadata\nlogging.info(\"Collecting and storing metadata for each object.\")\ndata = {\n \"metadata\": kb.get_scene_metadata(scene),\n \"camera\": kb.get_camera_info(scene.camera),\n}\nkb.file_io.write_json(filename=job_dir / \"metadata.json\", data=data)\nkb.done()\n"
] | [
[
"numpy.vstack",
"numpy.cos",
"numpy.random.RandomState",
"numpy.dot",
"numpy.linalg.norm"
]
] |
KiLJ4EdeN/tf2_nn | [
"0ccec7692f061e7e066a4a2439683e3b09faa7bc"
] | [
"tfnn_mlp.py"
] | [
"import tensorflow as tf\nimport matplotlib.pyplot as plt\n\n# MNIST dataset parameters.\nnum_classes = 10 # 0 to 9 digits\nnum_features = 784 # 28*28\n\n# Training parameters.\nlearning_rate = 0.001\ntraining_steps = 1000\nbatch_size = 256\ndisplay_step = 100\n\n# Network parameters.\nn_hidden_1 = 128 # 1st layer number of neurons.\nn_hidden_2 = 256 # 2nd layer number of neurons.\n\n# Prepare MNIST data.\nfrom tensorflow.keras.datasets import mnist\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n# Convert to float32.\nX_train = tf.Variable(X_train, dtype=tf.float32)\nX_test = tf.Variable(X_test, dtype=tf.float32)\n# Flatten images to 1-D vector of 784 features (28*28).\nX_train = tf.reshape(X_train, [-1, num_features])\nX_test = tf.reshape(X_test, [-1, num_features])\n# Normalize images value from [0, 255] to [0, 1].\nX_train = X_train / 255.\nX_test = X_test / 255.\n\nprint(X_train.shape)\nprint(X_test.shape)\n\n# Use tf.data API to shuffle and batch data.\ntrain_data = tf.data.Dataset.from_tensor_slices((X_train, Y_train))\n# repeat adds the data again, prefetch speeds up outputs with the cost of ram.\ntrain_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)\n\nnum_hidden_units = [n_hidden_1, n_hidden_2, num_classes]\nrandom_normal = tf.initializers.RandomNormal()\n# Weight of shape [784, 10], the 28*28 image features, and total number of classes.\nW1 = tf.Variable(random_normal([num_features, num_hidden_units[0]]), name=\"weight1\")\nW2 = tf.Variable(random_normal([num_hidden_units[0], num_hidden_units[1]]), name=\"weight2\")\nW3 = tf.Variable(random_normal([num_hidden_units[1], num_hidden_units[2]]), name=\"weight3\")\n# Bias of shape [10], the total number of classes.\nb1 = tf.Variable(tf.zeros([num_hidden_units[0]]), name=\"bias1\")\nb2 = tf.Variable(tf.zeros([num_hidden_units[1]]), name=\"bias2\")\nb3 = tf.Variable(tf.zeros([num_hidden_units[2]]), name=\"bias3\")\n\ndef multilayer_perceptron(x):\n # Apply softmax to normalize the logits to a probability distribution.\n h1 = tf.nn.relu(tf.add(tf.matmul(x, W1), b1))\n h2 = tf.nn.relu(tf.add(tf.matmul(h1, W2), b2))\n h3 = tf.nn.relu(tf.add(tf.matmul(h2, W3), b3))\n return tf.nn.softmax(h3)\n\n# Cross-Entropy loss function.\ndef cross_entropy(y_pred, y_true):\n # Encode label to a one hot vector.\n y_true = tf.one_hot(y_true, depth=num_classes)\n # Clip prediction values to avoid log(0) error.\n y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)\n # Compute cross-entropy.\n return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n\n# Accuracy metric.\ndef accuracy(y_pred, y_true):\n # Predicted class is the index of highest score in prediction vector (i.e. argmax).\n correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\n\n# Stochastic gradient descent optimizer.\noptimizer = tf.optimizers.SGD(learning_rate)\n\n# Optimization process. 
\ndef run_optimization(x, y):\n # Wrap computation inside a GradientTape for automatic differentiation.\n with tf.GradientTape() as g:\n pred = multilayer_perceptron(x)\n loss = cross_entropy(pred, y)\n\n # Compute gradients.\n gradients = g.gradient(loss, [W1, W2, W3, b1, b2, b3])\n \n # Update W and b following gradients.\n optimizer.apply_gradients(zip(gradients, [W1, W2, W3, b1, b2, b3]))\n\n# Run training for the given number of steps.\nfor step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):\n # Run the optimization to update W and b values.\n run_optimization(batch_x, batch_y)\n \n if step % display_step == 0:\n pred = multilayer_perceptron(batch_x)\n loss = cross_entropy(pred, batch_y)\n acc = accuracy(pred, batch_y)\n print(\"step: %i, loss: %f, accuracy: %f\" % (step, loss, acc))\n\n# Test model on validation set.\npred = multilayer_perceptron(X_test)\nprint(\"Test Accuracy: %f\" % accuracy(pred, Y_test))\n\n# Visualize predictions.\n# Predict 5 images from validation set.\nn_images = 5\ntest_images = X_test[:n_images]\npredictions = multilayer_perceptron(test_images)\n\n# Display image and model prediction.\nfor i in range(n_images):\n plt.imshow(tf.reshape(test_images[i], [28, 28]), cmap='gray')\n plt.show()\n print(\"Model prediction: %i\" % tf.argmax(predictions.numpy()[i]))\n"
] | [
[
"tensorflow.zeros",
"tensorflow.math.log",
"tensorflow.reshape",
"tensorflow.initializers.RandomNormal",
"tensorflow.matmul",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.cast",
"matplotlib.pyplot.show",
"tensorflow.one_hot",
"tensorflow.clip_by_value",
"tensorflow.argmax",
"tensorflow.Variable",
"tensorflow.GradientTape",
"tensorflow.optimizers.SGD",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.nn.softmax"
]
] |
kevin3/cwl-ica | [
"cf706ea42993d563f364c0847ee4b882f8fe067c"
] | [
"src/subcommands/listers/list_users.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"\nList all users registered in <CWL_ICA_REPO_PATH>/config/user.yaml\n\"\"\"\n\nfrom classes.command import Command\nfrom utils.logging import get_logger\nimport pandas as pd\nfrom utils.repo import read_yaml, get_user_yaml_path\nimport sys\n\nlogger = get_logger()\n\n\nclass ListUsers(Command):\n \"\"\"Usage:\n cwl-ica [options] list-users help\n cwl-ica [options] list-users\n\nDescription:\n List all registered users in <CWL_ICA_REPO_PATH>/config/user.yaml\n\nExample:\n cwl-ica list-users\n \"\"\"\n\n def __init__(self, command_argv):\n # Collect args from doc strings\n super().__init__(command_argv)\n\n # Check args\n self.check_args()\n\n def __call__(self):\n \"\"\"\n Just run through this\n :return:\n \"\"\"\n\n # Check project.yaml exists\n user_yaml_path = get_user_yaml_path()\n\n user_list = read_yaml(user_yaml_path)['users']\n\n # Create pandas df of user yaml path\n user_df = pd.DataFrame(user_list)\n\n # Write user to stdout\n user_df.to_markdown(sys.stdout, index=False)\n\n # Add new line\n print()\n\n def check_args(self):\n \"\"\"\n Check if --tenant-name is defined or CWL_ICA_DEFAULT_TENANT is present\n Or if --tenant-name is set to 'all'\n :return:\n \"\"\"\n\n # Just make sure the user.yaml path exists\n _ = get_user_yaml_path()\n"
] | [
[
"pandas.DataFrame"
]
] |
Broly498/sentinel2-cloud-detector | [
"912880fcd6fed482475b4cd8da07bda17993ebe8"
] | [
"examples/plotting_utils.py"
] | [
"\"\"\"\nPlotting utilities for example notebooks\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef plot_image(image=None, mask=None, ax=None, factor=3.5/255, clip_range=(0, 1), **kwargs):\n \"\"\" Utility function for plotting RGB images and masks.\n \"\"\"\n if ax is None:\n _, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 15))\n\n mask_color = [255, 255, 255, 255] if image is None else [255, 255, 0, 100]\n\n if image is None:\n if mask is None:\n raise ValueError('image or mask should be given')\n image = np.zeros(mask.shape + (3,), dtype=np.uint8)\n\n ax.imshow(np.clip(image * factor, *clip_range), **kwargs)\n\n if mask is not None:\n cloud_image = np.zeros((mask.shape[0], mask.shape[1], 4), dtype=np.uint8)\n\n cloud_image[mask == 1] = np.asarray(mask_color, dtype=np.uint8)\n\n ax.imshow(cloud_image)\n\n\ndef plot_probabilities(image, proba, factor=3.5/255):\n \"\"\" Utility function for plotting a RGB image and its cloud probability map next to each other.\n \"\"\"\n plt.figure(figsize=(15, 15))\n ax = plt.subplot(1, 2, 1)\n ax.imshow(np.clip(image * factor, 0, 1))\n plt.show\n ax = plt.subplot(1, 2, 2)\n ax.imshow(proba, cmap=plt.cm.inferno)\n plt.show\n\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplot",
"numpy.clip"
]
] |
henrykironde/DeepForest | [
"9df98ea30debc8a1dc98edfa45dada063b109e6e"
] | [
"deepforest/preprocess.py"
] | [
"# Deepforest Preprocessing model\n\"\"\"The preprocessing module is used to reshape data into format suitable for\ntraining or prediction.\n\nFor example cutting large tiles into smaller images.\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport slidingwindow\nfrom PIL import Image\nimport torch\nimport warnings\nimport rasterio\n\ndef preprocess_image(image, device):\n \"\"\"Preprocess a single RGB numpy array as a prediction from channels last, to channels first\"\"\"\n image = torch.tensor(image, device=device).permute(2, 0, 1).unsqueeze(0)\n image = image / 255\n\n return image\n\n\ndef image_name_from_path(image_path):\n \"\"\"Convert path to image name for use in indexing.\"\"\"\n image_name = os.path.basename(image_path)\n image_name = os.path.splitext(image_name)[0]\n\n return image_name\n\n\ndef compute_windows(numpy_image, patch_size, patch_overlap):\n \"\"\"Create a sliding window object from a raster tile.\n\n Args:\n numpy_image (array): Raster object as numpy array to cut into crops\n\n Returns:\n windows (list): a sliding windows object\n \"\"\"\n\n if patch_overlap > 1:\n raise ValueError(\"Patch overlap {} must be between 0 - 1\".format(patch_overlap))\n\n # Generate overlapping sliding windows\n windows = slidingwindow.generate(numpy_image,\n slidingwindow.DimOrder.HeightWidthChannel,\n patch_size, patch_overlap)\n\n return (windows)\n\n\ndef select_annotations(annotations, windows, index, allow_empty=False):\n \"\"\"Select annotations that overlap with selected image crop.\n\n Args:\n image_name (str): Name of the image in the annotations file to lookup.\n annotations_file: path to annotations file in\n the format -> image_path, xmin, ymin, xmax, ymax, label\n windows: A sliding window object (see compute_windows)\n index: The index in the windows object to use a crop bounds\n allow_empty (bool): If True, allow window crops\n that have no annotations to be included\n\n Returns:\n selected_annotations: a pandas dataframe of annotations\n \"\"\"\n\n # Window coordinates - with respect to tile\n window_xmin, window_ymin, w, h = windows[index].getRect()\n window_xmax = window_xmin + w\n window_ymax = window_ymin + h\n\n # buffer coordinates a bit to grab boxes that might start just against\n # the image edge. 
Don't allow boxes that start and end after the offset\n offset = 40\n selected_annotations = annotations[(annotations.xmin > (window_xmin - offset)) &\n (annotations.xmin < (window_xmax)) &\n (annotations.xmax >\n (window_xmin)) & (annotations.ymin >\n (window_ymin - offset)) &\n (annotations.xmax <\n (window_xmax + offset)) & (annotations.ymin <\n (window_ymax)) &\n (annotations.ymax >\n (window_ymin)) & (annotations.ymax <\n (window_ymax + offset))]\n\n # change the image name\n image_name = os.path.splitext(\"{}\".format(annotations.image_path.unique()[0]))[0]\n image_basename = os.path.splitext(image_name)[0]\n selected_annotations.image_path = \"{}_{}.png\".format(image_basename, index)\n\n # If no matching annotations, return a line with the image name, but no\n # records\n if selected_annotations.empty:\n if allow_empty:\n selected_annotations = pd.DataFrame(\n [\"{}_{}.png\".format(image_basename, index)], columns=[\"image_path\"])\n selected_annotations[\"xmin\"] = 0\n selected_annotations[\"ymin\"] = 0\n selected_annotations[\"xmax\"] = 0\n selected_annotations[\"ymax\"] = 0\n #Dummy label\n selected_annotations[\"label\"] = annotations.label.unique()[0]\n else:\n return None\n else:\n # update coordinates with respect to origin\n selected_annotations.xmax = (selected_annotations.xmin - window_xmin) + (\n selected_annotations.xmax - selected_annotations.xmin)\n selected_annotations.xmin = (selected_annotations.xmin - window_xmin)\n selected_annotations.ymax = (selected_annotations.ymin - window_ymin) + (\n selected_annotations.ymax - selected_annotations.ymin)\n selected_annotations.ymin = (selected_annotations.ymin - window_ymin)\n\n # cut off any annotations over the border.\n selected_annotations.loc[selected_annotations.xmin < 0, \"xmin\"] = 0\n selected_annotations.loc[selected_annotations.xmax > w, \"xmax\"] = w\n selected_annotations.loc[selected_annotations.ymin < 0, \"ymin\"] = 0\n selected_annotations.loc[selected_annotations.ymax > h, \"ymax\"] = h\n\n return selected_annotations\n\n\ndef save_crop(base_dir, image_name, index, crop):\n \"\"\"Save window crop as image file to be read by PIL.\n\n Filename should match the image_name + window index\n \"\"\"\n # create dir if needed\n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n\n im = Image.fromarray(crop)\n image_basename = os.path.splitext(image_name)[0]\n filename = \"{}/{}_{}.png\".format(base_dir, image_basename, index)\n im.save(filename)\n\n return filename\n\n\ndef split_raster(annotations_file,\n path_to_raster=None,\n numpy_image=None,\n base_dir=\".\",\n patch_size=400,\n patch_overlap=0.05,\n allow_empty=False,\n image_name = None):\n \"\"\"Divide a large tile into smaller arrays. 
Each crop will be saved to\n file.\n\n Args:\n numpy_image: a numpy object to be used as a raster, usually opened from rasterio.open.read()\n path_to_raster: (str): Path to a tile that can be read by rasterio on disk\n annotations_file (str): Path to annotations file (with column names)\n data in the format -> image_path, xmin, ymin, xmax, ymax, label\n base_dir (str): Where to save the annotations and image\n crops relative to current working dir\n patch_size (int): Maximum dimensions of square window\n patch_overlap (float): Percent of overlap among windows 0->1\n allow_empty: If True, include images with no annotations\n to be included in the dataset\n image_name (str): If numpy_image arg is used, what name to give the raster?\n\n Returns:\n A pandas dataframe with annotations file for training.\n \"\"\"\n \n # Load raster as image\n # Load raster as image\n if (numpy_image is None) & (path_to_raster is None):\n raise IOError(\"supply a raster either as a path_to_raster or if ready from existing in memory numpy object, as numpy_image=\")\n \n if path_to_raster:\n numpy_image = rasterio.open(path_to_raster).read()\n numpy_image = np.moveaxis(numpy_image,0,2)\n else:\n if image_name is None:\n raise(IOError(\"If passing an numpy_image, please also specify a image_name to match the column in the annotation.csv file\"))\n\n # Check that its 3 band\n bands = numpy_image.shape[2]\n if not bands == 3:\n warnings.warn(\"Input rasterio had non-3 band shape of {}, ignoring alpha channel\".format(numpy_image.shape))\n try:\n numpy_image = numpy_image[:,:,:3].astype(\"uint8\") \n except:\n raise IOError(\"Input file {} has {} bands. DeepForest only accepts 3 band RGB \"\n \"rasters in the order (height, width, channels). Selecting the first three bands failed, please reshape manually.\"\n \"If the image was cropped and saved as a .jpg, \"\n \"please ensure that no alpha channel was used.\".format(\n path_to_raster, bands))\n\n # Check that patch size is greater than image size\n height = numpy_image.shape[0]\n width = numpy_image.shape[1]\n if any(np.array([height, width]) < patch_size):\n raise ValueError(\"Patch size of {} is larger than the image dimensions {}\".format(\n patch_size, [height, width]))\n\n # Compute sliding window index\n windows = compute_windows(numpy_image, patch_size, patch_overlap)\n\n # Get image name for indexing\n if image_name is None:\n image_name = os.path.basename(path_to_raster) \n\n # Load annotations file and coerce dtype\n annotations = pd.read_csv(annotations_file)\n\n # open annotations file\n image_annotations = annotations[annotations.image_path == image_name]\n\n # Sanity checks\n if image_annotations.empty:\n raise ValueError(\n \"No image names match between the file:{} and the image_path: {}. \"\n \"Reminder that image paths should be the relative \"\n \"path (e.g. 'image_name.tif'), not the full path \"\n \"(e.g. 
path/to/dir/image_name.tif)\".format(annotations_file, image_name))\n\n if not all([\n x in annotations.columns\n for x in [\"image_path\", \"xmin\", \"ymin\", \"xmax\", \"ymax\", \"label\"]\n ]):\n raise ValueError(\"Annotations file has {} columns, should have \"\n \"format image_path, xmin, ymin, xmax, ymax, label\".format(\n annotations.shape[1]))\n\n annotations_files = []\n for index, window in enumerate(windows):\n\n # Crop image\n crop = numpy_image[windows[index].indices()]\n \n #skip if empty crop\n if crop.size == 0:\n continue\n\n # Find annotations, image_name is the basename of the path\n crop_annotations = select_annotations(image_annotations, windows, index,\n allow_empty)\n\n # If empty images not allowed, select annotations returns None\n if crop_annotations is not None:\n # save annotations\n annotations_files.append(crop_annotations)\n\n # save image crop\n save_crop(base_dir, image_name, index, crop)\n if len(annotations_files) == 0:\n raise ValueError(\n \"Input file has no overlapping annotations and allow_empty is {}\".format(\n allow_empty))\n\n annotations_files = pd.concat(annotations_files)\n\n # Checkpoint csv files, useful for parallelization\n # Use filename of the raster path to save the annotations\n image_basename = os.path.splitext(image_name)[0]\n file_path = image_basename + \".csv\"\n file_path = os.path.join(base_dir, file_path)\n annotations_files.to_csv(file_path, index=False, header=True)\n\n return annotations_files\n"
] | [
[
"pandas.read_csv",
"torch.tensor",
"numpy.moveaxis",
"pandas.concat",
"numpy.array"
]
] |
furkannturkmen/CS231n-2021 | [
"2c6618d16bfd4e02e0493e8b8a411a6509206bb4"
] | [
"assignment2/cs231n/optim.py"
] | [
"import numpy as np\n\n\"\"\"\nThis file implements various first-order update rules that are commonly used\nfor training neural networks. Each update rule accepts current weights and the\ngradient of the loss with respect to those weights and produces the next set of\nweights. Each update rule has the same interface:\n\ndef update(w, dw, config=None):\n\nInputs:\n - w: A numpy array giving the current weights.\n - dw: A numpy array of the same shape as w giving the gradient of the\n loss with respect to w.\n - config: A dictionary containing hyperparameter values such as learning\n rate, momentum, etc. If the update rule requires caching values over many\n iterations, then config will also hold these cached values.\n\nReturns:\n - next_w: The next point after the update.\n - config: The config dictionary to be passed to the next iteration of the\n update rule.\n\nNOTE: For most update rules, the default learning rate will probably not\nperform well; however the default values of the other hyperparameters should\nwork well for a variety of different problems.\n\nFor efficiency, update rules may perform in-place updates, mutating w and\nsetting next_w equal to w.\n\"\"\"\n\n\ndef sgd(w, dw, config=None):\n \"\"\"\n Performs vanilla stochastic gradient descent.\n\n config format:\n - learning_rate: Scalar learning rate.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n\n w -= config[\"learning_rate\"] * dw\n return w, config\n\n\ndef sgd_momentum(w, dw, config=None):\n \"\"\"\n Performs stochastic gradient descent with momentum.\n\n config format:\n - learning_rate: Scalar learning rate.\n - momentum: Scalar between 0 and 1 giving the momentum value.\n Setting momentum = 0 reduces to sgd.\n - velocity: A numpy array of the same shape as w and dw used to store a\n moving average of the gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"momentum\", 0.9)\n v = config.get(\"velocity\", np.zeros_like(w))\n\n next_w = None\n ###########################################################################\n # TODO: Implement the momentum update formula. Store the updated value in #\n # the next_w variable. You should also use and update the velocity v. 
#\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n \n v = v * config[\"momentum\"] - config[\"learning_rate\"] * dw # for example -> momentum = 0.9 lr = 0.1\n next_w = w + v\n \n\n \"\"\"\n v = config[\"momentum\"] * v + (1 - config[\"momentum\"]) * dw\n next_w = w - config[\"learning_rate\"] * v\n \"\"\"\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n config[\"velocity\"] = v\n\n return next_w, config\n\n\ndef rmsprop(w, dw, config=None):\n \"\"\"\n Uses the RMSProp update rule, which uses a moving average of squared\n gradient values to set adaptive per-parameter learning rates.\n\n config format:\n - learning_rate: Scalar learning rate.\n - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared\n gradient cache.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - cache: Moving average of second moments of gradients.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-2)\n config.setdefault(\"decay_rate\", 0.99)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"cache\", np.zeros_like(w))\n\n next_w = None\n ###########################################################################\n # TODO: Implement the RMSprop update formula, storing the next value of w #\n # in the next_w variable. Don't forget to update cache value stored in #\n # config['cache']. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n \"\"\"\n ADAGRAD\n \n config[\"cache\"] += dw * dw\n w = w - config[\"learning_rate\"] * dw / (np.sqrt(config[\"cache\"]) + config[\"epsilon\"])\n \"\"\"\n\n config[\"cache\"] = config[\"decay_rate\"] * config[\"cache\"] + (1 - config[\"decay_rate\"]) * dw * dw\n next_w = w - config[\"learning_rate\"] * dw / (np.sqrt(config[\"cache\"]) + config[\"epsilon\"])\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n\n\ndef adam(w, dw, config=None):\n \"\"\"\n Uses the Adam update rule, which incorporates moving averages of both the\n gradient and its square and a bias correction term.\n\n config format:\n - learning_rate: Scalar learning rate.\n - beta1: Decay rate for moving average of first moment of gradient.\n - beta2: Decay rate for moving average of second moment of gradient.\n - epsilon: Small scalar used for smoothing to avoid dividing by zero.\n - m: Moving average of gradient.\n - v: Moving average of squared gradient.\n - t: Iteration number.\n \"\"\"\n if config is None:\n config = {}\n config.setdefault(\"learning_rate\", 1e-3)\n config.setdefault(\"beta1\", 0.9)\n config.setdefault(\"beta2\", 0.999)\n config.setdefault(\"epsilon\", 1e-8)\n config.setdefault(\"m\", np.zeros_like(w))\n config.setdefault(\"v\", np.zeros_like(w))\n config.setdefault(\"t\", 0)\n\n next_w = None\n ###########################################################################\n # TODO: Implement the Adam update formula, storing the next value of w in #\n # the next_w variable. 
Don't forget to update the m, v, and t variables #\n # stored in config. #\n # #\n # NOTE: In order to match the reference output, please modify t _before_ #\n # using it in any calculations. #\n ###########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n config[\"t\"] += 1\n # Momentum\n config[\"m\"] = config[\"beta1\"] * config[\"m\"] + (1 - config[\"beta1\"]) * dw\n m_unbias = config[\"m\"] / (1 - config[\"beta1\"] ** config[\"t\"])\n # RMSProp / ADAGRAD\n config[\"v\"] = config[\"beta2\"] * config[\"v\"] + (1 - config[\"beta2\"]) * dw ** 2\n v_unbias = config[\"v\"] / (1 - config[\"beta2\"] ** config[\"t\"])\n\n next_w = w - config[\"learning_rate\"] * m_unbias / (np.sqrt(v_unbias) + config[\"epsilon\"])\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return next_w, config\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like"
]
] |
kaikun213/fonduer-troy200 | [
"d5653df48e3ce3037f4f1b500d454947ad0d010c"
] | [
"src/fonduer_utils.py"
] | [
"import emmental\nimport numpy as np\n\nfrom fonduer import Meta\nfrom emmental.modules.embedding_module import EmbeddingModule\nfrom emmental.data import EmmentalDataLoader\nfrom emmental.model import EmmentalModel\nfrom emmental.learner import EmmentalLearner\nfrom fonduer.learning.utils import collect_word_counter\nfrom fonduer.learning.dataset import FonduerDataset\nfrom fonduer.learning.task import create_task\nfrom troy200_utils import entity_level_f1\n\nABSTAIN = -1\nFALSE = 0\nTRUE = 1\n\ndef get_methods(ATTRIBUTE, gold, gold_file, all_docs):\n train_docs = all_docs[0]\n dev_docs = all_docs[1]\n test_docs = all_docs[2]\n\n def train_model(cands, F, align_type, model_type=\"LogisticRegression\"):\n # Extract candidates and features based on the align type (row/column)\n align_val = 0 if align_type == \"row\" else 1\n train_cands = cands[align_val][0]\n F_train = F[align_val][0]\n train_marginals = np.array([[0,1] if gold[align_val](x) else [1,0] for x in train_cands[0]])\n \n # 1.) Setup training config\n config = {\n \"meta_config\": {\"verbose\": True},\n \"model_config\": {\"model_path\": None, \"device\": 0, \"dataparallel\": False},\n \"learner_config\": {\n \"n_epochs\": 50,\n \"optimizer_config\": {\"lr\": 0.001, \"l2\": 0.0},\n \"task_scheduler\": \"round_robin\",\n },\n \"logging_config\": {\n \"evaluation_freq\": 1,\n \"counter_unit\": \"epoch\",\n \"checkpointing\": False,\n \"checkpointer_config\": {\n \"checkpoint_metric\": {f\"{ATTRIBUTE}/{ATTRIBUTE}/train/loss\": \"min\"},\n \"checkpoint_freq\": 1,\n \"checkpoint_runway\": 2,\n \"clear_intermediate_checkpoints\": True,\n \"clear_all_checkpoints\": True,\n },\n },\n }\n\n emmental.init(Meta.log_path)\n emmental.Meta.update_config(config=config)\n \n # 2.) Collect word counter from training data\n word_counter = collect_word_counter(train_cands)\n \n # 3.) Generate word embedding module for LSTM model\n # (in Logistic Regression, we generate it since Fonduer dataset requires word2id dict)\n # Geneate special tokens\n arity = 2\n specials = []\n for i in range(arity):\n specials += [f\"~~[[{i}\", f\"{i}]]~~\"]\n\n emb_layer = EmbeddingModule(\n word_counter=word_counter, word_dim=300, specials=specials\n )\n \n # 4.) Generate dataloader for training set\n # No noise in Gold labels\n train_dataloader = EmmentalDataLoader(\n task_to_label_dict={ATTRIBUTE: \"labels\"},\n dataset=FonduerDataset(\n ATTRIBUTE,\n train_cands[0],\n F_train[0],\n emb_layer.word2id,\n train_marginals,\n ),\n split=\"train\",\n batch_size=100,\n shuffle=True,\n )\n \n # 5.) 
Training \n tasks = create_task(\n ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model=model_type # \"LSTM\" \n )\n\n model = EmmentalModel(name=f\"{ATTRIBUTE}_task\")\n\n for task in tasks:\n model.add_task(task)\n\n emmental_learner = EmmentalLearner()\n emmental_learner.learn(model, [train_dataloader])\n \n return (model, emb_layer)\n\n\n def eval_model(model, emb_layer, cands, F, align_type = \"row\"):\n # Extract candidates and features based on the align type (row/column)\n align_val = 0 if align_type == \"row\" else 1\n train_cands = cands[align_val][0]\n dev_cands = cands[align_val][1]\n test_cands = cands[align_val][2] \n F_train = F[align_val][0]\n F_dev = F[align_val][1]\n F_test = F[align_val][2]\n row_on = True if align_type == \"row\" else False\n col_on = True if align_type == \"col\" else False\n \n # Generate dataloader for test data\n test_dataloader = EmmentalDataLoader(\n task_to_label_dict={ATTRIBUTE: \"labels\"},\n dataset=FonduerDataset(\n ATTRIBUTE, test_cands[0], F_test[0], emb_layer.word2id, 2\n ),\n split=\"test\",\n batch_size=100,\n shuffle=False,\n )\n\n test_preds = model.predict(test_dataloader, return_preds=True)\n positive = np.where(np.array(test_preds[\"probs\"][ATTRIBUTE])[:, TRUE] > 0.6)\n true_pred = [test_cands[0][_] for _ in positive[0]]\n test_results = entity_level_f1(true_pred, gold_file, ATTRIBUTE, test_docs, row_on=row_on, col_on=col_on)\n \n # Run on dev and train set for validation\n # We run the predictions also on our training and dev set, to validate that everything seems to work smoothly\n \n # Generate dataloader for dev data\n dev_dataloader = EmmentalDataLoader(\n task_to_label_dict={ATTRIBUTE: \"labels\"},\n dataset=FonduerDataset(\n ATTRIBUTE, dev_cands[0], F_dev[0], emb_layer.word2id, 2\n ),\n split=\"test\",\n batch_size=100,\n shuffle=False,\n )\n\n\n dev_preds = model.predict(dev_dataloader, return_preds=True)\n positive_dev = np.where(np.array(dev_preds[\"probs\"][ATTRIBUTE])[:, TRUE] > 0.6)\n true_dev_pred = [dev_cands[0][_] for _ in positive_dev[0]]\n dev_results = entity_level_f1(true_dev_pred, gold_file, ATTRIBUTE, dev_docs, row_on=row_on, col_on=col_on)\n \n # Generate dataloader for train data\n train_dataloader = EmmentalDataLoader(\n task_to_label_dict={ATTRIBUTE: \"labels\"},\n dataset=FonduerDataset(\n ATTRIBUTE, train_cands[0], F_train[0], emb_layer.word2id, 2\n ),\n split=\"test\",\n batch_size=100,\n shuffle=False,\n )\n\n\n train_preds = model.predict(train_dataloader, return_preds=True)\n positive_train = np.where(np.array(train_preds[\"probs\"][ATTRIBUTE])[:, TRUE] > 0.6)\n true_train_pred = [train_cands[0][_] for _ in positive_train[0]]\n train_results = entity_level_f1(true_train_pred, gold_file, ATTRIBUTE, train_docs, row_on=row_on, col_on=col_on)\n \n return [train_results, dev_results, test_results]\n\n return (train_model, eval_model)"
] | [
[
"numpy.array"
]
] |
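The record above builds one-hot training marginals from gold labels and later thresholds predicted probabilities with NumPy (the only API extracted for it). Below is a minimal standalone sketch of just that pattern; `gold` and `train_cands` are hypothetical stand-ins for the record's gold-label function and candidate list, and the fake probabilities are invented for illustration.

```python
import numpy as np

TRUE = 1

# Hypothetical stand-ins for the record's gold-label function and candidates.
gold = lambda cand: cand % 3 == 0
train_cands = list(range(10))

# One-hot marginals [P(False), P(True)] built from hard gold labels,
# mirroring the `train_marginals` construction in the snippet above.
train_marginals = np.array([[0, 1] if gold(c) else [1, 0] for c in train_cands])

# Thresholding the positive-class probability at 0.6, as `eval_model` does.
probs = np.random.rand(len(train_cands), 2)
probs = probs / probs.sum(axis=1, keepdims=True)  # normalize rows into fake probabilities
positive = np.where(probs[:, TRUE] > 0.6)
true_pred = [train_cands[i] for i in positive[0]]
print(train_marginals.shape, len(true_pred))
```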
neptune-ai/examples | [
"e64cfaadb028e2187063fc43768dfee44074729b"
] | [
"integrations-and-supported-tools/optuna/scripts/Neptune_Optuna_integration_customize_callback.py"
] | [
"import lightgbm as lgb\nimport neptune.new as neptune\nimport neptune.new.integrations.optuna as optuna_utils\nimport optuna\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\n\ndef objective(trial):\n data, target = load_breast_cancer(return_X_y=True)\n train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)\n dtrain = lgb.Dataset(train_x, label=train_y)\n\n param = {\n \"verbose\": -1,\n \"objective\": \"binary\",\n \"metric\": \"binary_logloss\",\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_uniform(\"feature_fraction\", 0.2, 1.0),\n \"bagging_fraction\": trial.suggest_uniform(\"bagging_fraction\", 0.2, 1.0),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 3, 100),\n }\n\n gbm = lgb.train(param, dtrain)\n preds = gbm.predict(test_x)\n accuracy = roc_auc_score(test_y, preds)\n\n return accuracy\n\n\n# Create a Neptune Run\nrun = neptune.init(\n api_token=\"ANONYMOUS\", project=\"common/optuna-integration\"\n) # you can pass your credentials here\n\n# Create a NeptuneCallback for Optuna\nneptune_callback = optuna_utils.NeptuneCallback(\n run,\n plots_update_freq=10,\n log_plot_slice=False,\n log_plot_contour=False,\n)\n\n# Pass NeptuneCallback to Optuna Study .optimize()\nstudy = optuna.create_study(direction=\"maximize\")\nstudy.optimize(objective, n_trials=50, callbacks=[neptune_callback])\n\n# Stop logging to a Neptune Run\nrun.stop()\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"sklearn.datasets.load_breast_cancer",
"sklearn.model_selection.train_test_split"
]
] |
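A smaller, self-contained sketch of the same Optuna study pattern as the record above, swapping LightGBM and the Neptune callback for plain scikit-learn so it runs without external services; the `C` search space and `max_iter` value are illustrative assumptions, not taken from the original script.

```python
import optuna
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split


def objective(trial):
    data, target = load_breast_cancer(return_X_y=True)
    train_x, test_x, train_y, test_y = train_test_split(data, target, test_size=0.25)
    # Hypothetical search space for illustration only.
    c = trial.suggest_float("C", 1e-3, 10.0, log=True)
    clf = LogisticRegression(C=c, max_iter=2000).fit(train_x, train_y)
    return roc_auc_score(test_y, clf.predict_proba(test_x)[:, 1])


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=5)
print(study.best_value)
```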
mttgdd/liegroups | [
"ca637bd461300d70d70f90bff7a18462d06f5f82"
] | [
"liegroups/torch/se2.py"
] | [
"import torch\n\nfrom . import _base\nfrom . import utils\nfrom .so2 import SO2Matrix\n\n\nclass SE2Matrix(_base.SEMatrixBase):\n \"\"\"See :mod:`liegroups.SE2`\"\"\"\n dim = 3\n dof = 3\n RotationType = SO2Matrix\n\n def adjoint(self):\n rot_part = self.rot.as_matrix()\n if rot_part.dim() < 3:\n rot_part = rot_part.unsqueeze(dim=0) # matrix --> batch\n\n trans = self.trans\n if trans.dim() < 2:\n # vector --> vectorbatch\n trans = trans.unsqueeze(dim=0)\n\n trans_part = trans.new_empty(\n trans.shape[0], trans.shape[1], 1)\n trans_part[:, 0, 0] = trans[:, 1]\n trans_part[:, 1, 0] = -trans[:, 0]\n\n bottom_row = trans.new_zeros(self.dof)\n bottom_row[-1] = 1.\n bottom_row = bottom_row.unsqueeze_(dim=0).unsqueeze_(\n dim=0).expand(trans.shape[0], 1, self.dof)\n\n return torch.cat([torch.cat([rot_part, trans_part], dim=2),\n bottom_row], dim=1).squeeze_()\n\n @classmethod\n def exp(cls, xi):\n if xi.dim() < 2:\n xi = xi.unsqueeze(dim=0)\n\n if xi.shape[1] != cls.dof:\n raise ValueError(\n \"xi must have shape ({},) or (N,{})\".format(cls.dof, cls.dof))\n\n rho = xi[:, 0:2]\n phi = xi[:, 2]\n\n rot = cls.RotationType.exp(phi)\n rot_jac = cls.RotationType.left_jacobian(phi)\n\n if rot_jac.dim() < 3:\n rot_jac.unsqueeze_(dim=0)\n if rho.dim() < 3:\n rho.unsqueeze_(dim=2)\n\n trans = torch.bmm(rot_jac, rho).squeeze_()\n\n return cls(rot, trans)\n\n @classmethod\n def inv_left_jacobian(cls, xi):\n\n if xi.dim() < 2:\n xi = xi.unsqueeze(dim=0)\n\n if xi.shape[1] != cls.dof:\n raise ValueError(\n \"xi must have shape ({},) or (N,{})\".format(cls.dof, cls.dof))\n\n rho = xi[:, 0:2] # translation part\n phi = xi[:, 2] # rotation part\n\n cos_phi = torch.cos(phi)\n sin_phi = torch.sin(phi)\n phi_sq = phi * phi\n\n small_angle_mask = utils.isclose(phi_sq, 0.)\n small_angle_inds = small_angle_mask.nonzero().squeeze_(dim=1)\n\n large_angle_mask = small_angle_mask.logical_not()\n large_angle_inds = large_angle_mask.nonzero().squeeze_(dim=1)\n\n jac = torch.zeros((xi.shape[0], cls.dof, cls.dof)).to(xi.device)\n\n jac[small_angle_inds, 0, 0] = -(96*(phi_sq[small_angle_inds] - 6))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n jac[small_angle_inds, 0, 1] = -(24*phi[small_angle_inds]*(phi_sq[small_angle_inds] - 12))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n jac[small_angle_inds, 0, 2] = (4*(12*phi[small_angle_inds]*rho[small_angle_inds,0] - 72*rho[small_angle_inds,1] + 12*phi_sq[small_angle_inds]*rho[small_angle_inds,1] - 12*phi_sq[small_angle_inds]*rho[small_angle_inds,1] + phi_sq[small_angle_inds]*phi_sq[small_angle_inds]*rho[small_angle_inds,1] + phi_sq[small_angle_inds]*phi[small_angle_inds]*rho[small_angle_inds,0]))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n jac[small_angle_inds, 1, 0] = (24*phi[small_angle_inds]*(phi_sq[small_angle_inds] - 12))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n jac[small_angle_inds, 1, 1] = 
-(96*(phi_sq[small_angle_inds] - 6))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n jac[small_angle_inds, 1, 2] = (4*(72*rho[small_angle_inds,0] - 12*phi_sq[small_angle_inds]*rho[small_angle_inds,0] + 12*phi[small_angle_inds]*rho[small_angle_inds,1] + 12*phi_sq[small_angle_inds]*rho[small_angle_inds,0] - phi_sq[small_angle_inds]*phi_sq[small_angle_inds]*rho[small_angle_inds,0] + phi_sq[small_angle_inds]*phi[small_angle_inds]*rho[small_angle_inds,1]))/(phi_sq[small_angle_inds]**2*phi_sq[small_angle_inds] + 16*phi_sq[small_angle_inds]**2 - 24*phi_sq[small_angle_inds]*phi_sq[small_angle_inds] - 192*phi_sq[small_angle_inds] + 144*phi_sq[small_angle_inds] + 576)\n\n jac[large_angle_inds, 0, 0] = (sin_phi[large_angle_inds]*phi[large_angle_inds])/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)\n jac[large_angle_inds, 0, 1] = -(phi[large_angle_inds]*(cos_phi[large_angle_inds] - 1))/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)\n jac[large_angle_inds, 0, 2] = (phi[large_angle_inds]*(rho[large_angle_inds,0] - 2*cos_phi[large_angle_inds]*rho[large_angle_inds,0] - phi[large_angle_inds]*rho[large_angle_inds,1] + cos_phi[large_angle_inds]**2*rho[large_angle_inds,0] + sin_phi[large_angle_inds]**2*rho[large_angle_inds,0] + cos_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,1] - sin_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,0]))/(phi_sq[large_angle_inds]*(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1))\n jac[large_angle_inds, 1, 0] = (phi[large_angle_inds]*(cos_phi[large_angle_inds] - 1))/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)\n jac[large_angle_inds, 1, 1] = (sin_phi[large_angle_inds]*phi[large_angle_inds])/(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1)\n jac[large_angle_inds, 1, 2] = (phi[large_angle_inds]*(rho[large_angle_inds,1] - 2*cos_phi[large_angle_inds]*rho[large_angle_inds,1] + phi[large_angle_inds]*rho[large_angle_inds,0] + cos_phi[large_angle_inds]**2*rho[large_angle_inds,1] + sin_phi[large_angle_inds]**2*rho[large_angle_inds,1] - cos_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,0] - sin_phi[large_angle_inds]*phi[large_angle_inds]*rho[large_angle_inds,1]))/(phi_sq[large_angle_inds]*(cos_phi[large_angle_inds]**2 - 2*cos_phi[large_angle_inds] + sin_phi[large_angle_inds]**2 + 1))\n \n jac[:, 2, 0] = 0\n jac[:, 2, 1] = 0\n jac[:, 2, 2] = 1\n\n return jac.squeeze_()\n\n @classmethod\n def left_jacobian(cls, xi):\n\n if xi.dim() < 2:\n xi = xi.unsqueeze(dim=0)\n\n if xi.shape[1] != cls.dof:\n raise ValueError(\n \"xi must have shape ({},) or (N,{})\".format(cls.dof, cls.dof))\n\n rho = xi[:, 0:2] # translation part\n phi = xi[:, 2] # rotation part\n\n cos_phi = torch.cos(phi)\n sin_phi = torch.sin(phi)\n phi_sq = phi * phi\n\n small_angle_mask = utils.isclose(phi_sq, 0.)\n small_angle_inds = small_angle_mask.nonzero().squeeze_(dim=1)\n\n large_angle_mask = small_angle_mask.logical_not()\n large_angle_inds = large_angle_mask.nonzero().squeeze_(dim=1)\n\n jac = torch.zeros((xi.shape[0], cls.dof, cls.dof)).to(xi.device)\n\n jac[small_angle_inds, 0, 0] = 1 - 1./6. 
* phi_sq[small_angle_inds]\n jac[small_angle_inds, 0, 1] = -(0.5 * phi[small_angle_inds] - 1./24. * phi[small_angle_inds] * phi_sq[small_angle_inds])\n jac[small_angle_inds, 0, 2] = rho[small_angle_inds,1] / 2. + phi[small_angle_inds] * rho[small_angle_inds,0] / 6.\n jac[small_angle_inds, 1, 0] = 0.5 * phi[small_angle_inds] - 1./24. * phi[small_angle_inds] * phi_sq[small_angle_inds]\n jac[small_angle_inds, 1, 1] = 1 - 1./6. * phi_sq[small_angle_inds]\n jac[small_angle_inds, 1, 2] = -rho[small_angle_inds,0] / 2. + phi[small_angle_inds] * rho[small_angle_inds,1] / 6.\n\n jac[large_angle_inds, 0, 0] = sin_phi[large_angle_inds] / phi[large_angle_inds]\n jac[large_angle_inds, 0, 1] = -(1 - cos_phi[large_angle_inds]) / phi[large_angle_inds]\n jac[large_angle_inds, 0, 2] = ( rho[large_angle_inds,1] + phi[large_angle_inds]*rho[large_angle_inds,0] - rho[large_angle_inds,1]*cos_phi[large_angle_inds] - rho[large_angle_inds,0]*sin_phi[large_angle_inds])/phi_sq[large_angle_inds]\n jac[large_angle_inds, 1, 0] = (1 - cos_phi[large_angle_inds]) / phi[large_angle_inds]\n jac[large_angle_inds, 1, 1] = sin_phi[large_angle_inds] / phi[large_angle_inds]\n jac[large_angle_inds, 1, 2] = (-rho[large_angle_inds,0] + phi[large_angle_inds]*rho[large_angle_inds,1] + rho[large_angle_inds,0]*cos_phi[large_angle_inds] - rho[large_angle_inds,1]*sin_phi[large_angle_inds])/phi_sq[large_angle_inds]\n\n jac[:, 2, 0] = 0\n jac[:, 2, 1] = 0\n jac[:, 2, 2] = 1\n\n return jac.squeeze_()\n\n def log(self):\n phi = self.rot.log()\n inv_rot_jac = self.RotationType.inv_left_jacobian(phi)\n\n if self.trans.dim() < 2:\n trans = self.trans.unsqueeze(dim=0)\n else:\n trans = self.trans\n\n if phi.dim() < 1:\n phi.unsqueeze_(dim=0)\n phi.unsqueeze_(dim=1) # because phi is 1-dimensional for SE2\n\n if inv_rot_jac.dim() < 3:\n inv_rot_jac.unsqueeze_(dim=0)\n if trans.dim() < 3:\n trans = trans.unsqueeze(dim=2)\n\n rho = torch.bmm(inv_rot_jac, trans).squeeze_()\n if rho.dim() < 2:\n rho.unsqueeze_(dim=0)\n\n return torch.cat([rho, phi], dim=1).squeeze_()\n\n @classmethod\n def odot(cls, p, directional=False):\n if p.dim() < 2:\n p = p.unsqueeze(dim=0) # vector --> vectorbatch\n\n result = p.__class__(p.shape[0], p.shape[1], cls.dof).zero_()\n\n # Got euclidean coordinates\n if p.shape[1] == cls.dim - 1:\n # Assume scale parameter is 1 unless p is a direction\n # vector, in which case the scale is 0\n if not directional:\n result[:, 0:2, 0:2] = torch.eye(\n cls.RotationType.dim).unsqueeze_(dim=0).expand(\n p.shape[0], cls.RotationType.dim, cls.RotationType.dim)\n\n result[:, 0:2, 2] = torch.mm(\n cls.RotationType.wedge(p.__class__([1.])),\n p.transpose(1, 0)).transpose_(1, 0)\n\n # Got homogeneous coordinates\n elif p.shape[1] == cls.dim:\n result[:, 0:2, 0:2] = \\\n p[:, 2].unsqueeze_(dim=1).unsqueeze_(dim=2) * \\\n torch.eye(\n cls.RotationType.dim).unsqueeze_(dim=0).repeat(\n p.shape[0], 1, 1)\n\n result[:, 0:2, 2] = torch.mm(\n cls.RotationType.wedge(p.__class__([1.])),\n p[:, 0:2].transpose_(1, 0)).transpose_(1, 0)\n\n # Got wrong dimension\n else:\n raise ValueError(\"p must have shape ({},), ({},), (N,{}) or (N,{})\".format(\n cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))\n\n return result.squeeze_()\n\n @classmethod\n def vee(cls, Xi):\n if Xi.dim() < 3:\n Xi = Xi.unsqueeze(dim=0)\n\n if Xi.shape[1:3] != (cls.dim, cls.dim):\n raise ValueError(\"Xi must have shape ({},{}) or (N,{},{})\".format(\n cls.dim, cls.dim, cls.dim, cls.dim))\n\n xi = Xi.new_empty(Xi.shape[0], cls.dof)\n xi[:, 0:2] = Xi[:, 0:2, 2]\n xi[:, 2] = 
cls.RotationType.vee(Xi[:, 0:2, 0:2])\n\n return xi.squeeze_()\n\n @classmethod\n def wedge(cls, xi):\n if xi.dim() < 2:\n xi = xi.unsqueeze(dim=0)\n\n if xi.shape[1] != cls.dof:\n raise ValueError(\n \"phi must have shape ({},) or (N,{})\".format(cls.dof, cls.dof))\n\n Xi = xi.new_zeros(xi.shape[0], cls.dim, cls.dim)\n Xi[:, 0:2, 0:2] = cls.RotationType.wedge(xi[:, 2])\n Xi[:, 0:2, 2] = xi[:, 0:2]\n\n return Xi.squeeze_()\n"
] | [
[
"torch.cos",
"torch.sin",
"torch.zeros",
"torch.eye",
"torch.bmm",
"torch.cat"
]
] |
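As a quick orientation to the se(2) layout used in the record above (SO(2) block in the top-left 2x2, translation in the last column), here is a hedged single-vector wedge/vee sketch in plain PyTorch. It is not the batched `SE2Matrix` API, just a consistency check of that convention.

```python
import torch

def se2_wedge(xi):
    # xi = (rho_x, rho_y, phi) -> 3x3 se(2) matrix with the SO(2) block top-left
    Xi = torch.zeros(3, 3, dtype=xi.dtype)
    Xi[0, 1] = -xi[2]
    Xi[1, 0] = xi[2]
    Xi[0, 2] = xi[0]
    Xi[1, 2] = xi[1]
    return Xi

def se2_vee(Xi):
    # Inverse of wedge: translation from the last column, angle from the rotation block
    return torch.stack([Xi[0, 2], Xi[1, 2], Xi[1, 0]])

xi = torch.tensor([0.5, -0.2, 0.3])
assert torch.allclose(se2_vee(se2_wedge(xi)), xi)
```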
972d5defe3218bd62b741e6a2f11f5b3/riptable | [
"bb928c11752e831ec701f91964979b31db53826a",
"bb928c11752e831ec701f91964979b31db53826a"
] | [
"riptable/tests/test_scalar.py",
"riptable/tests/test_accum2.py"
] | [
"\"\"\"Test around scalar constructors and scalar methods.\"\"\"\r\nimport riptable as rt\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom numpy.testing import assert_almost_equal, assert_warns\r\n\r\n\r\nclass TestScalarConstructor(object):\r\n # Type-coercion from strings test cases adapted from numpy/core/tests/test_scalar_ctors.py.\r\n # https://github.com/numpy/numpy/blob/c31cc36a8a814ed4844a2a553454185601914a5a/numpy/core/tests/test_scalar_ctors.py\r\n @pytest.mark.parametrize(\r\n \"scalar_ctor, numeric_string\",\r\n [\r\n # simple numeric string\r\n (\"single\", \"1.234\"),\r\n (\"double\", \"1.234\"),\r\n (\"longdouble\", \"1.234\"),\r\n # numeric string with overflow overflow; expect inf value\r\n (\"half\", \"1e10000\"),\r\n (\"single\", \"1e10000\"),\r\n (\"double\", \"1e10000\"),\r\n (\"longdouble\", \"1e10000\"),\r\n (\"longdouble\", \"-1e10000\"),\r\n ],\r\n )\r\n def test_floating(self, scalar_ctor, numeric_string):\r\n rt_value = getattr(rt, scalar_ctor)(numeric_string)\r\n np_value = getattr(np, scalar_ctor)(numeric_string)\r\n assert_almost_equal(rt_value, np_value)\r\n\r\n @pytest.mark.parametrize(\r\n \"scalar_ctor, numeric_string\",\r\n [(\"longdouble\", \"1e10000\"), (\"longdouble\", \"-1e10000\"),],\r\n )\r\n def test_overflow_warning(self, scalar_ctor, numeric_string):\r\n assert_warns(RuntimeWarning, getattr(np, scalar_ctor), numeric_string)\r\n",
"import unittest\r\nimport pandas as pd\r\nimport pytest\r\n\r\nimport riptable as rt\r\n# N.B. TL;DR We have to import the actual implementation module to override the module global\r\n# variable \"tm.N\" and \"tm.K\".\r\n# In pandas 1.0 they move the code from pandas/util/testing.py to pandas/_testing.py.\r\n# The \"import pandas.util.testing\" still works but because it doesn't contain the actual code\r\n# our attempt to override the \"tm.N\" and \"tm.K\" will not change the actual value for\r\n# makeTimeDataFrame, which will produce data with different shape and make the test\r\n# \"test_accum_table\" fail. Maybe we want to reconsider using the pandas internal testing utils.\r\ntry:\r\n import pandas._testing as tm\r\nexcept ImportError:\r\n import pandas.util.testing as tm\r\n\r\nfrom riptable import *\r\nfrom numpy.testing import (\r\n assert_array_equal,\r\n assert_almost_equal,\r\n assert_array_almost_equal,\r\n)\r\nfrom riptable.rt_numpy import arange\r\n# To create AccumTable test data\r\nfrom riptable.Utils.pandas_utils import dataset_from_pandas_df\r\nfrom riptable.rt_datetime import DateTimeNano\r\n\r\n\r\ntm.N = 3\r\ntm.K = 5\r\n\r\n\r\nclass Accum2_Test(unittest.TestCase):\r\n '''\r\n TODO: add more tests for different types\r\n '''\r\n\r\n def test_accum2(self):\r\n c = cut(arange(10), 3)\r\n self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)\r\n\r\n c = cut(arange(10.0), 3)\r\n self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3])) == 0)\r\n\r\n c = cut(arange(11), 3)\r\n self.assertTrue(sum(c._np - FA([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3])) == 0)\r\n\r\n c = cut(FA([2, 4, 6, 8, 10]), FA([0, 2, 4, 6, 8, 10]))\r\n self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)\r\n\r\n c = cut(\r\n FA([2, 4, 6, 8, 10]),\r\n FA([0, 2, 4, 6, 8, 10]),\r\n labels=['a', 'b', 'c', 'd', 'e'],\r\n )\r\n self.assertTrue(sum(c._np - FA([1, 2, 3, 4, 5])) == 0)\r\n\r\n def test_qcut(self):\r\n c = qcut(arange(10), 3)\r\n self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4])) == 0)\r\n\r\n c = qcut(arange(11), 3)\r\n self.assertTrue(sum(c._np - FA([2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4])) == 0)\r\n\r\n c = qcut(range(5), 3, labels=[\"good\", \"medium\", \"bad\"])\r\n self.assertTrue(sum(c._np - FA([2, 2, 3, 4, 4])) == 0)\r\n\r\n c = cut(\r\n FA([2, 4, 6, 8, 10]),\r\n FA([0, 2, 4, 6, 8, 10]),\r\n labels=['a', 'b', 'c', 'd', 'e'],\r\n )\r\n\r\n def test_cut_errors(self):\r\n with self.assertRaises(ValueError):\r\n c = cut(\r\n FA([2, 4, 6, 8, 10]),\r\n FA([0, 2, 4, 6, 8, 10]),\r\n labels=['a', 'b', 'c', 'd', 'e', 'f'],\r\n )\r\n\r\n def test_simple_cats(self):\r\n data = arange(1, 6) * 10\r\n colnames = FastArray(['a', 'b', 'c', 'd', 'e'])\r\n c1 = Categorical(colnames)\r\n c2 = Categorical(arange(5))\r\n\r\n # no filter\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 7)\r\n for i, colname in enumerate(colnames):\r\n arr = result[colname]\r\n self.assertEqual(arr[i], data[i])\r\n\r\n def test_simple_cats_filter_accum(self):\r\n data = arange(1, 6) * 10\r\n colnames = FastArray(['a', 'b', 'c', 'd', 'e'])\r\n c1 = Categorical(colnames)\r\n c2 = Categorical(arange(5))\r\n\r\n # filtered accum object\r\n ac = Accum2(c2, c1, showfilter=True)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 8)\r\n for i, colname in enumerate(colnames):\r\n arr = result[colname]\r\n self.assertEqual(arr[i + 1], data[i])\r\n\r\n def test_simple_cats_filter_operation(self):\r\n data = arange(1, 6) * 10\r\n colnames = FastArray(['a', 'b', 'c', 'd', 
'e'])\r\n c1 = Categorical(colnames)\r\n c2 = Categorical(arange(5))\r\n\r\n # filtered operation\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data, showfilter=True)\r\n self.assertEqual(result._ncols, 8)\r\n for i, colname in enumerate(colnames):\r\n arr = result[colname]\r\n self.assertEqual(arr[i + 1], data[i])\r\n\r\n def test_multikey_cats(self):\r\n unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])\r\n ints = arange(1, 6) * 10\r\n data = np.random.rand(5) * 10\r\n\r\n # unsorted no filter\r\n c1 = Categorical([unsorted_str, ints])\r\n c2 = Categorical([unsorted_str, ints])\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 8)\r\n for i, key1 in enumerate(unsorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + str(k2) + \")\"\r\n arr = result[full_colname]\r\n self.assertEqual(arr[i], data[i])\r\n\r\n # sorted no filter\r\n sortidx = np.argsort(unsorted_str)\r\n sorted_str = unsorted_str[sortidx]\r\n sorted_ints = ints[sortidx]\r\n sorted_data = data[sortidx]\r\n c1 = Categorical([unsorted_str, ints], ordered=True)\r\n c2 = Categorical([unsorted_str, ints], ordered=True)\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 8)\r\n for i, key1 in enumerate(sorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = sorted_ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + str(k2) + \")\"\r\n arr = result[full_colname]\r\n self.assertEqual(arr[i], sorted_data[i])\r\n\r\n @pytest.mark.xfail(reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.')\r\n def test_multikey_cats_filter_accum_sorted(self):\r\n unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])\r\n ints = arange(1, 6) * 10\r\n data = np.random.rand(5) * 10\r\n\r\n # unsorted filter accum object\r\n c1 = Categorical([unsorted_str, ints])\r\n c2 = Categorical([unsorted_str, ints])\r\n ac = Accum2(c2, c1, showfilter=True)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 9)\r\n for i, key1 in enumerate(unsorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + str(k2) + \")\"\r\n arr = result[full_colname]\r\n self.assertEqual(arr[i + 1], data[i])\r\n\r\n # sorted filter accum object\r\n sortidx = np.argsort(unsorted_str)\r\n sorted_str = unsorted_str[sortidx]\r\n sorted_ints = ints[sortidx]\r\n sorted_data = data[sortidx]\r\n c1 = Categorical([unsorted_str, ints], sort_gb=True)\r\n c2 = Categorical([unsorted_str, ints], sort_gb=True)\r\n ac = Accum2(c2, c1, showfilter=True)\r\n result = ac.sum(data)\r\n self.assertEqual(result._ncols, 9)\r\n for i, key1 in enumerate(sorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = sorted_ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + str(k2) + \")\"\r\n arr = result[full_colname]\r\n # TODO fix this regression that was masked due to duplicate test names\r\n # self.assertAlmostEqual(arr[i + 1], sorted_data[i])\r\n\r\n def test_multikey_cats_filter_accum_ordered(self):\r\n unsorted_str = FastArray(['c', 'e', 'b', 'd', 'a'])\r\n ints = arange(1, 6) * 10\r\n data = np.random.rand(5) * 10\r\n\r\n # unsorted filter accum object\r\n c1 = Categorical([unsorted_str, ints])\r\n c2 = Categorical([unsorted_str, ints])\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data, showfilter=True)\r\n self.assertEqual(result._ncols, 9)\r\n for i, key1 in enumerate(unsorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + 
str(k2) + \")\"\r\n arr = result[full_colname]\r\n self.assertEqual(arr[i + 1], data[i])\r\n\r\n # sorted filter accum object\r\n sortidx = np.argsort(unsorted_str)\r\n sorted_str = unsorted_str[sortidx]\r\n sorted_ints = ints[sortidx]\r\n sorted_data = data[sortidx]\r\n c1 = Categorical([unsorted_str, ints], ordered=True)\r\n c2 = Categorical([unsorted_str, ints], ordered=True)\r\n ac = Accum2(c2, c1)\r\n result = ac.sum(data, showfilter=True)\r\n self.assertEqual(result._ncols, 9)\r\n for i, key1 in enumerate(sorted_str):\r\n k1 = bytes.decode(key1)\r\n k2 = sorted_ints[i]\r\n full_colname = \"('\" + k1 + \"', \" + str(k2) + \")\"\r\n arr = result[full_colname]\r\n self.assertEqual(arr[i + 1], sorted_data[i])\r\n\r\n def test_dataset_accum2(self):\r\n # test from accum2 off dataset and with a filter\r\n ds = Dataset({'test': arange(10), 'data': arange(10) // 2})\r\n x = ds.accum2('data', 'test').sum(ds.test, filter=ds.data == 3)\r\n totalcol = x.summary_get_names()[0]\r\n self.assertEqual(x[totalcol][3], 13)\r\n\r\n def test_accum2_mean(self):\r\n ds = Dataset({'time': arange(200.0)})\r\n ds.data = np.random.randint(7, size=200)\r\n ds.data2 = np.random.randint(7, size=200)\r\n symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']\r\n ds.symbol = Cat(1 + arange(200) % 5, symbols)\r\n ac = Accum2(ds.data, ds.symbol).mean(ds.time)\r\n totalcol = ac[ac.summary_get_names()[0]]\r\n footer = ac.footer_get_values()['Mean']\r\n for i in range(len(symbols)):\r\n s_mean = ds[ds.symbol == symbols[i], :].time.mean()\r\n self.assertEqual(footer[i + 1], s_mean)\r\n for i in range(7):\r\n s_mean = ds[ds.data == i, :].time.mean()\r\n self.assertEqual(totalcol[i], s_mean)\r\n\r\n def test_accum2_median(self):\r\n ds = Dataset({'time': arange(200.0)})\r\n ds.data = np.random.randint(7, size=200)\r\n ds.data2 = np.random.randint(7, size=200)\r\n symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']\r\n ds.symbol = Cat(1 + arange(200) % 5, symbols)\r\n ac = Accum2(ds.data, ds.symbol).median(ds.time)\r\n totalcol = ac[ac.summary_get_names()[0]]\r\n footer = ac.footer_get_values()['Median']\r\n for i in range(len(symbols)):\r\n s_median = ds[ds.symbol == symbols[i], :].time.median()\r\n self.assertEqual(footer[i + 1], s_median)\r\n for i in range(7):\r\n s_median = ds[ds.data == i, :].time.median()\r\n self.assertEqual(totalcol[i], s_median)\r\n\r\n def test_accum2_nanmedian_with_filter(self):\r\n ds = Dataset({'time': arange(200.0)})\r\n ds.data = np.random.randint(7, size=200)\r\n ds.data2 = np.random.randint(7, size=200)\r\n symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']\r\n # N.B. make a copy here for testing\r\n symbol_categorical = Cat(1 + arange(200) % 5, symbols)\r\n # N.B. 
Categorical.copy and Categorical constructor doesn't do deep copy?!\r\n ds.symbol = Cat(1 + arange(200) % 5, symbols)\r\n\r\n chosen_symbols = ['AMZN', 'AAPL']\r\n filt = symbol_categorical.isin(chosen_symbols)\r\n ac = Accum2(ds.data, ds.symbol)\r\n stat1 = ac.nanmedian(ds.time, filter=filt)\r\n totalcol = stat1[stat1.summary_get_names()[0]]\r\n footer = stat1.footer_get_values()['Median']\r\n # Make sure we don't change the input data\r\n self.assertTrue(not rt.any(ds.symbol._fa == 0))\r\n for sym in chosen_symbols:\r\n s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)\r\n i = rt.where(symbol_categorical.category_array == sym)[0].item()\r\n self.assertEqual(footer[i + 1], s_median)\r\n for i in range(7):\r\n s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)\r\n self.assertEqual(totalcol[i], s_median)\r\n\r\n chosen_symbols = ['IBM', 'FB']\r\n filt = symbol_categorical.isin(chosen_symbols)\r\n stat2 = ac.nanmedian(ds.time, filter=filt)\r\n totalcol = stat2[stat2.summary_get_names()[0]]\r\n footer = stat2.footer_get_values()['Median']\r\n # Make sure we don't change the input data\r\n self.assertTrue(not rt.any(ds.symbol._fa == 0))\r\n for sym in chosen_symbols:\r\n s_median = rt.nanmedian(ds[symbol_categorical == sym, :].time)\r\n i = rt.where(symbol_categorical.category_array == sym)[0].item()\r\n self.assertEqual(footer[i + 1], s_median)\r\n for i in range(7):\r\n s_median = rt.nanmedian(ds[(ds.data == i) & filt, :].time)\r\n self.assertEqual(totalcol[i], s_median)\r\n\r\n def test_showfilter_label_subclass(self):\r\n d = Date.range('20190201', '20190210')\r\n c = Categorical(d)\r\n c2 = Categorical(arange(10))\r\n ac = Accum2(c, c2)\r\n result = ac.count(showfilter=True)\r\n\r\n self.assertTrue(isinstance(result.YLabel, Date))\r\n self.assertTrue(result.YLabel.isnan()[0])\r\n\r\n d = DateTimeNano.random(10)\r\n c = Categorical(d)\r\n c2 = Categorical(arange(10))\r\n ac = Accum2(c, c2)\r\n result = ac.count(showfilter=True)\r\n\r\n self.assertTrue(isinstance(result.YLabel, DateTimeNano))\r\n self.assertTrue(result.YLabel.isnan()[0])\r\n\r\n d = DateSpan(arange(10, 20))\r\n c = Categorical(d)\r\n c2 = Categorical(arange(10))\r\n ac = Accum2(c, c2)\r\n result = ac.count(showfilter=True)\r\n\r\n self.assertTrue(isinstance(result.YLabel, DateSpan))\r\n self.assertTrue(result.YLabel.isnan()[0])\r\n\r\n d = TimeSpan(np.random.rand(10) * 10_000_000_000)\r\n c = Categorical(d)\r\n c2 = Categorical(arange(10))\r\n ac = Accum2(c, c2)\r\n result = ac.count(showfilter=True)\r\n\r\n self.assertTrue(isinstance(result.YLabel, TimeSpan))\r\n self.assertTrue(result.YLabel.isnan()[0])\r\n\r\n def test_apply(self):\r\n arrsize = 200\r\n numrows = 7\r\n\r\n ds = Dataset({'time': arange(arrsize * 1.0)})\r\n ds.data = np.random.randint(numrows, size=arrsize)\r\n ds.data2 = np.random.randint(numrows, size=arrsize)\r\n symbols = ['AAPL', 'AMZN', 'FB', 'GOOG', 'IBM']\r\n ds.symbol = Cat(1 + arange(arrsize) % len(symbols), symbols)\r\n ds.accum2('symbol', 'data').sum(ds.data2)\r\n ds.accum2('symbol', 'data').sum(ds.data2, showfilter=True)\r\n ds.accum2('symbol', 'data').median(ds.data2, showfilter=True)\r\n ds.accum2('symbol', 'data').median(ds.data2, showfilter=False)\r\n ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=True)\r\n ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, showfilter=False)\r\n f = logical(arange(200) % 2)\r\n ds.accum2('symbol', 'data').apply_reduce(np.median, ds.data2, filter=f)\r\n ds.accum2('symbol', 'data').apply_reduce(\r\n 
np.median, ds.data2, filter=f, showfilter=True\r\n )\r\n ds.accum2('symbol', 'data').median(ds.data2, filter=f, showfilter=True)\r\n\r\n def test_apply_nonreduce(self):\r\n arrsize = 200\r\n numrows = 7\r\n ds = rt.Dataset({'time': rt.arange(arrsize * 1.0)})\r\n ds.data = arange(arrsize) % numrows\r\n ds.data2 = (arange(arrsize) + 3) % numrows\r\n symbols = [\r\n 'AAPL',\r\n 'AMZN',\r\n 'FB',\r\n 'GOOG',\r\n 'IBM',\r\n '6',\r\n '7',\r\n '8',\r\n '9',\r\n '10',\r\n '11',\r\n '12',\r\n '13',\r\n '14',\r\n '15',\r\n '16',\r\n '17',\r\n '18',\r\n ]\r\n ds.symbol = rt.Cat(1 + rt.arange(arrsize) % len(symbols), symbols)\r\n result = ds.symbol.apply_reduce(\r\n lambda x, y: np.sum(np.minimum(x, y)), (ds.data, ds.data)\r\n )\r\n\r\n ac = ds.accum2('symbol', 'data')\r\n newds = ac.apply_nonreduce(np.cumsum)\r\n ds2 = ac.apply_reduce(\r\n lambda x, y: np.sum(np.maximum(x, y)), (newds.data, newds.data2)\r\n )\r\n\r\n x = np.maximum(newds.data, newds.data2)\r\n y = ac.apply_nonreduce(\r\n lambda x, y: np.maximum(x, y), (newds.data, newds.data2)\r\n )[0]\r\n self.assertTrue(np.all(x == y))\r\n\r\n\r\nclass AccumTable_Test(unittest.TestCase):\r\n @pytest.mark.skip(reason=\"Test needs to be re-written to remove the np.random.seed usage -- it's not stable across numpy versions.\")\r\n def test_accum_table(self):\r\n\r\n # Create the test data\r\n\r\n def unpivot(frame):\r\n N, K = frame.shape\r\n data = {\r\n 'value': frame.values.ravel('F'),\r\n 'variable': np.asarray(frame.columns).repeat(N),\r\n 'date': np.tile(np.asarray(frame.index), K),\r\n }\r\n return pd.DataFrame(data, columns=['date', 'variable', 'value'])\r\n\r\n np.random.seed(1234)\r\n df = unpivot(pd.concat([tm.makeTimeDataFrame(), tm.makeTimeDataFrame()]))\r\n ds = dataset_from_pandas_df(df)\r\n ds.date = DateTimeNano(ds.date, from_tz='NYC').to_iso()\r\n ds.date = rt.FastArray([d[:10] for d in ds.date])\r\n ds.variable = rt.Categorical(ds.variable)\r\n ds.date = rt.Categorical(ds.date)\r\n\r\n at = rt.AccumTable(ds.date, ds.variable)\r\n\r\n # Add and view inner tables with totals\r\n at['Sum'] = at.sum(ds.value)\r\n self.assertEqual(at['Sum'].shape, (3, 7))\r\n assert_array_almost_equal(\r\n at['Sum']['A'], np.array([0.47, -0.79, 1.72]), decimal=2\r\n )\r\n\r\n vw = at.gen('Sum')\r\n self.assertEqual(vw.shape, (3, 7))\r\n assert_array_almost_equal(vw['A'], np.array([0.47, -0.79, 1.72]), decimal=2)\r\n\r\n assert_array_almost_equal(vw['Sum'], np.array([-0.10, -5.02, 5.37]), decimal=2)\r\n assert_array_almost_equal(\r\n vw.footer_get_values(columns=['Sum'])['Sum'], np.array([0.25]), decimal=2\r\n )\r\n\r\n at['Mean'] = at.mean(ds.value)\r\n self.assertEqual(at['Mean'].shape, (3, 7))\r\n assert_array_almost_equal(\r\n at['Mean']['A'], np.array([0.24, -0.39, 0.86]), decimal=2\r\n )\r\n\r\n at['Half'] = at['Mean'] / at['Sum']\r\n self.assertEqual(at['Half'].shape, (3, 7))\r\n assert_array_almost_equal(at['Half']['A'], np.array([0.5, 0.5, 0.5]), decimal=2)\r\n\r\n # Add and view inner tables with blanks\r\n\r\n at['Blanks'] = at['Sum'].copy()\r\n at['Blanks']['C'] = 0.0\r\n for col in at['Blanks'][:, 1:]:\r\n at['Blanks'][col][2] = np.nan\r\n\r\n vw = at.gen('Blanks')\r\n self.assertEqual(vw.shape, (2, 9))\r\n assert_array_almost_equal(vw['A'], np.array([0.47, -0.79]), decimal=2)\r\n assert_array_almost_equal(vw['Blanks'], np.array([-0.10, -5.02]), decimal=2)\r\n self.assertAlmostEqual(\r\n vw.footer_get_dict()['Blanks']['Blanks'], 0.245, places=2\r\n )\r\n\r\n vw = at.gen('Blanks', remove_blanks=False)\r\n self.assertEqual(vw.shape, (3, 
10))\r\n assert_array_almost_equal(vw['A'], np.array([0.47, -0.79, np.nan]), decimal=2)\r\n assert_array_almost_equal(\r\n vw['Blanks'], np.array([-0.10, -5.02, np.nan]), decimal=2\r\n )\r\n\r\n # Test division with zeros and nans\r\n at['Bad'] = at['Blanks'] / at['Half']\r\n self.assertEqual(at['Blanks'].shape, (3, 7))\r\n vw = at.gen('Bad')\r\n self.assertEqual(vw.shape, (2, 10))\r\n vw = at.gen('Blanks')\r\n self.assertEqual(vw.shape, (2, 10))\r\n vw = at.gen('Half')\r\n self.assertEqual(vw.shape, (3, 11))\r\n\r\n # Set margin columns to the right\r\n\r\n at.set_margin_columns(['Blanks', 'Mean'])\r\n vw = at.gen('Half')\r\n self.assertEqual(vw.shape, (3, 9))\r\n self.assertEqual(vw.keys()[6], 'Half')\r\n self.assertEqual(vw.keys()[7], 'Blanks')\r\n self.assertEqual(vw.keys()[8], 'Mean')\r\n self.assertEqual(\r\n list(vw.footer_get_dict().keys()), ['Half', 'Sum', 'Mean', 'Blanks', 'Bad']\r\n )\r\n\r\n vw = at.gen()\r\n self.assertEqual(vw.keys()[6], 'Half')\r\n\r\n vw = at.gen('Sum')\r\n self.assertEqual(vw.keys()[6], 'Sum')\r\n self.assertEqual(vw.keys()[7], 'Blanks')\r\n self.assertEqual(vw.keys()[8], 'Mean')\r\n self.assertEqual(\r\n list(vw.footer_get_dict().keys()), ['Sum', 'Mean', 'Half', 'Blanks', 'Bad']\r\n )\r\n\r\n # Set footer rows at the bottom\r\n\r\n at.set_footer_rows(['Mean'])\r\n vw = at.gen('Half')\r\n self.assertEqual(vw.shape, (3, 9))\r\n self.assertEqual(vw.keys()[6], 'Half')\r\n self.assertEqual(vw.keys()[7], 'Blanks')\r\n self.assertEqual(vw.keys()[8], 'Mean')\r\n self.assertEqual(list(vw.footer_get_dict().keys()), ['Half', 'Mean'])\r\n\r\n vw = at.gen('Sum')\r\n self.assertEqual(vw.keys()[6], 'Sum')\r\n self.assertEqual(vw.keys()[7], 'Blanks')\r\n self.assertEqual(vw.keys()[8], 'Mean')\r\n self.assertEqual(list(vw.footer_get_dict().keys()), ['Sum', 'Mean'])\r\n\r\n # Access view Dataset elements\r\n\r\n vw = at.gen('Sum')\r\n assert_array_equal(\r\n vw.date, rt.FastArray(['2000-01-03', '2000-01-04', '2000-01-05'])\r\n )\r\n assert_array_almost_equal(vw['Sum'], np.array([-0.10, -5.02, 5.37]), decimal=2)\r\n assert_almost_equal(vw[vw.date == '2000-01-03', 'A'][0], 0.47355353, decimal=2)\r\n assert_almost_equal(\r\n list(vw.footer_get_values('Sum', columns=['A']).values())[0],\r\n 1.409830,\r\n decimal=2,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n tester = unittest.main()\r\n"
] | [
[
"numpy.testing.assert_almost_equal"
],
[
"numpy.testing.assert_almost_equal",
"pandas.util.testing.makeTimeDataFrame",
"pandas.DataFrame"
]
] |
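The `unpivot` helper inside `test_accum_table` above is easy to exercise in isolation. This is a standalone version on a tiny hand-built frame (avoiding the internal `pandas.util.testing.makeTimeDataFrame`); the example index and values are invented for illustration.

```python
import numpy as np
import pandas as pd

def unpivot(frame):
    # Wide (date x variable) -> long (date, variable, value), column-major ravel
    n, k = frame.shape
    return pd.DataFrame(
        {
            "date": np.tile(np.asarray(frame.index), k),
            "variable": np.asarray(frame.columns).repeat(n),
            "value": frame.values.ravel("F"),
        },
        columns=["date", "variable", "value"],
    )

wide = pd.DataFrame(
    np.arange(6).reshape(3, 2),
    index=pd.date_range("2000-01-03", periods=3),
    columns=["A", "B"],
)
tidy = unpivot(wide)
assert tidy.shape == (6, 3)
assert list(tidy["value"]) == [0, 2, 4, 1, 3, 5]  # column A first, then column B
```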
vik748/OpenSfM | [
"bd949246e3e0d6d3a707a08224038034d27e3ee8",
"569144c26df860cfa45d183f7701d0414e35d086"
] | [
"scripts/track_length_analysis_test.py",
"opensfm/test/test_triangulation.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 19 08:11:51 2021\n\n@author: vik748\n\"\"\"\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys,os\nimport pandas as pd\n\ndef tracks_histogram(recon_file, tracks_file, ax, model_num=0, bins=np.linspace(2,15,14)):\n '''\n How the tracks.csv file is written\n template <class S>\n void WriteToStreamCurrentVersion(S& ostream, const TracksManager& manager) {\n ostream << manager.TRACKS_HEADER << \"_v\" << manager.TRACKS_VERSION\n << std::endl;\n const auto shotsIDs = manager.GetShotIds();\n for (const auto& shotID : shotsIDs) {\n const auto observations = manager.GetShotObservations(shotID);\n for (const auto& observation : observations) {\n ostream << shotID << \"\\t\" << observation.first << \"\\t\"\n << observation.second.id << \"\\t\" << observation.second.point(0)\n << \"\\t\" << observation.second.point(1) << \"\\t\"\n << observation.second.scale << \"\\t\" << observation.second.color(0)\n << \"\\t\" << observation.second.color(1) << \"\\t\"\n << observation.second.color(2) << std::endl;\n }\n }\n }\n\n '''\n with open(recon_file) as f:\n data = json.load(f)\n\n if model_num == -1:\n points_dict = {}\n for d in data:\n points_dict.update(d['points'])\n\n else:\n points_dict = data[model_num]['points']\n\n model_0_point_ids_int = [int(k) for k in points_dict.keys()]\n\n tracks_df = pd.read_csv(tracks_file, sep='\\t', skiprows=1,\n names=['image', 'track_id', 'feature_id', 'x', 'y',\n 'scale', 'r', 'g', 'b'])\n track_id_counts = tracks_df.track_id.value_counts()\n\n\n model_0_track_id_counts = track_id_counts[model_0_point_ids_int]\n\n ax.hist(model_0_track_id_counts, bins=bins)\n\n return model_0_track_id_counts\n\n\n########################################\n# Skerki_mud SIFT - RAW vs CLAHE - Model 0\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'\n\nfig1, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig1.suptitle('Skerki Mud SIFT')\n\ntracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('RAW')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'\n\n\ntracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('CLAHE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n\n########################################\n# Skerki_mud RAW - SIFT vs Zernike\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'\n\nfig2, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig2.suptitle('Skerki Mud RAW')\n\ntracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 
10000])\nax[0].set_title('SIFT')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'\n\n\ntracks_histogram(recon_file, tracks_file, ax[1], model_num=1, bins=np.linspace(2,15,14))\nax[1].set_title('ZERNIKE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n########################################\n# Skerki_mud Zernike - RAW vs CLAHE\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_ZERNIKE_tracks.csv'\n\nfig3, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig3.suptitle('Skerki Mud ZERNIKE')\n\ntracks_histogram(recon_file, tracks_file, ax[0], model_num=1, bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('RAW')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_ZERNIKE_tracks.csv'\n\n\ntracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('CLAHE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n\n########################################\n# Stingray - SIFT vs Zernike\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'\n\nfig4, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig4.suptitle('Stingray RAW')\n\ntracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('SIFT')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'\n\n\ntracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('ZERNIKE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n########################################\n# Skerki_mud SIFT - RAW vs CLAHE - Combined\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_RAW_SIFT_tracks.csv'\n\nfig5, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig5.suptitle('Skerki Mud SIFT - Combined')\n\ntracks_histogram(recon_file, tracks_file, ax[0], model_num=-1, bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, 
None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('RAW')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Skerki_mud/Skerki_mud_CLAHE_SIFT_tracks.csv'\n\n\ntracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('CLAHE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n########################################\n# Stingray SIFT - RAW vs CLAHE\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_SIFT_tracks.csv'\n\nfig6, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig6.suptitle('Stingray SIFT')\n\ntracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('RAW')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_SIFT_tracks.csv'\n\ntracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('CLAHE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n########################################\n# Stingray Zernike - RAW vs CLAHE\n########################################\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_ZERNIKE_tracks.csv'\n\nfig7, ax = plt.subplots(nrows=2, sharex=True, sharey=True)\nfig7.suptitle('Stingray ZERNIKE')\n\ncounts0 = tracks_histogram(recon_file, tracks_file, ax[0], bins=np.linspace(2,15,14))\n\nax[0].set_xlim([2, None])\nax[0].set_yscale('log')\nax[0].set_ylim([None, 10000])\nax[0].set_title('RAW')\nax[0].set_xlabel('Feature Track Length')\nax[0].set_ylabel('Fequency')\nax[0].xaxis.set_major_locator(plt.MultipleLocator(1))\n\nrecon_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_reconstruction.json'\ntracks_file = '/home/vik748/data/OpenSfM_data/track_length_test_data/Stingray/Stingray_CLAHE_ZERNIKE_tracks.csv'\n\ncounts1 = tracks_histogram(recon_file, tracks_file, ax[1], bins=np.linspace(2,15,14))\nax[1].set_title('CLAHE')\nax[1].set_xlabel('Feature Track Length')\nax[1].set_ylabel('Fequency')\n\n\n\nplt.hist([counts1, counts2], np.linspace(2,15,14), label=['RAW', 'CLAHE'])\nplt.legend(loc='upper right')\nplt.show()\n",
"import numpy as np\nimport networkx as nx\n\nfrom opensfm import io\nfrom opensfm import pygeometry\nfrom opensfm import reconstruction\nfrom opensfm import pysfm\n\n\ndef test_track_triangulator_equirectangular():\n \"\"\"Test triangulating tracks of spherical images.\"\"\"\n tracks_manager = pysfm.TracksManager()\n tracks_manager.add_observation('im1', '1', pysfm.Observation(0, 0, 1.0, 0, 0, 0, 0))\n tracks_manager.add_observation('im2', '1', pysfm.Observation(-0.1, 0, 1.0, 0, 0, 0, 1))\n\n rec = io.reconstruction_from_json({\n \"cameras\": {\n \"theta\": {\n \"projection_type\": \"equirectangular\",\n \"width\": 800,\n \"height\": 400,\n }\n },\n\n \"shots\": {\n 'im1': {\n \"camera\": \"theta\",\n \"rotation\": [0.0, 0.0, 0.0],\n \"translation\": [0.0, 0.0, 0.0],\n },\n 'im2': {\n \"camera\": \"theta\",\n \"rotation\": [0, 0, 0.0],\n \"translation\": [-1, 0, 0.0],\n },\n },\n\n \"points\": {\n },\n })\n\n triangulator = reconstruction.TrackTriangulator(tracks_manager, rec)\n triangulator.triangulate('1', 0.01, 2.0)\n assert '1' in rec.points\n p = rec.points['1'].coordinates\n assert np.allclose(p, [0, 0, 1.3763819204711])\n assert len(rec.points['1'].get_observations()) == 2\n\n\ndef unit_vector(x):\n return np.array(x) / np.linalg.norm(x)\n\n\ndef test_triangulate_bearings_dlt():\n rt1 = np.append(np.identity(3), [[0], [0], [0]], axis=1)\n rt2 = np.append(np.identity(3), [[-1], [0], [0]], axis=1)\n b1 = unit_vector([0.0, 0, 1])\n b2 = unit_vector([-1.0, 0, 1])\n max_reprojection = 0.01\n min_ray_angle = np.radians(2.0)\n res, X = pygeometry.triangulate_bearings_dlt(\n [rt1, rt2], [b1, b2], max_reprojection, min_ray_angle)\n assert np.allclose(X, [0, 0, 1.0])\n assert res is True\n\n\ndef test_triangulate_bearings_midpoint():\n o1 = np.array([0.0, 0, 0])\n b1 = unit_vector([0.0, 0, 1])\n o2 = np.array([1.0, 0, 0])\n b2 = unit_vector([-1.0, 0, 1])\n max_reprojection = 0.01\n min_ray_angle = np.radians(2.0)\n res, X = pygeometry.triangulate_bearings_midpoint(\n [o1, o2], [b1, b2], 2 * [max_reprojection], min_ray_angle)\n assert np.allclose(X, [0, 0, 1.0])\n assert res is True\n\n\ndef test_triangulate_two_bearings_midpoint():\n o1 = np.array([0.0, 0, 0])\n b1 = unit_vector([0.0, 0, 1])\n o2 = np.array([1.0, 0, 0])\n b2 = unit_vector([-1.0, 0, 1])\n max_reprojection = 0.01\n min_ray_angle = np.radians(2.0)\n X = pygeometry.triangulate_two_bearings_midpoint([o1, o2], [b1, b2])\n assert np.allclose(X, [0, 0, 1.0])\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.MultipleLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.linspace"
],
[
"numpy.allclose",
"numpy.linalg.norm",
"numpy.array",
"numpy.identity",
"numpy.radians"
]
] |
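The triangulation tests in the record above expect the two example bearings to meet at roughly [0, 0, 1]. A NumPy-only midpoint sketch (an assumed least-squares closed form, not the `pygeometry` implementation) reproduces that number for non-parallel rays.

```python
import numpy as np

def unit(v):
    v = np.asarray(v, dtype=float)
    return v / np.linalg.norm(v)

def triangulate_midpoint(o1, b1, o2, b2):
    # Midpoint of the shortest segment between rays o1 + s*b1 and o2 + t*b2
    # (b1, b2 assumed unit length and not parallel).
    o1, o2 = np.asarray(o1, dtype=float), np.asarray(o2, dtype=float)
    w = o1 - o2
    c = b1 @ b2
    d1w = b1 @ w
    d2w = b2 @ w
    denom = 1.0 - c * c
    s = (c * d2w - d1w) / denom
    t = (d2w - c * d1w) / denom
    return 0.5 * ((o1 + s * b1) + (o2 + t * b2))

p = triangulate_midpoint([0.0, 0, 0], unit([0.0, 0, 1]),
                         [1.0, 0, 0], unit([-1.0, 0, 1]))
assert np.allclose(p, [0, 0, 1.0])
```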
prkkumar/WarpX | [
"a83c9c03ecc9850cd724efc14075eb95ff8a6138"
] | [
"Python/pywarpx/fields.py"
] | [
"# Copyright 2017-2019 David Grote\n#\n# This file is part of WarpX.\n#\n# License: BSD-3-Clause-LBNL\n\n\"\"\"Provides wrappers around field and current density on multiFABs\n\nAvailable routines:\n\nExWrapper, EyWrapper, EzWrapper\nBxWrapper, ByWrapper, BzWrapper\nJxWrapper, JyWrapper, JzWrapper\n\n\"\"\"\nimport numpy as np\ntry:\n from mpi4py import MPI as mpi\n comm_world = mpi.COMM_WORLD\n npes = comm_world.Get_size()\nexcept ImportError:\n npes = 1\n\nfrom . import _libwarpx\n\n\nclass _MultiFABWrapper(object):\n \"\"\"Wrapper around field arrays at level 0\n This provides a convenient way to query and set fields that are broken up into FABs.\n The indexing is based on global indices.\n - direction: component to access, one of the values (0, 1, 2) or None\n - get_lovects: routine that returns the list of lo vectors\n - get_fabs: routine that returns the list of FABs\n - get_nodal_flag: routine that returns the list of nodal flag\n - level: refinement level\n \"\"\"\n def __init__(self, direction, get_lovects, get_fabs, get_nodal_flag, level, include_ghosts=False):\n self.direction = direction\n self.get_lovects = get_lovects\n self.get_fabs = get_fabs\n self.get_nodal_flag = get_nodal_flag\n self.level = level\n self.include_ghosts = include_ghosts\n\n self.dim = _libwarpx.dim\n\n # overlaps is one along the axes where the grid boundaries overlap the neighboring grid,\n # which is the case with node centering.\n # This presumably will never change during a calculation.\n self.overlaps = self.get_nodal_flag()\n\n def _getlovects(self):\n if self.direction is None:\n lovects, ngrow = self.get_lovects(self.level, self.include_ghosts)\n else:\n lovects, ngrow = self.get_lovects(self.level, self.direction, self.include_ghosts)\n return lovects, ngrow\n\n def _gethivects(self):\n lovects, ngrow = self._getlovects()\n fields = self._getfields()\n\n hivects = np.zeros_like(lovects)\n for i in range(len(fields)):\n hivects[:,i] = lovects[:,i] + np.array(fields[i].shape[:self.dim]) - self.overlaps\n\n return hivects, ngrow\n\n def _getfields(self):\n if self.direction is None:\n return self.get_fabs(self.level, self.include_ghosts)\n else:\n return self.get_fabs(self.level, self.direction, self.include_ghosts)\n\n def __len__(self):\n lovects, ngrow = self._getlovects()\n return len(lovects)\n\n def mesh(self, direction):\n \"\"\"Returns the mesh along the specified direction with the appropriate centering.\n - direction: In 3d, one of 'x', 'y', or 'z'.\n In 2d, Cartesian, one of 'x', or 'z'.\n In RZ, one of 'r', or 'z'\n \"\"\"\n\n try:\n if _libwarpx.geometry_dim == '3d':\n idir = ['x', 'y', 'z'].index(direction)\n celldir = idir\n elif _libwarpx.geometry_dim == '2d':\n idir = ['x', 'z'].index(direction)\n celldir = 2*idir\n elif _libwarpx.geometry_dim == 'rz':\n idir = ['r', 'z'].index(direction)\n celldir = 2*idir\n except ValueError:\n raise Exception('Inappropriate direction given')\n\n # --- Get the total number of cells along the direction\n hivects, ngrow = self._gethivects()\n nn = hivects[idir,:].max() - ngrow[idir] + self.overlaps[idir]\n if npes > 1:\n nn = comm_world.allreduce(nn, op=mpi.MAX)\n\n # --- Cell size in the direction\n dd = _libwarpx.getCellSize(celldir, self.level)\n\n # --- Get the nodal flag along direction\n nodal_flag = self.get_nodal_flag()[idir]\n\n # --- The centering shift\n if nodal_flag == 1:\n # node centered\n shift = 0.\n else:\n # cell centered\n shift = 0.5*dd\n\n return np.arange(nn)*dd + shift\n\n def __getitem__(self, index):\n \"\"\"Returns slices 
of a decomposed array, The shape of\n the object returned depends on the number of ix, iy and iz specified, which\n can be from none to all three. Note that the values of ix, iy and iz are\n relative to the fortran indexing, meaning that 0 is the lower boundary\n of the whole domain.\n \"\"\"\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._getitem2d(index)\n elif self.dim == 3:\n return self._getitem3d(index)\n\n def _getitem3d(self, index):\n \"\"\"Returns slices of a 3D decomposed array,\n \"\"\"\n\n lovects, ngrow = self._getlovects()\n hivects, ngrow = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - ngrow[0]\n ny = hivects[1,:].max() - ngrow[1]\n nz = hivects[2,:].max() - ngrow[2]\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -ngrow[0], -ngrow[0])\n ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -ngrow[1], -ngrow[1])\n iystop = min(iy.stop or ny + 1 + ngrow[1], ny + self.overlaps[1] + ngrow[1])\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -ngrow[2], -ngrow[2])\n izstop = min(iz.stop or nz + 1 + ngrow[2], nz + self.overlaps[2] + ngrow[2])\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, 
slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]\n\n def _getitem2d(self, index):\n \"\"\"Returns slices of a 2D decomposed array,\n \"\"\"\n\n lovects, ngrow = self._getlovects()\n hivects, ngrow = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iz = index[1]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - ngrow[0]\n nz = hivects[1,:].max() - ngrow[1]\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -ngrow[0], -ngrow[0])\n ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -ngrow[1], -ngrow[1])\n izstop = min(iz.stop or nz + 1 + ngrow[1], nz + self.overlaps[1] + ngrow[1])\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[1,i])\n iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iz, slice):\n sss[1] = 0\n\n return resultglobal[tuple(sss)]\n\n def __setitem__(self, index, value):\n \"\"\"Sets slices of a decomposed array. 
The shape of\n the input object depends on the number of arguments specified, which can\n be from none to all three.\n - value: input array (must be supplied)\n \"\"\"\n if index == Ellipsis:\n index = tuple(self.dim*[slice(None)])\n\n if len(index) < self.dim:\n # --- Add extra dims to index if needed\n index = list(index)\n for i in range(len(index), self.dim):\n index.append(slice(None))\n index = tuple(index)\n\n if self.dim == 2:\n return self._setitem2d(index, value)\n elif self.dim == 3:\n return self._setitem3d(index, value)\n\n def _setitem3d(self, index, value):\n \"\"\"Sets slices of a decomposed 3D array.\n \"\"\"\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n lovects, ngrow = self._getlovects()\n hivects, ngrow = self._gethivects()\n fields = self._getfields()\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - ngrow[0]\n ny = hivects[1,:].max() - ngrow[1]\n nz = hivects[2,:].max() - ngrow[2]\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iy, slice): sss[1:1] = [1]\n if not isinstance(iz, slice): sss[2:2] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -ngrow[0], -ngrow[0])\n ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -ngrow[1], -ngrow[1])\n iystop = min(iy.stop or ny + 1 + ngrow[1], ny + self.overlaps[1] + ngrow[1])\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -ngrow[2], -ngrow[2])\n izstop = min(iz.stop or nz + 1 + ngrow[2], nz + self.overlaps[2] + ngrow[2])\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value\n\n def _setitem2d(self, index, value):\n \"\"\"Sets slices of a decomposed 2D array.\n \"\"\"\n ix = index[0]\n iz = index[2]\n\n lovects, ngrow = self._getlovects()\n hivects, ngrow = self._gethivects()\n fields = self._getfields()\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - ngrow[0]\n nz = hivects[2,:].max() - ngrow[1]\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, 
copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iz, slice): sss[1:1] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -ngrow[0], -ngrow[0])\n ixstop = min(ix.stop or nx + 1 + ngrow[0], nx + self.overlaps[0] + ngrow[0])\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -ngrow[1], -ngrow[1])\n izstop = min(iz.stop or nz + 1 + ngrow[1], nz + self.overlaps[2] + ngrow[1])\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value\n\n\ndef ExWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_electric_field_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field,\n get_nodal_flag=_libwarpx.get_Ex_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EyWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_electric_field_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field,\n get_nodal_flag=_libwarpx.get_Ey_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EzWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_electric_field_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field,\n get_nodal_flag=_libwarpx.get_Ez_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BxWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field,\n get_nodal_flag=_libwarpx.get_Bx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ByWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field,\n get_nodal_flag=_libwarpx.get_By_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BzWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_magnetic_field_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field,\n get_nodal_flag=_libwarpx.get_Bz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JxWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_current_density_lovects,\n get_fabs=_libwarpx.get_mesh_current_density,\n get_nodal_flag=_libwarpx.get_Jx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JyWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_current_density_lovects,\n get_fabs=_libwarpx.get_mesh_current_density,\n get_nodal_flag=_libwarpx.get_Jy_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JzWrapper(level=0, include_ghosts=False):\n return 
_MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_current_density_lovects,\n get_fabs=_libwarpx.get_mesh_current_density,\n get_nodal_flag=_libwarpx.get_Jz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ExCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_cp,\n get_nodal_flag=_libwarpx.get_Ex_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EyCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_cp,\n get_nodal_flag=_libwarpx.get_Ey_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EzCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_cp,\n get_nodal_flag=_libwarpx.get_Ez_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BxCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp,\n get_nodal_flag=_libwarpx.get_Bx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ByCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp,\n get_nodal_flag=_libwarpx.get_By_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BzCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp,\n get_nodal_flag=_libwarpx.get_Bz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JxCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_cp,\n get_nodal_flag=_libwarpx.get_Jx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JyCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_cp,\n get_nodal_flag=_libwarpx.get_Jy_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JzCPWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_cp,\n get_nodal_flag=_libwarpx.get_Jz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef RhoCPWrapper(level=1, 
include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=None,\n get_lovects=_libwarpx.get_mesh_charge_density_cp_lovects,\n get_fabs=_libwarpx.get_mesh_charge_density_cp,\n get_nodal_flag=_libwarpx.get_Rho_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ExFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_fp,\n get_nodal_flag=_libwarpx.get_Ex_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EyFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_fp,\n get_nodal_flag=_libwarpx.get_Ey_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EzFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_electric_field_fp,\n get_nodal_flag=_libwarpx.get_Ez_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BxFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp,\n get_nodal_flag=_libwarpx.get_Bx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ByFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp,\n get_nodal_flag=_libwarpx.get_By_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BzFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp,\n get_nodal_flag=_libwarpx.get_Bz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JxFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_fp,\n get_nodal_flag=_libwarpx.get_Jx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JyFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_fp,\n get_nodal_flag=_libwarpx.get_Jy_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JzFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects,\n get_fabs=_libwarpx.get_mesh_current_density_fp,\n get_nodal_flag=_libwarpx.get_Jz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef RhoFPWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=None,\n get_lovects=_libwarpx.get_mesh_charge_density_fp_lovects,\n get_fabs=_libwarpx.get_mesh_charge_density_fp,\n get_nodal_flag=_libwarpx.get_Rho_nodal_flag,\n level=level, include_ghosts=include_ghosts)\ndef ExCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,\n 
get_nodal_flag=_libwarpx.get_Ex_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EyCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,\n get_nodal_flag=_libwarpx.get_Ey_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EzCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_electric_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_cp_pml,\n get_nodal_flag=_libwarpx.get_Ez_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BxCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,\n get_nodal_flag=_libwarpx.get_Bx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ByCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,\n get_nodal_flag=_libwarpx.get_By_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BzCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_magnetic_field_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_cp_pml,\n get_nodal_flag=_libwarpx.get_Bz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JxCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_cp_pml,\n get_nodal_flag=_libwarpx.get_Jx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JyCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_cp_pml,\n get_nodal_flag=_libwarpx.get_Jy_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JzCPPMLWrapper(level=1, include_ghosts=False):\n assert level>0, Exception('Coarse patch only available on levels > 0')\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_current_density_cp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_cp_pml,\n get_nodal_flag=_libwarpx.get_Jz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ExFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,\n get_nodal_flag=_libwarpx.get_Ex_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EyFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n 
get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,\n get_nodal_flag=_libwarpx.get_Ey_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef EzFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_electric_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_electric_field_fp_pml,\n get_nodal_flag=_libwarpx.get_Ez_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BxFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,\n get_nodal_flag=_libwarpx.get_Bx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef ByFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,\n get_nodal_flag=_libwarpx.get_By_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef BzFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_magnetic_field_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_magnetic_field_fp_pml,\n get_nodal_flag=_libwarpx.get_Bz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JxFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=0,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_fp_pml,\n get_nodal_flag=_libwarpx.get_Jx_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JyFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=1,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_fp_pml,\n get_nodal_flag=_libwarpx.get_Jy_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n\ndef JzFPPMLWrapper(level=0, include_ghosts=False):\n return _MultiFABWrapper(direction=2,\n get_lovects=_libwarpx.get_mesh_current_density_fp_lovects_pml,\n get_fabs=_libwarpx.get_mesh_current_density_fp_pml,\n get_nodal_flag=_libwarpx.get_Jz_nodal_flag,\n level=level, include_ghosts=include_ghosts)\n"
] | [
[
"numpy.array",
"numpy.zeros_like",
"numpy.arange",
"numpy.zeros"
]
] |
brianwa84/probability | [
"6f8e78d859ac41170be5147c8c7bde54cc5aa83e",
"6f8e78d859ac41170be5147c8c7bde54cc5aa83e",
"6f8e78d859ac41170be5147c8c7bde54cc5aa83e"
] | [
"tensorflow_probability/python/distributions/skellam_test.py",
"tensorflow_probability/python/distributions/lognormal_test.py",
"tensorflow_probability/python/internal/tensor_util.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nfrom scipy import stats\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import test_util\ntfd = tfp.distributions\n\n\n@test_util.test_all_tf_execution_regimes\nclass _SkellamTest(object):\n\n def _make_skellam(self,\n rate1,\n rate2,\n validate_args=True,\n force_probs_to_zero_outside_support=False):\n return tfd.Skellam(\n rate1=rate1,\n rate2=rate2,\n validate_args=validate_args,\n force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)\n\n def testSkellamShape(self):\n rate1 = tf.constant([3.0] * 5, dtype=self.dtype)\n rate2 = tf.constant([3.0] * 4, dtype=self.dtype)[..., tf.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n\n self.assertAllEqual(self.evaluate(skellam.batch_shape_tensor()), (4, 5))\n self.assertEqual(skellam.batch_shape, tf.TensorShape([4, 5]))\n self.assertAllEqual(self.evaluate(skellam.event_shape_tensor()), [])\n self.assertEqual(skellam.event_shape, tf.TensorShape([]))\n\n def testInvalidLam(self):\n invalid_rate = self.dtype([-.01, 1., 2.])\n valid_rate = self.dtype([1., 2., 3.])\n with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):\n skellam = self._make_skellam(rate1=invalid_rate, rate2=valid_rate)\n self.evaluate(skellam.rate1_parameter())\n\n with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):\n skellam = self._make_skellam(rate1=valid_rate, rate2=invalid_rate)\n self.evaluate(skellam.rate2_parameter())\n\n def testZeroRate(self):\n lam = self.dtype(0.)\n skellam = tfd.Skellam(rate1=lam, rate2=lam, validate_args=True)\n self.assertAllClose(lam, self.evaluate(skellam.rate1))\n self.assertAllClose(lam, self.evaluate(skellam.rate2))\n self.assertAllClose(0., skellam.prob(3.))\n self.assertAllClose(1., skellam.prob(0.))\n self.assertAllClose(0., skellam.log_prob(0.))\n\n def testSkellamLogPmfDiscreteMatchesScipy(self):\n batch_size = 12\n rate1 = np.linspace(1, 12, 12).astype(self.dtype)\n rate2 = np.array([[1.2], [2.3]]).astype(self.dtype)\n x = np.array([-3., -1., 0., 2., 4., 3., 7., 4., 8., 9., 6., 7.],\n dtype=self.dtype)\n skellam = self._make_skellam(\n rate1=rate1, rate2=rate2,\n force_probs_to_zero_outside_support=True, validate_args=False)\n log_pmf = skellam.log_prob(x)\n self.assertEqual(log_pmf.shape, (2, batch_size))\n self.assertAllClose(\n self.evaluate(log_pmf),\n stats.skellam.logpmf(x, rate1, rate2))\n\n pmf = skellam.prob(x)\n self.assertEqual(pmf.shape, (2, batch_size,))\n self.assertAllClose(\n self.evaluate(pmf),\n stats.skellam.pmf(x, rate1, rate2))\n\n @test_util.numpy_disable_gradient_test\n def testSkellamLogPmfGradient(self):\n batch_size = 6\n rate1 = 
tf.constant([3.] * batch_size, dtype=self.dtype)\n rate2 = tf.constant([2.7] * batch_size, dtype=self.dtype)\n x = np.array([-1., 2., 3., 4., 5., 6.], dtype=self.dtype)\n\n err = self.compute_max_gradient_error(\n lambda lam: self._make_skellam( # pylint:disable=g-long-lambda\n rate1=lam, rate2=rate2).log_prob(x), [rate1])\n self.assertLess(err, 7e-4)\n\n err = self.compute_max_gradient_error(\n lambda lam: self._make_skellam( # pylint:disable=g-long-lambda\n rate1=rate1, rate2=lam).log_prob(x), [rate2])\n self.assertLess(err, 7e-4)\n\n @test_util.numpy_disable_gradient_test\n def testSkellamLogPmfGradientAtZeroPmf(self):\n # Check that the derivative wrt parameter at the zero-prob points is zero.\n batch_size = 6\n rate1 = tf.constant(np.linspace(1, 7, 6), dtype=self.dtype)\n rate2 = tf.constant(np.linspace(9.1, 12.1, 6), dtype=self.dtype)\n x = tf.constant([-2.1, -1.3, -0.5, 0.2, 1.5, 10.5], dtype=self.dtype)\n\n def make_skellam_log_prob(apply_to_second_rate=False):\n def skellam_log_prob(lam):\n return self._make_skellam(\n rate1=rate1 if apply_to_second_rate else lam,\n rate2=lam if apply_to_second_rate else rate2,\n force_probs_to_zero_outside_support=True,\n validate_args=False).log_prob(x)\n return skellam_log_prob\n _, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(\n make_skellam_log_prob(), rate1))\n\n self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))\n self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))\n\n _, dlog_pmf_dlam = self.evaluate(tfp.math.value_and_gradient(\n make_skellam_log_prob(True), rate2))\n\n self.assertEqual(dlog_pmf_dlam.shape, (batch_size,))\n self.assertAllClose(dlog_pmf_dlam, np.zeros([batch_size]))\n\n def testSkellamMean(self):\n rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)\n rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n self.assertEqual(skellam.mean().shape, (4, 3))\n self.assertAllClose(\n self.evaluate(skellam.mean()), stats.skellam.mean(rate1, rate2))\n self.assertAllClose(self.evaluate(skellam.mean()), rate1 - rate2)\n\n def testSkellamVariance(self):\n rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)\n rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n self.assertEqual(skellam.variance().shape, (4, 3))\n self.assertAllClose(\n self.evaluate(skellam.variance()), stats.skellam.var(rate1, rate2))\n self.assertAllClose(self.evaluate(skellam.variance()), rate1 + rate2)\n\n def testSkellamStd(self):\n rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)\n rate2 = np.array([5.0, 7.13, 2.56, 41.], dtype=self.dtype)[..., np.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n self.assertEqual(skellam.stddev().shape, (4, 3))\n self.assertAllClose(\n self.evaluate(skellam.stddev()), stats.skellam.std(rate1, rate2))\n self.assertAllClose(self.evaluate(skellam.stddev()), np.sqrt(rate1 + rate2))\n\n def testSkellamSample(self):\n rate1 = self.dtype([2., 3., 4.])\n rate2 = self.dtype([7.1, 3.2])[..., np.newaxis]\n n = int(2e5)\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n samples = skellam.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n, 2, 3))\n self.assertEqual(sample_values.shape, (n, 2, 3))\n self.assertAllClose(\n sample_values.mean(axis=0), stats.skellam.mean(rate1, rate2), rtol=.03)\n self.assertAllClose(\n sample_values.var(axis=0), stats.skellam.var(rate1, 
rate2), rtol=.03)\n\n def testAssertValidSample(self):\n rate1 = np.array([1.0, 3.0, 2.5], dtype=self.dtype)\n rate2 = np.array([2.1, 7.0, 42.5], dtype=self.dtype)\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n with self.assertRaisesOpError('has non-integer components'):\n self.evaluate(skellam.prob([-1.2, 3., 4.2]))\n\n def testSkellamSampleMultidimensionalMean(self):\n rate1 = self.dtype([2., 3., 4., 5., 6.])\n rate2 = self.dtype([7.1, 3.2, 10., 9.])[..., np.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n n = int(2e5)\n samples = skellam.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n, 4, 5))\n self.assertEqual(sample_values.shape, (n, 4, 5))\n self.assertAllClose(\n sample_values.mean(axis=0),\n stats.skellam.mean(rate1, rate2), rtol=.04, atol=0)\n\n def testSkellamSampleMultidimensionalVariance(self):\n rate1 = self.dtype([2., 3., 4., 5., 6.])\n rate2 = self.dtype([7.1, 3.2, 10., 9.])[..., np.newaxis]\n skellam = self._make_skellam(rate1=rate1, rate2=rate2)\n n = int(1e5)\n samples = skellam.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual(samples.shape, (n, 4, 5))\n self.assertEqual(sample_values.shape, (n, 4, 5))\n\n self.assertAllClose(\n sample_values.var(axis=0),\n stats.skellam.var(rate1, rate2), rtol=.03, atol=0)\n\n @test_util.tf_tape_safety_test\n def testGradientThroughRate(self):\n rate1 = tf.Variable(3.)\n rate2 = tf.Variable(4.)\n dist = self._make_skellam(rate1=rate1, rate2=rate2)\n with tf.GradientTape() as tape:\n loss = -dist.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, dist.trainable_variables)\n self.assertLen(grad, 2)\n self.assertAllNotNone(grad)\n\n def testAssertsNonNegativeRate(self):\n rate1 = tf.Variable([-1., 2., -3.])\n rate2 = tf.Variable([1., 2., 3.])\n self.evaluate([rate1.initializer, rate2.initializer])\n with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):\n dist = self._make_skellam(\n rate1=rate1, rate2=rate2, validate_args=True)\n self.evaluate(dist.sample(seed=test_util.test_seed()))\n\n rate1 = tf.Variable([1., 2., 3.])\n rate2 = tf.Variable([-1., 2., -3.])\n self.evaluate([rate1.initializer, rate2.initializer])\n\n with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):\n dist = self._make_skellam(\n rate1=rate1, rate2=rate2, validate_args=True)\n self.evaluate(dist.sample(seed=test_util.test_seed()))\n\n def testAssertsNonNegativeRateAfterMutation(self):\n rate1 = tf.Variable([1., 2., 3.])\n rate2 = tf.Variable([1., 2., 3.])\n self.evaluate([rate1.initializer, rate2.initializer])\n dist = self._make_skellam(\n rate1=rate1, rate2=rate2, validate_args=True)\n self.evaluate(dist.mean())\n with self.assertRaisesOpError('Argument `rate1` must be non-negative.'):\n with tf.control_dependencies([rate1.assign([1., 2., -3.])]):\n self.evaluate(dist.sample(seed=test_util.test_seed()))\n\n rate1 = tf.Variable([1., 2., 3.])\n rate2 = tf.Variable([1., 2., 3.])\n self.evaluate([rate1.initializer, rate2.initializer])\n dist = self._make_skellam(\n rate1=rate1, rate2=rate2, validate_args=True)\n self.evaluate(dist.mean())\n\n with self.assertRaisesOpError('Argument `rate2` must be non-negative.'):\n with tf.control_dependencies([rate2.assign([1., 2., -3.])]):\n self.evaluate(dist.sample(seed=test_util.test_seed()))\n\n\n@test_util.test_all_tf_execution_regimes\nclass SkellamTestFloat32(test_util.TestCase, _SkellamTest):\n dtype = 
np.float32\n\n\n@test_util.test_all_tf_execution_regimes\nclass SkellamTestFloat64(test_util.TestCase, _SkellamTest):\n dtype = np.float64\n\n\n@test_util.test_all_tf_execution_regimes\nclass SkellamLogRateTest(_SkellamTest):\n\n def _make_skellam(self,\n rate1,\n rate2,\n validate_args=True,\n force_probs_to_zero_outside_support=False):\n return tfd.Skellam(\n log_rate1=tf.math.log(rate1),\n log_rate2=tf.math.log(rate2),\n validate_args=validate_args,\n force_probs_to_zero_outside_support=force_probs_to_zero_outside_support)\n\n # No need to worry about the non-negativity of `rate` when using the\n # `log_rate` parameterization.\n def testInvalidLam(self):\n pass\n\n def testAssertsNonNegativeRate(self):\n pass\n\n def testAssertsNonNegativeRateAfterMutation(self):\n pass\n\n # The gradient is not tracked through tf.math.log(rate) in _make_skellam(),\n # so log_rate needs to be defined as a Variable and passed directly.\n @test_util.tf_tape_safety_test\n def testGradientThroughRate(self):\n log_rate1 = tf.Variable(3.)\n log_rate2 = tf.Variable(4.)\n dist = tfd.Skellam(\n log_rate1=log_rate1, log_rate2=log_rate2, validate_args=True)\n with tf.GradientTape() as tape:\n loss = -dist.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, dist.trainable_variables)\n self.assertLen(grad, 2)\n self.assertAllNotNone(grad)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for LogNormal.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_util\n\n\n@test_util.test_all_tf_execution_regimes\nclass LogNormalTest(test_util.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(123)\n\n def testLogNormalStats(self):\n\n loc = np.float32([3., 1.5])\n scale = np.float32([0.4, 1.1])\n dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)\n\n self.assertAllClose(self.evaluate(dist.mean()),\n np.exp(loc + scale**2 / 2))\n self.assertAllClose(self.evaluate(dist.variance()),\n (np.exp(scale**2) - 1) * np.exp(2 * loc + scale**2))\n self.assertAllClose(self.evaluate(dist.stddev()),\n np.sqrt(self.evaluate(dist.variance())))\n self.assertAllClose(self.evaluate(dist.mode()),\n np.exp(loc - scale**2))\n self.assertAllClose(self.evaluate(dist.entropy()),\n np.log(scale * np.exp(loc + 0.5) * np.sqrt(2 * np.pi)))\n\n def testLogNormalSample(self):\n loc, scale = 1.5, 0.4\n dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)\n samples = self.evaluate(dist.sample(6000, seed=test_util.test_seed()))\n self.assertAllClose(np.mean(samples),\n self.evaluate(dist.mean()),\n atol=0.1)\n self.assertAllClose(np.std(samples),\n self.evaluate(dist.stddev()),\n atol=0.1)\n\n def testLogNormalPDF(self):\n loc, scale = 1.5, 0.4\n dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)\n\n x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)\n\n log_pdf = dist.log_prob(x)\n analytical_log_pdf = -np.log(x * scale * np.sqrt(2 * np.pi)) - (\n np.log(x) - loc)**2 / (2. 
* scale**2)\n\n self.assertAllClose(self.evaluate(log_pdf), analytical_log_pdf)\n\n def testLogNormalCDF(self):\n loc, scale = 1.5, 0.4\n dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)\n\n x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)\n\n cdf = dist.cdf(x)\n analytical_cdf = .5 + .5 * tf.math.erf(\n (np.log(x) - loc) / (scale * np.sqrt(2)))\n self.assertAllClose(self.evaluate(cdf),\n self.evaluate(analytical_cdf))\n\n def testLogNormalLogNormalKL(self):\n batch_size = 6\n mu_a = np.array([3.0] * batch_size)\n sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])\n mu_b = np.array([-3.0] * batch_size)\n sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])\n\n ln_a = tfd.LogNormal(loc=mu_a, scale=sigma_a, validate_args=True)\n ln_b = tfd.LogNormal(loc=mu_b, scale=sigma_b, validate_args=True)\n\n kl = tfd.kl_divergence(ln_a, ln_b)\n kl_val = self.evaluate(kl)\n\n normal_a = tfd.Normal(loc=mu_a, scale=sigma_a, validate_args=True)\n normal_b = tfd.Normal(loc=mu_b, scale=sigma_b, validate_args=True)\n kl_expected_from_normal = tfd.kl_divergence(normal_a, normal_b)\n\n kl_expected_from_formula = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (\n (sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))\n\n x = ln_a.sample(int(2e5), seed=test_util.test_seed())\n kl_sample = tf.reduce_mean(ln_a.log_prob(x) - ln_b.log_prob(x), axis=0)\n kl_sample_ = self.evaluate(kl_sample)\n\n self.assertEqual(kl.shape, (batch_size,))\n self.assertAllClose(kl_val, kl_expected_from_normal)\n self.assertAllClose(kl_val, kl_expected_from_formula)\n self.assertAllClose(\n kl_expected_from_formula, kl_sample_, atol=0.0, rtol=1e-2)\n\n # TODO(b/144948687) Avoid `nan` at boundary. Ideally we'd do this test:\n # def testPdfAtBoundary(self):\n # dist = tfd.LogNormal(loc=5., scale=2.)\n # pdf = self.evaluate(dist.prob(0.))\n # log_pdf = self.evaluate(dist.log_prob(0.))\n # self.assertEqual(pdf, 0.)\n # self.assertAllNegativeInf(log_pdf)\n\n def testAssertValidSample(self):\n dist = tfd.LogNormal(loc=[-3., 1., 4.], scale=2., validate_args=True)\n with self.assertRaisesOpError('Sample must be non-negative.'):\n self.evaluate(dist.cdf([3., -0.2, 1.]))\n\n def testSupportBijectorOutsideRange(self):\n dist = tfd.LogNormal(loc=1., scale=2., validate_args=True)\n with self.assertRaisesOpError('must be greater than or equal to 0'):\n dist.experimental_default_event_space_bijector().inverse(\n [-4.2, -1e-6, -1.3])\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tools for processing Tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\n\n__all__ = [\n 'convert_nonref_to_tensor',\n 'discover_trainable_variables',\n 'discover_variables',\n 'is_module',\n 'is_ref',\n 'is_trainable_variable',\n 'is_variable',\n]\n\n\ndef convert_nonref_to_tensor(value, dtype=None, dtype_hint=None,\n as_shape_tensor=False, name=None):\n \"\"\"Converts the given `value` to a `Tensor` if input is nonreference type.\n\n This function converts Python objects of various types to `Tensor` objects\n only if the input has nonreference semantics. Reference semantics are\n characterized by `tensor_util.is_ref` and is any object which is a\n `tf.Variable` or instance of `tf.Module`. This function accepts any input\n which `tf.convert_to_tensor` would also.\n\n Note: This function diverges from default Numpy behavior for `float` and\n `string` types when `None` is present in a Python list or scalar. Rather\n than silently converting `None` values, an error will be thrown.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing, the\n type is inferred from the type of `value`.\n dtype_hint: Optional element type for the returned tensor,\n used when dtype is None. In some cases, a caller may not have a\n dtype in mind when converting to a tensor, so dtype_hint\n can be used as a soft preference. 
If the conversion to\n `dtype_hint` is not possible, this argument has no effect.\n as_shape_tensor: Optional boolean when if `True` uses\n `prefer_static.convert_to_shape_tensor` instead of `tf.convert_to_tensor`\n for JAX compatibility.\n name: Optional name to use if a new `Tensor` is created.\n\n Returns:\n tensor: A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value` to `dtype`.\n RuntimeError: If a registered conversion function returns an invalid value.\n ValueError: If the `value` is a tensor not of given `dtype` in graph mode.\n\n\n #### Examples:\n\n ```python\n from tensorflow_probability.python.internal import tensor_util\n\n x = tf.Variable(0.)\n y = tensor_util.convert_nonref_to_tensor(x)\n x is y\n # ==> True\n\n x = tf.constant(0.)\n y = tensor_util.convert_nonref_to_tensor(x)\n x is y\n # ==> True\n\n x = np.array(0.)\n y = tensor_util.convert_nonref_to_tensor(x)\n x is y\n # ==> False\n tf.is_tensor(y)\n # ==> True\n\n x = tfp.util.DeferredTensor(13.37, lambda x: x)\n y = tensor_util.convert_nonref_to_tensor(x)\n x is y\n # ==> True\n tf.is_tensor(y)\n # ==> True\n tf.equal(y, 13.37)\n # ==> True\n ```\n\n \"\"\"\n # We explicitly do not use a tf.name_scope to avoid graph clutter.\n if value is None:\n return None\n if is_ref(value):\n if dtype is None:\n return value\n dtype_base = dtype_util.base_dtype(dtype)\n value_dtype_base = dtype_util.base_dtype(value.dtype)\n if dtype_base != value_dtype_base:\n raise TypeError('Mutable type must be of dtype \"{}\" but is \"{}\".'.format(\n dtype_util.name(dtype_base), dtype_util.name(value_dtype_base)))\n return value\n if as_shape_tensor:\n return prefer_static.convert_to_shape_tensor(\n value, dtype=dtype, dtype_hint=dtype_hint, name=name)\n return tf.convert_to_tensor(\n value, dtype=dtype, dtype_hint=dtype_hint, name=name)\n\n\ndef is_ref(x):\n \"\"\"Evaluates if the object has reference semantics.\n\n An object is deemed \"reference\" if it is a `tf.Variable` instance or is\n derived from a `tf.Module` with `dtype` and `shape` properties.\n\n Args:\n x: Any object.\n\n Returns:\n is_ref: Python `bool` indicating input is has nonreference semantics, i.e.,\n is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties.\n \"\"\"\n # TODO(b/134430874): Consider making this recurse through nests, e.g.,\n # `tensor_util.is_ref([tf.Variable(0.), np.array(1.)])`\n # returns True. Note: we'd need to actually create a tf.Module on user's\n # behalf and it would need a `dtype` and `shape`. 
(I.e., there would be some\n # work to support this.)\n return (\n is_variable(x) or\n (is_module(x) and hasattr(x, 'dtype') and hasattr(x, 'shape'))\n )\n\n\ndef is_variable(x):\n \"\"\"Returns `True` when input is a `tf.Variable`, otherwise `False`.\"\"\"\n return isinstance(x, tf.Variable)\n\n\ndef is_trainable_variable(x):\n \"\"\"Returns `True` when input is trainable `tf.Variable`, otherwise `False`.\"\"\"\n return is_variable(x) and getattr(x, 'trainable', False)\n\n\ndef is_module(x):\n \"\"\"Returns `True` when input is a `tf.Module`, otherwise `False`.\"\"\"\n return isinstance(x, tf.Module)\n\n\nclass _Track(tf.Module):\n \"\"\"Bridge to create functional interface for variable tracking.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs\n\n\ndef discover_trainable_variables(x):\n \"\"\"Returns `tuple` of all trainable `tf.Variables` discoverable in input.\n\n Warning: unlike possibly `tf.Module`, use of this function only does a static,\n \"one-time\" discovery. (This is self-evidently true from its functional\n nature.)\n\n Args:\n x: An object to inspected for `tf.Variable` dependencies.\n\n Returns:\n trainable_vars: A Python `tuple` of `tf.Variable`s with `trainable=True`.\n \"\"\"\n return _Track(x).trainable_variables\n\n\ndef discover_variables(x):\n \"\"\"Returns `tuple` of all `tf.Variables` discoverable in input.\n\n Warning: unlike possibly `tf.Module`, use of this function only does a static,\n \"one-time\" discovery. (This is self-evidently true from its functional\n nature.)\n\n Args:\n x: An object to inspected for `tf.Variable` dependencies.\n\n Returns:\n vars: A Python `tuple` of `tf.Variable`s, regardless of their value of\n `trainable`.\n \"\"\"\n return _Track(x).variables\n"
] | [
[
"numpy.sqrt",
"scipy.stats.skellam.std",
"numpy.zeros",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.GradientTape",
"scipy.stats.skellam.mean",
"scipy.stats.skellam.var",
"scipy.stats.skellam.logpmf",
"tensorflow.compat.v2.TensorShape",
"scipy.stats.skellam.pmf",
"numpy.array",
"tensorflow.compat.v2.constant",
"numpy.linspace",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.math.log"
],
[
"numpy.sqrt",
"numpy.float32",
"numpy.exp",
"tensorflow.compat.v2.test.main",
"numpy.random.RandomState",
"numpy.log",
"numpy.array",
"numpy.std",
"numpy.mean"
],
[
"tensorflow.compat.v2.convert_to_tensor"
]
] |
Pandinosaurus/model-optimization | [
"12dc84dd34ee3c6eb08b381c0abcd65b31a42366"
] | [
"tensorflow_model_optimization/python/core/quantization/keras/collaborative_optimizations/cluster_preserve/cluster_preserve_quantize_registry.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Registry responsible for built-in keras classes.\"\"\"\n\nimport logging\nimport tensorflow as tf\n\nfrom tensorflow_model_optimization.python.core.clustering.keras import clustering_registry\nfrom tensorflow_model_optimization.python.core.quantization.keras import quant_ops\nfrom tensorflow_model_optimization.python.core.quantization.keras import quantizers\nfrom tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry\nfrom tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantizers\n\nlayers = tf.keras.layers\nK = tf.keras.backend\n\nCLUSTER_CENTROIDS = 'cluster_centroids_tf'\nPULLING_INDICES = 'pulling_indices_tf'\nORIGINAL_WEIGHTS = 'ori_weights_vars_tf'\nWEIGHT_NAME = 'weight_name'\nCLUSTERING_IMPL = 'clst_impl'\nCENTROIDS_MASK = 'centroids_mask'\nSPARSITY_MASK = 'sparsity_mask'\n\n\ndef get_unique(t):\n \"\"\"Get unique values and lookup index from N-D tensor.\n\n Args:\n t: tensor\n Returns:\n unique value, lookup index (same shape as input tensor)\n Example:\n t:\n ([[1.0, 2.0],\n [2.0, 3.0],\n [3.0, 3.0],\n [1.0, 2.0]]\n )\n uniques:\n ([1.0, 2.0, 3.0])\n output final index:\n ([[0, 1],\n [1, 2],\n [2, 2],\n [0, 1]]\n )\n \"\"\"\n t_flatten = tf.reshape(t, shape=(-1,))\n uniques, index = tf.unique(t_flatten)\n return uniques, tf.reshape(index, shape=tf.shape(t))\n\n\nclass _ClusterPreserveInfo(object):\n \"\"\"ClusterPreserveInfo.\"\"\"\n\n def __init__(self, weight_attrs, quantize_config_attrs):\n \"\"\"ClusterPreserveInfo.\n\n Args:\n weight_attrs: list of cluster preservable weight attributes of layer.\n quantize_config_attrs: list of quantization configuration class name.\n \"\"\"\n self.weight_attrs = weight_attrs\n self.quantize_config_attrs = quantize_config_attrs\n\n\nclass ClusterPreserveQuantizeRegistry(object):\n \"\"\"ClusterPreserveQuantizeRegistry is for built-in keras layers.\"\"\"\n # The keys represent built-in keras layers; the first values represent the\n # the variables within the layers which hold the kernel weights, second\n # values represent the class name of quantization configuration for layers.\n # This decide the weights of layers with quantization configurations are\n # cluster preservable.\n _LAYERS_CONFIG_MAP = {\n layers.Conv2D:\n _ClusterPreserveInfo(['kernel'], ['Default8BitConvQuantizeConfig']),\n layers.Dense:\n _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),\n\n # DepthwiseConv2D is supported with 8bit qat, but not with\n # clustering, thus for DepthwiseConv2D CQAT,\n # preserving clustered weights is disabled.\n layers.DepthwiseConv2D:\n _ClusterPreserveInfo(['depthwise_kernel'],\n ['Default8BitQuantizeConfig']),\n\n # layers that are supported with clustering, but not yet with qat\n # layers.Conv1D:\n # 
_ClusterPreserveInfo(['kernel'], []),\n # layers.Conv2DTranspose:\n # _ClusterPreserveInfo(['kernel'], []),\n # layers.Conv3D:\n # _ClusterPreserveInfo(['kernel'], []),\n # layers.Conv3DTranspose:\n # _ClusterPreserveInfo(['kernel'], []),\n # layers.LocallyConnected1D:\n # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),\n # layers.LocallyConnected2D:\n # _ClusterPreserveInfo(['kernel'], ['Default8BitQuantizeConfig']),\n\n # SeparableConv need verify from 8bit qat\n # layers.SeparableConv1D:\n # _ClusterPreserveInfo(['pointwise_kernel'],\n # ['Default8BitConvQuantizeConfig']),\n # layers.SeparableConv2D:\n # _ClusterPreserveInfo(['pointwise_kernel'],\n # ['Default8BitConvQuantizeConfig']),\n\n # Embedding need verify from 8bit qat\n # layers.Embedding: _ClusterPreserveInfo(['embeddings'], []),\n }\n\n _DISABLE_CLUSTER_PRESERVE = frozenset({\n layers.DepthwiseConv2D,\n })\n\n def __init__(self, preserve_sparsity):\n self._config_quantizer_map = {\n 'Default8BitQuantizeConfig':\n ClusterPreserveDefault8BitWeightsQuantizer(preserve_sparsity),\n 'Default8BitConvQuantizeConfig':\n ClusterPreserveDefault8BitConvWeightsQuantizer(preserve_sparsity),\n }\n\n @classmethod\n def _no_trainable_weights(cls, layer):\n \"\"\"Returns whether this layer has trainable weights.\n\n Args:\n layer: The layer to check for trainable weights.\n Returns:\n True/False whether the layer has trainable weights.\n \"\"\"\n return not layer.trainable_weights\n\n @classmethod\n def _disable_cluster_preserve(cls, layer):\n \"\"\"Returns whether to disable this layer for preserving clusters.\n\n Args:\n layer: The layer to check for disabling.\n Returns:\n True/False whether disabling this layer for preserving clusters.\n \"\"\"\n return layer.__class__ in cls._DISABLE_CLUSTER_PRESERVE\n\n @classmethod\n def supports(cls, layer):\n \"\"\"Returns whether the registry supports this layer type.\n\n Args:\n layer: The layer to check for support.\n Returns:\n True/False whether the layer type is supported.\n \"\"\"\n # layers without trainable weights are consider supported,\n # e.g., ReLU, Softmax, and AveragePooling2D.\n if cls._no_trainable_weights(layer):\n return True\n\n if layer.__class__ in cls._LAYERS_CONFIG_MAP:\n return True\n\n return False\n\n @classmethod\n def _weight_names(cls, layer):\n\n if cls._no_trainable_weights(layer):\n return []\n\n return cls._LAYERS_CONFIG_MAP[layer.__class__].weight_attrs\n\n def apply_cluster_preserve_quantize_config(self, layer, quantize_config):\n \"\"\"Applies cluster-preserve weight quantizer.\n\n Args:\n layer: The layer to check for support.\n quantize_config: quantization config for supporting cluster preservation\n on clustered weights\n Returns:\n The quantize_config with addon cluster preserve weight_quantizer.\n \"\"\"\n if not self.supports(layer):\n raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')\n\n # Example: ReLU, Softmax, and AveragePooling2D (without trainable weights)\n # DepthwiseConv2D (cluster_preserve is disabled)\n if self._no_trainable_weights(layer) or self._disable_cluster_preserve(\n layer):\n return quantize_config\n # Example: Conv2D, Dense layers\n if quantize_config.__class__.__name__ in self._LAYERS_CONFIG_MAP[\n layer.__class__].quantize_config_attrs:\n quantize_config.weight_quantizer = self._config_quantizer_map[\n quantize_config.__class__.__name__]\n else:\n raise ValueError('Configuration ' +\n str(quantize_config.__class__.__name__) +\n ' is not supported for Layer ' + str(layer.__class__) +\n '.')\n\n 
return quantize_config\n\n\nclass Default8bitClusterPreserveQuantizeRegistry(\n ClusterPreserveQuantizeRegistry):\n \"\"\"Default 8 bit ClusterPreserveQuantizeRegistry.\"\"\"\n\n def __init__(self, preserve_sparsity):\n super(Default8bitClusterPreserveQuantizeRegistry, self).__init__(\n preserve_sparsity)\n self.preserve_sparsity = preserve_sparsity\n\n def get_quantize_config(self, layer):\n \"\"\"Returns the quantization config with weight_quantizer for a given layer.\n\n Args:\n layer: input layer to return quantize config for.\n Returns:\n Returns the quantization config for cluster preserve weight_quantizer.\n \"\"\"\n quantize_config = (default_8bit_quantize_registry.\n Default8BitQuantizeRegistry().\n get_quantize_config(layer))\n cluster_aware_quantize_config = super(\n Default8bitClusterPreserveQuantizeRegistry,\n self).apply_cluster_preserve_quantize_config(layer, quantize_config)\n\n return cluster_aware_quantize_config\n\n\nclass ClusterPreserveDefaultWeightsQuantizer(quantizers.LastValueQuantizer):\n \"\"\"Quantize weights while preserving clusters.\"\"\"\n\n def __init__(\n self, num_bits, per_axis, symmetric, narrow_range, preserve_sparsity):\n \"\"\"ClusterPreserveDefaultWeightsQuantizer.\n\n Args:\n num_bits: Number of bits for quantization\n per_axis: Whether to apply per_axis quantization. The last dimension is\n used as the axis.\n symmetric: If true, use symmetric quantization limits instead of training\n the minimum and maximum of each quantization range separately.\n narrow_range: In case of 8 bits, narrow_range nudges the quantized range\n to be [-127, 127] instead of [-128, 127]. This ensures symmetric\n range has 0 as the centre.\n preserve_sparsity: Whether to apply prune-cluster-preserving quantization\n aware training.\n \"\"\"\n super(ClusterPreserveDefaultWeightsQuantizer, self).__init__(\n num_bits=num_bits,\n per_axis=per_axis,\n symmetric=symmetric,\n narrow_range=narrow_range,\n )\n self.preserve_sparsity = preserve_sparsity\n\n def _build_clusters(self, name, layer):\n \"\"\"Extracts the cluster centroids and cluster indices.\n\n Extracts cluster centroids and cluster indices from the pretrained\n clustered model when the input layer is clustered.\n\n Args:\n name: Name of weights in layer.\n layer: Quantization wrapped keras layer.\n Returns:\n A dictionary of the initial values of the\n cluster centroids, cluster indices, original weights,\n the pretrained flag for marking the first training\n epoch, and weight name.\n \"\"\"\n result = {}\n weights = getattr(layer.layer, name)\n if self.preserve_sparsity and not tf.reduce_any(weights == 0):\n self.preserve_sparsity = False\n logging.warning(\n 'Input layer does not contain zero weights, so apply CQAT instead.')\n centroids_mask = None\n centroids, lookup = get_unique(weights)\n num_centroids = tf.size(centroids)\n\n if self.preserve_sparsity:\n sparsity_mask = tf.math.divide_no_nan(weights, weights)\n zero_idx = tf.argmin(tf.abs(centroids), axis=-1)\n centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids)\n result = {SPARSITY_MASK: sparsity_mask}\n\n # Prepare clustering variables for the Keras graph when clusters\n # exist, assuming we do not use number_of_clusters larger than 1024\n if num_centroids > 1024:\n return result\n else:\n clst_centroids_tf = layer.add_weight(\n CLUSTER_CENTROIDS,\n shape=centroids.shape,\n initializer=tf.keras.initializers.Constant(\n value=K.batch_get_value([centroids])[0]),\n dtype=centroids.dtype,\n trainable=True)\n\n ori_weights_tf = layer.add_weight(\n 
ORIGINAL_WEIGHTS,\n shape=weights.shape,\n initializer=tf.keras.initializers.Constant(\n value=K.batch_get_value([weights])[0]),\n dtype=weights.dtype,\n trainable=True)\n\n # Get clustering implementation according to layer type\n clustering_impl_cls = clustering_registry.ClusteringLookupRegistry(\n ).get_clustering_impl(layer.layer, name)\n clustering_impl = clustering_impl_cls(clst_centroids_tf)\n\n pulling_indices = tf.dtypes.cast(\n clustering_impl.get_pulling_indices(ori_weights_tf),\n lookup.dtype\n )\n\n pulling_indices_tf = layer.add_weight(\n PULLING_INDICES,\n shape=lookup.shape,\n initializer=tf.keras.initializers.Constant(\n value=K.batch_get_value([pulling_indices])[0]),\n dtype=lookup.dtype,\n trainable=False)\n\n result_clst = {\n CLUSTER_CENTROIDS: clst_centroids_tf,\n PULLING_INDICES: pulling_indices_tf,\n ORIGINAL_WEIGHTS: ori_weights_tf,\n WEIGHT_NAME: name,\n CLUSTERING_IMPL: clustering_impl,\n CENTROIDS_MASK: centroids_mask,\n }\n result.update(result_clst)\n return result\n\n def build(self, tensor_shape, name, layer):\n \"\"\"Build (P)CQAT wrapper.\n\n When preserve_sparsity is true and the input is clustered.\n\n Args:\n tensor_shape: Shape of weights which needs to be quantized.\n name: Name of weights in layer.\n layer: Quantization wrapped keras layer.\n Returns:\n Dictionary of centroids, indices and\n quantization params, the dictionary will be passed\n to __call__ function.\n \"\"\"\n # To get all the initial values from pretrained clustered model\n result = self._build_clusters(name, layer)\n # Result can have clustering nodes, then this is CQAT\n # Result can have both clustering nodes and sparsity mask, then\n # this will be PCQAT\n result.update(\n super(ClusterPreserveDefaultWeightsQuantizer,\n self).build(tensor_shape, name, layer))\n\n return result\n\n def __call__(self, inputs, training, weights, **kwargs):\n \"\"\"Apply cluster preserved quantization to the input tensor.\n\n Args:\n inputs: Input tensor (layer's weights) to be quantized.\n training: Whether the graph is currently training.\n weights: Dictionary of weights (params) the quantizer can use to\n quantize the tensor (layer's weights). 
This contains the weights\n created in the `build` function.\n **kwargs: Additional variables which may be passed to the quantizer.\n Returns:\n quantized tensor.\n \"\"\"\n if training:\n if CLUSTER_CENTROIDS in weights:\n if self.preserve_sparsity:\n weights[ORIGINAL_WEIGHTS].assign(\n tf.multiply(weights[ORIGINAL_WEIGHTS],\n weights[SPARSITY_MASK]))\n weights[CLUSTERING_IMPL].cluster_centroids.assign(\n weights[CLUSTERING_IMPL].\n cluster_centroids * weights[CENTROIDS_MASK]\n )\n weights[CLUSTER_CENTROIDS].assign(\n weights[CLUSTERING_IMPL].cluster_centroids\n )\n # Insert clustering variables\n weights[PULLING_INDICES].assign(tf.dtypes.cast(\n weights[CLUSTERING_IMPL].get_pulling_indices(\n weights[ORIGINAL_WEIGHTS]),\n weights[PULLING_INDICES].dtype\n ))\n\n output = weights[CLUSTERING_IMPL].get_clustered_weight(\n weights[PULLING_INDICES], weights[ORIGINAL_WEIGHTS])\n inputs.assign(output)\n else:\n if self.preserve_sparsity:\n inputs = tf.multiply(inputs, weights[SPARSITY_MASK])\n output = inputs\n else:\n output = inputs\n\n return quant_ops.LastValueQuantize(\n output,\n weights['min_var'],\n weights['max_var'],\n is_training=training,\n num_bits=self.num_bits,\n per_channel=self.per_axis,\n symmetric=self.symmetric,\n narrow_range=self.narrow_range\n )\n\n\nclass ClusterPreserveDefault8BitWeightsQuantizer(\n ClusterPreserveDefaultWeightsQuantizer):\n \"\"\"ClusterPreserveWeightsQuantizer for default 8bit weights.\"\"\"\n\n def __init__(self, preserve_sparsity):\n super(ClusterPreserveDefault8BitWeightsQuantizer,\n self).__init__(num_bits=8,\n per_axis=False,\n symmetric=True,\n narrow_range=True,\n preserve_sparsity=preserve_sparsity)\n self.preserve_sparsity = preserve_sparsity\n\n\nclass ClusterPreserveDefault8BitConvWeightsQuantizer(\n ClusterPreserveDefaultWeightsQuantizer,\n default_8bit_quantizers.Default8BitConvWeightsQuantizer):\n \"\"\"ClusterPreserveWeightsQuantizer for default 8bit Conv2D weights.\"\"\"\n\n def __init__(self, preserve_sparsity): # pylint: disable=super-init-not-called\n default_8bit_quantizers.Default8BitConvWeightsQuantizer.__init__(self)\n self.preserve_sparsity = preserve_sparsity\n\n def build(self, tensor_shape, name, layer):\n result = ClusterPreserveDefaultWeightsQuantizer._build_clusters(\n self, name, layer)\n result.update(\n default_8bit_quantizers.Default8BitConvWeightsQuantizer.build(\n self, tensor_shape, name, layer))\n return result\n"
] | [
[
"tensorflow.size",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.multiply",
"tensorflow.unique",
"tensorflow.reduce_any",
"tensorflow.abs",
"tensorflow.one_hot",
"tensorflow.math.divide_no_nan"
]
] |
Luciano233/OCR_Japanease | [
"055bdd0cc8e4d053dfb471cd642b1616ba0938d1"
] | [
"nets/classifiernet.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .block import Mish, SeparableConv2d, Block\n\nclass WideTipXception(nn.Module):\n def __init__(self, num_class):\n super(WideTipXception, self).__init__()\n\n self.conv1 = nn.Conv2d(1, 192, 3, 2, 1, bias=True)\n self.bn1 = nn.BatchNorm2d(192)\n self.mish = Mish()\n\n self.conv2 = nn.Conv2d(192, 512, 3, 1, 1, bias=True)\n self.bn2 = nn.BatchNorm2d(512)\n\n self.block1 = Block(512,1024,3,1)\n self.block2 = Block(1024,1024,3,1)\n self.block3 = Block(1024,1024,3,1)\n self.block4 = Block(1024,1024,3,1)\n self.block5 = Block(1024,1024,3,1)\n self.block6 = Block(1024,2048,2,2)\n self.block7 = Block(2048,3072,2,2)\n\n self.conv3 = SeparableConv2d(3072,4096,3,stride=1,padding=0,bias=True)\n self.fc = nn.Linear(4096, num_class)\n\n def forward(self, input):\n x = self.conv1(input)\n x = self.bn1(x)\n x = self.mish(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n\n x = self.block1(x)\n x = self.block2(x)\n x = self.block3(x)\n x = self.block4(x)\n x = self.block5(x)\n x = self.block6(x)\n x = self.block7(x)\n\n x = self.mish(x)\n x = self.conv3(x)\n\n x = self.mish(x)\n x = F.adaptive_avg_pool2d(x, (1, 1))\n x = x.view(x.size(0), -1)\n result = self.fc(x)\n\n return result\n\ndef get_classifiernet(num_class):\n model = WideTipXception(num_class)\n return model\n"
] | [
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Conv2d"
]
] |
HMS-CardiacMR/MyoMapNet-Myocardial-Parametric-Mapping | [
"1e2dee8d6d1f97722eba91618462537faf9efba7"
] | [
"InLine_Implementation/Code/utils/fftutils.py"
] | [
"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\n\n################################################################################\n################################################################################\n\ndef roll_nn(X, axis, n):\n f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))\n b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))\n front = X[f_idx]\n back = X[b_idx]\n return torch.cat([back, front], axis)\n\ndef fftshift2d(x, dims = [2, 3]):\n real, imag = torch.unbind(x, -1)\n for dim in dims:\n n_shift = real.size(dim)//2\n if real.size(dim) % 2 != 0:\n n_shift += 1 # for odd-sized images\n real = roll_nn(real, axis=dim, n=n_shift)\n imag = roll_nn(imag, axis=dim, n=n_shift)\n return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\ndef ifftshift2d(x, dims = [2, 3]):\n real, imag = torch.unbind(x, -1)\n for dim in dims[::-1]:\n real = roll_nn(real, axis=dim, n=real.size(dim)//2)\n imag = roll_nn(imag, axis=dim, n=imag.size(dim)//2)\n return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\n\n################################################################################\n################################################################################\n\n\ndef roll_n(X, axis, n):\n f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))\n b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))\n front = X[f_idx]\n back = X[b_idx]\n return torch.cat([back, front], axis)\n\ndef batch_fftshift2d(x, dims = [-2, -3]):\n real, imag = torch.unbind(x, -1)\n for dim in range(1, len(real.size())):\n n_shift = real.size(dim)//2\n if real.size(dim) % 2 != 0:\n n_shift += 1 # for odd-sized images\n real = roll_n(real, axis=dim, n=n_shift)\n imag = roll_n(imag, axis=dim, n=n_shift)\n return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\ndef batch_ifftshift2d(x):\n real, imag = torch.unbind(x, -1)\n for dim in range(len(real.size()) - 1, 0, -1):\n real = roll_n(real, axis=dim, n=real.size(dim)//2)\n imag = roll_n(imag, axis=dim, n=imag.size(dim)//2)\n return torch.stack((real, imag), -1) # last dim=2 (real&imag)\n\n\n\n\n\n################################################################################\n################################################################################\n\ndef prepare_grid(m, n):\n x = np.linspace(-(m // 2)/(m / 2), (m // 2)/(m / 2) - (1 - m % 2)*2/m, num=m)\n y = np.linspace(-(n // 2)/(n / 2), (n // 2)/(n / 2) - (1 - n % 2)*2/n, num=n)\n xv, yv = np.meshgrid(y, x)\n angle = np.arctan2(yv, xv)\n rad = np.sqrt(xv**2 + yv**2)\n rad[m//2][n//2] = rad[m//2][n//2 - 1]\n log_rad = np.log2(rad)\n return log_rad, angle\n\ndef rcosFn(width, position):\n N = 256 # abritrary\n X = np.pi * np.array(range(-N-1, 2))/2/N\n Y = np.cos(X)**2\n Y[0] = Y[1]\n Y[N+2] = Y[N+1]\n X = position + 2*width/np.pi*(X + np.pi/4)\n return X, Y\n\ndef pointOp(im, Y, X):\n out = np.interp(im.flatten(), X, Y)\n return np.reshape(out, im.shape)\n\ndef getlist(coeff):\n straight = [bands for scale in coeff[1:-1] for bands in scale]\n straight = [coeff[0]] + straight + [coeff[-1]]\n return straight\n\n################################################################################\n# NumPy reference implementation (fftshift and ifftshift)\n\n# def fftshift(x, axes=None):\n# 
\"\"\"\n# Shift the zero-frequency component to the center of the spectrum.\n# This function swaps half-spaces for all axes listed (defaults to all).\n# Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.\n# Parameters\n# \"\"\"\n# x = np.asarray(x)\n# if axes is None:\n# axes = tuple(range(x.ndim))\n# shift = [dim // 2 for dim in x.shape]\n# shift = [x.shape[ax] // 2 for ax in axes]\n# return np.roll(x, shift, axes)\n#\n# def ifftshift(x, axes=None):\n# \"\"\"\n# The inverse of `fftshift`. Although identical for even-length `x`, the\n# functions differ by one sample for odd-length `x`.\n# \"\"\"\n# x = np.asarray(x)\n# if axes is None:\n# axes = tuple(range(x.ndim))\n# shift = [-(dim // 2) for dim in x.shape]\n# shift = [-(x.shape[ax] // 2) for ax in axes]\n# return np.roll(x, shift, axes)"
] | [
[
"torch.unbind",
"numpy.arctan2",
"torch.stack",
"numpy.log2",
"numpy.reshape",
"numpy.cos",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.linspace",
"torch.cat"
]
] |
natemalek/molen-pater-nathan-rma-thesis-coreference-with-singletons | [
"3d2d6c751eadd6438a80b0c24f48b2635bc6acc7"
] | [
"deep-coref/modified_keras/examples/babi_memnn.py"
] | [
"from __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.core import Activation, Dense, Merge, Permute, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.datasets.data_utils import get_file\nfrom keras.preprocessing.sequence import pad_sequences\nfrom functools import reduce\nimport tarfile\nimport numpy as np\nimport re\n\n\"\"\"\nTrain a memory network on the bAbI dataset.\n\nReferences:\n- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,\n \"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks\",\n http://arxiv.org/abs/1503.08895\n\n- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,\n \"End-To-End Memory Networks\",\n http://arxiv.org/abs/1503.08895\n\nReaches 93% accuracy on task 'single_supporting_fact_10k' after 70 epochs.\nTime per epoch: 3s on CPU (core i7).\n\"\"\"\n\n\ndef tokenize(sent):\n '''Return the tokens of a sentence including punctuation.\n\n >>> tokenize('Bob dropped the apple. Where is the apple?')\n ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']\n '''\n return [x.strip() for x in re.split('(\\W+)?', sent) if x.strip()]\n\n\ndef parse_stories(lines, only_supporting=False):\n '''Parse stories provided in the bAbi tasks format\n\n If only_supporting is true, only the sentences that support the answer are kept.\n '''\n data = []\n story = []\n for line in lines:\n line = line.decode('utf-8').strip()\n nid, line = line.split(' ', 1)\n nid = int(nid)\n if nid == 1:\n story = []\n if '\\t' in line:\n q, a, supporting = line.split('\\t')\n q = tokenize(q)\n substory = None\n if only_supporting:\n # Only select the related substory\n supporting = map(int, supporting.split())\n substory = [story[i - 1] for i in supporting]\n else:\n # Provide all the substories\n substory = [x for x in story if x]\n data.append((substory, q, a))\n story.append('')\n else:\n sent = tokenize(line)\n story.append(sent)\n return data\n\n\ndef get_stories(f, only_supporting=False, max_length=None):\n '''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.\n\n If max_length is supplied, any stories longer than max_length tokens will be discarded.\n '''\n data = parse_stories(f.readlines(), only_supporting=only_supporting)\n flatten = lambda data: reduce(lambda x, y: x + y, data)\n data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]\n return data\n\n\ndef vectorize_stories(data, word_idx, story_maxlen, query_maxlen):\n X = []\n Xq = []\n Y = []\n for story, query, answer in data:\n x = [word_idx[w] for w in story]\n xq = [word_idx[w] for w in query]\n y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved\n y[word_idx[answer]] = 1\n X.append(x)\n Xq.append(xq)\n Y.append(y)\n return (pad_sequences(X, maxlen=story_maxlen),\n pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))\n\n\npath = get_file('babi-tasks-v1-2.tar.gz',\n origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')\ntar = tarfile.open(path)\n\nchallenges = {\n # QA1 with 10,000 samples\n 'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',\n # QA2 with 10,000 samples\n 'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',\n}\nchallenge_type = 'single_supporting_fact_10k'\nchallenge = 
challenges[challenge_type]\n\nprint('Extracting stories for the challenge:', challenge_type)\ntrain_stories = get_stories(tar.extractfile(challenge.format('train')))\ntest_stories = get_stories(tar.extractfile(challenge.format('test')))\n\nvocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train_stories + test_stories)))\n# Reserve 0 for masking via pad_sequences\nvocab_size = len(vocab) + 1\nstory_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))\nquery_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))\n\nprint('-')\nprint('Vocab size:', vocab_size, 'unique words')\nprint('Story max length:', story_maxlen, 'words')\nprint('Query max length:', query_maxlen, 'words')\nprint('Number of training stories:', len(train_stories))\nprint('Number of test stories:', len(test_stories))\nprint('-')\nprint('Here\\'s what a \"story\" tuple looks like (input, query, answer):')\nprint(train_stories[0])\nprint('-')\nprint('Vectorizing the word sequences...')\n\nword_idx = dict((c, i + 1) for i, c in enumerate(vocab))\ninputs_train, queries_train, answers_train = vectorize_stories(train_stories, word_idx, story_maxlen, query_maxlen)\ninputs_test, queries_test, answers_test = vectorize_stories(test_stories, word_idx, story_maxlen, query_maxlen)\n\nprint('-')\nprint('inputs: integer tensor of shape (samples, max_length)')\nprint('inputs_train shape:', inputs_train.shape)\nprint('inputs_test shape:', inputs_test.shape)\nprint('-')\nprint('queries: integer tensor of shape (samples, max_length)')\nprint('queries_train shape:', queries_train.shape)\nprint('queries_test shape:', queries_test.shape)\nprint('-')\nprint('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')\nprint('answers_train shape:', answers_train.shape)\nprint('answers_test shape:', answers_test.shape)\nprint('-')\nprint('Compiling...')\n\n# embed the input sequence into a sequence of vectors\ninput_encoder_m = Sequential()\ninput_encoder_m.add(Embedding(input_dim=vocab_size,\n output_dim=64,\n input_length=story_maxlen))\n# output: (samples, story_maxlen, embedding_dim)\n# embed the question into a single vector\nquestion_encoder = Sequential()\nquestion_encoder.add(Embedding(input_dim=vocab_size,\n output_dim=64,\n input_length=query_maxlen))\n# output: (samples, query_maxlen, embedding_dim)\n# compute a 'match' between input sequence elements (which are vectors)\n# and the question vector\nmatch = Sequential()\nmatch.add(Merge([input_encoder_m, question_encoder],\n mode='dot',\n dot_axes=[(2,), (2,)]))\n# output: (samples, story_maxlen, query_maxlen)\n# embed the input into a single vector with size = story_maxlen:\ninput_encoder_c = Sequential()\ninput_encoder_c.add(Embedding(input_dim=vocab_size,\n output_dim=query_maxlen,\n input_length=story_maxlen))\n# output: (samples, story_maxlen, query_maxlen)\n# sum the match vector with the input vector:\nresponse = Sequential()\nresponse.add(Merge([match, input_encoder_c], mode='sum'))\n# output: (samples, story_maxlen, query_maxlen)\nresponse.add(Permute((2, 1))) # output: (samples, query_maxlen, story_maxlen)\n\n# concatenate the match vector with the question vector,\n# and do logistic regression on top\nanswer = Sequential()\nanswer.add(Merge([response, question_encoder], mode='concat', concat_axis=-1))\n# the original paper uses a matrix multiplication for this reduction step.\n# we choose to use a RNN instead.\nanswer.add(LSTM(64))\n# one regularization layer -- more would probably be 
needed.\nanswer.add(Dropout(0.25))\nanswer.add(Dense(vocab_size))\n# we output a probability distribution over the vocabulary\nanswer.add(Activation('softmax'))\n\nanswer.compile(optimizer='rmsprop', loss='categorical_crossentropy')\n# Note: you could use a Graph model to avoid repeating the input twice\nanswer.fit([inputs_train, queries_train, inputs_train], answers_train,\n           batch_size=32,\n           nb_epoch=70,\n           show_accuracy=True,\n           validation_data=([inputs_test, queries_test, inputs_test], answers_test))\n"
] | [
[
"numpy.array"
]
] |
TatsuyaShirakawa/torchemb | [
"b72912c7602537f368c16fdeb2bb6cf177b742be"
] | [
"scripts/poincare_embeddings.py"
] | [
"'''\nA rouch implementation of the following paper\n\nM. Nickel+, \"Poincaré Embeddings for Learning Hierarchical Representations\", NeurIPS2017\nhttps://arxiv.org/pdf/1705.08039.pdf\n'''\nimport argparse\nfrom pathlib import Path\nimport itertools\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import (IterableDataset,\n RandomSampler,\n DataLoader)\nfrom gensim.corpora import Dictionary\nfrom tqdm import tqdm\nfrom torchemb.poincare_ball import PoincareBallEmbedding, PoincareBall\n\n\nparser = argparse.ArgumentParser('Poincare Embeddings')\nparser.add_argument('data_file', type=Path)\nparser.add_argument('result_file', type=Path)\nparser.add_argument('-e', '--embedding_dim', default=10, type=int)\nparser.add_argument('-m', '--max_epoch', default=200, type=int)\nparser.add_argument('-s', '--samples_per_iter', default=10000, type=int)\nparser.add_argument('-b', '--batch_size', default=512, type=int)\nparser.add_argument('-n', '--neg_size', default=10, type=int)\nparser.add_argument('-c', default=1, type=float)\nparser.add_argument('--lr', default=1.0e-3, type=float)\nparser.add_argument('--momentum', default=0.9, type=float)\nparser.add_argument('--seed', default=0, type=int)\nargs = parser.parse_args()\nprint(args)\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n\ndef load_data(data_file):\n data_file = Path(data_file)\n pairs = []\n with data_file.open() as fin:\n for line in fin:\n a, b = line.strip().split('\\t')\n pairs.append((a, b))\n d = Dictionary(pairs)\n pairs = np.asarray([d.doc2idx(pair) for pair in pairs])\n return d, pairs\n\n\ndictionary, pairs = load_data(args.data_file)\nprint(len(dictionary), len(pairs))\n\n\nclass Dataset(IterableDataset):\n def __init__(self, pairs, neg_size):\n self.pairs = pairs\n self.neg_size = neg_size\n self.pair_sampler = RandomSampler(list(range(len(self.pairs))), replacement=True)\n\n def __iter__(self):\n pair_iter = itertools.cycle(iter(self.pair_sampler))\n while True:\n idx = next(pair_iter)\n x, y = self.pairs[idx]\n ys = [y] + [self.pairs[next(pair_iter)][1] for _ in range(self.neg_size - 1)]\n yield x, torch.LongTensor(ys)\n\n\ndata = DataLoader(Dataset(pairs, args.neg_size),\n batch_size=args.batch_size)\n\nembeddings = PoincareBallEmbedding(len(dictionary), args.embedding_dim, c=args.c)\nmanifold = PoincareBall(c=args.c)\n\nloss = nn.CrossEntropyLoss()\nwarmup_rate = 100\nlr = args.lr * (args.batch_size ** 0.5) / (args.embedding_dim ** 0.5)\noptimizer = optim.Adam(embeddings.parameters(), lr=lr)\nlr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer,\n gamma=math.exp(math.log(0.01) / args.max_epoch))\n\n\ndef train(embeddings, loss, optimizer, data, samples_per_iter):\n embeddings.train()\n data_iter = iter(data)\n avg_loss_ = 0\n N = samples_per_iter // args.batch_size\n for i in tqdm(range(N)):\n idx1, idx2 = next(data_iter)\n x, ys = embeddings(idx1), embeddings(idx2)\n assert(not torch.any(torch.isnan(x)))\n assert(not torch.any(torch.isnan(ys)))\n ds = manifold.distance(x[:, None, :].expand_as(ys), ys)\n logits = -ds\n loss_ = loss(logits, torch.zeros(len(logits), dtype=torch.long))\n optimizer.zero_grad()\n loss_.backward()\n optimizer.step()\n avg_loss_ += loss_.item()\n avg_loss_ /= N\n print('train loss: {:.5f}'.format(avg_loss_))\n\n\ndef save(embeddings, dictionary, result_file):\n embeddings.eval()\n result_file = Path(result_file)\n result_file.parent.mkdir(parents=True, exist_ok=True)\n with torch.no_grad():\n with 
result_file.open('w') as fout:\n for i, c in sorted(dictionary.dfs.items()):\n e = embeddings(torch.LongTensor([i]))[0]\n print(dictionary[i], *[_.item() for _ in e], sep='\\t', file=fout)\n\n\nfor epoch in range(args.max_epoch):\n print('epoch:', epoch + 1, '/', args.max_epoch)\n train(embeddings, loss, optimizer, data, args.samples_per_iter)\n lr_scheduler.step()\n save(embeddings, dictionary, args.result_file)\n"
] | [
[
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed",
"torch.nn.CrossEntropyLoss",
"torch.isnan",
"torch.LongTensor"
]
] |
kbots-dga/ml_classificator | [
"0163141de7389825787b9813019582be98d0f266"
] | [
"app.py"
] | [
"import streamlit as st\nimport plotly.express as px\nimport pandas as pd\nimport pickle\nimport os\nimport base64\nfrom io import BytesIO\nfrom datetime import datetime\n\n\ndef to_excel(df):\n output = BytesIO()\n writer = pd.ExcelWriter(output, engine='xlsxwriter')\n df.to_excel(writer, index=False, sheet_name='Sheet1')\n writer.save()\n processed_data = output.getvalue()\n return processed_data\n\n\ndef get_table_download_link(df):\n val = to_excel(df)\n b64 = base64.b64encode(val)\n date_now = datetime.utcnow()\n file_name = f'data_resultado-{date_now.strftime(\"%Y%m%d%H%M%S\")}.xlsx'\n link_download = f\"\"\" <a href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"{file_name}\">Download xlsx file</a> \"\"\"\n return link_download\n\n\ndef plot_graph(df_graph):\n fig = px.bar(\n df_graph,\n x='Labels',\n y='Descrição',\n # text='Text test',\n title='Test',\n labels={\n \"Labels\": \"Labels\",\n \"Descrição\": 'Número de coisas'\n },\n # width=1400,\n height=500\n )\n return fig\n\n\ndef main(classificador):\n st.title('Model')\n process_file = st.file_uploader(\n \"Faça o upload do arquivo no campo abaixo.\",\n type=[\"csv\", \"xlsx\"],\n accept_multiple_files=False\n )\n\n print(process_file)\n print(os.environ.get('TOKEN'))\n if process_file != None:\n if process_file.name.endswith('.csv'):\n df = pd.read_csv(\n process_file, header=0, skip_blank_lines=True, skipinitialspace=True, encoding='latin-1')\n\n elif process_file.name.endswith('.xlsx'):\n df = pd.read_excel(\n process_file, engine=\"openpyxl\")\n\n with st.empty():\n st.write('Fazendo as predições ...')\n df['Labels'] = classificador.predict(\n df[\"Descrição\"].astype(\"unicode\"))\n st.write('Predições feitas com sucesso !!!')\n\n st.dataframe(df.head(20))\n\n df_graph = df.groupby(['Labels'], as_index=False)['Descrição'].count()\n df_graph.sort_values(by=['Descrição'], inplace=True, ascending=False)\n print(df_graph)\n st.plotly_chart(plot_graph(df_graph), use_container_width=True)\n st.text('Gerando link para download ...')\n st.markdown(get_table_download_link(df), unsafe_allow_html=True)\n st.success('Link gerado com sucesso.')\n\n\nif __name__ == '__main__':\n classificador = pickle.load(open(\"modelo_final.pkl\", \"rb\"))\n main(classificador)\n"
] | [
[
"pandas.read_csv",
"pandas.ExcelWriter",
"pandas.read_excel"
]
] |
tomkimpson/ML4L | [
"ffa8360cb80df25bd6af4fa5cc39b42bd6f405cd"
] | [
"legacy/legacy_scripts/legacy/.ipynb_checkpoints/train_and_predict-checkpoint.py"
] | [
"\n\n\n\n\nimport tensorflow as tf\nimport os\nimport time\nimport json\nimport pandas as pd\n\n\n\n\"\"\"\nScript to train a sequential NN.\nTakes a df, filters based on `condition` (default None), and separates into test/train based on time\nNN trains on training data, all results output to disk\n\"\"\"\n\n\ndef train_test_split(df,filter_condition,train_condition, test_condition,features,targets):\n \n \n \"\"\"\n Separate df into a train and test set.\n Returns training and testing dfs as well as split into inputs/outputs \n \"\"\"\n \n #Filter dataset\n if filter_condition is not None:\n df_filtered = df.query(filter_condition)\n else:\n df_filtered = df\n \n #Select train/test data\n training_data = df_filtered.query(train_condition)\n test_data = df_filtered.query(test_condition)\n \n \n #Separate into features/targets\n\n x_train = training_data[features]\n y_train = training_data[targets]\n\n x_test = test_data[features]\n y_test = test_data[targets]\n \n \n return x_train,y_train,x_test,y_test,training_data, test_data #\n \n \n\n\ndef create_normalizer_layer(x_train):\n #Create a normaliser layer\n print ('Creating a normalization layer')\n normalizer = tf.keras.layers.Normalization(axis=-1)\n normalizer.adapt(x_train)\n \n return normalizer\n\ndef train_NN(x_train,y_train,normalizer):\n\n\n #Check GPU available\n print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n\n\n #Create a basic NN model\n model = tf.keras.Sequential([\n normalizer,\n tf.keras.layers.Dense(int(len(features)/2), activation='relu',input_shape=(len(features),),name='layer1'),\n tf.keras.layers.Dense(1, name='output')\n ])\n\n #Compile it\n print ('Compiling model')\n model.compile(optimizer='adam',\n loss='mse',\n metrics=['accuracy'])\n \n\n \n #Train it\n print('Training model')\n history = model.fit(x_train, y_train, epochs=100, batch_size=10000) \n \n \n return history, model\n\n\ndef write_outputs(output_path,model,history,x_train,y_train,x_test,y_test,df_train,df_test):\n\n print ('Writing outputs to dir: ', fout)\n id = int(time.time())\n fout = output_path+f'ML_{str(id)}/'\n os.makedirs(fout)\n print ('Writing outputs to dir: ', fout)\n\n\n \n #NN\n model.save(fout+'trained_model') \n history_dict = history.history\n json.dump(history_dict, open(fout+'history.json', 'w'))\n \n #Data\n #Probaby overkill saving all of these\n x_train.to_pickle(fout + \"x_train.pkl\") \n y_train.to_pickle(fout + \"y_train.pkl\") \n x_test.to_pickle(fout + \"x_test.pkl\") \n x_test.to_pickle(fout + \"y_test.pkl\")\n df_train.to_pickle(fout + \"df_train.pkl\") \n df_test.to_pickle(fout + \"df_test.pkl\") \n\n\n\n \n \n\n\ndef pipeline(input_file,output_path,filter_condition,train_condition, test_condition,features):\n \n \n #Load the data\n print('Loading the data')\n df = pd.read_pickle(input_file)\n\n \n #Process into train/test\n targets = ['MODIS_LST']\n x_train,y_train,x_test,y_test,df_train,df_test = train_test_split(df,filter_condition,train_condition, test_condition,features,targets)\n\n \n \n #Train NN\n normalizer = create_normalizer_layer(x_train)\n history,model = train_NN(x_train,y_train,normalizer)\n \n \n #Save trained NN in new dir, along with train/test sets\n write_outputs(output_path,model,history,x_train,y_train,x_test,y_test,df_train,df_test)\n\n \n \n \n#Parameters\n\n#IO\ninput_file = '/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/ECMWF_files/raw/MODIS_ERA_joined_data_averaged.pkl'\noutput_path = 
'/network/group/aopp/predict/TIP016_PAXTON_RPSPEEDY/ML4L/'\n\n\n\n#Pre Processing\nfilter_condition = None\ntrain_condition = 'time < \"2019-01-01 00:00:00\"'\ntest_condition = 'time >= \"2020-01-01 00:00:00\"'\nfeatures = ['sp', 'msl', 'u10', 'v10','t2m',\n 'aluvp', 'aluvd', 'alnip', 'alnid', 'cl',\n 'cvl', 'cvh', 'slt', 'sdfor', 'z', 'sd', 'sdor', 'isor', 'anor', 'slor',\n 'd2m', 'lsm', 'fal'] \n\n\n\n\n#Go\npipeline(input_file,output_path,filter_condition,train_condition, test_condition,features)\n\n"
] | [
[
"pandas.read_pickle",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Normalization",
"tensorflow.config.list_physical_devices"
]
] |
paul-ang/nas-segm-pytorch | [
"e83704b6cdac6426d6ee51059f82cf650238677c"
] | [
"src/utils/solvers.py"
] | [
"\"\"\"Initialising Optimisers\"\"\"\n\nimport torch\n\n\ndef create_optimisers(\n optim_enc,\n optim_dec,\n lr_enc,\n lr_dec,\n mom_enc,\n mom_dec,\n wd_enc,\n wd_dec,\n param_enc,\n param_dec,\n):\n \"\"\"Create optimisers for encoder, decoder\n\n Args:\n optim_enc (str) : type of optimiser for encoder\n optim_dec (str) : type of optimiser for decoder\n lr_enc (float) : learning rate for encoder\n lr_dec (float) : learning rate for decoder\n mom_enc (float) : momentum for encoder\n mom_dec (float) : momentum for decoder\n wd_enc (float) : weight decay for encoder\n wd_dec (float) : weight decay for decoder\n param_enc (torch.parameters()) : encoder parameters\n param_dec (torch.parameters()) : decoder parameters\n\n Returns optim_enc, optim_dec (torch.optim)\n\n \"\"\"\n if optim_enc == \"sgd\":\n optim_enc = torch.optim.SGD(\n param_enc, lr=lr_enc, momentum=mom_enc, weight_decay=wd_enc\n )\n elif optim_enc == \"adam\":\n optim_enc = torch.optim.Adam(param_enc, lr=lr_enc, weight_decay=wd_enc)\n else:\n raise ValueError(\"Unknown Encoder Optimiser: {}\".format(optim_enc))\n\n if optim_dec == \"sgd\":\n optim_dec = torch.optim.SGD(\n param_dec, lr=lr_dec, momentum=mom_dec, weight_decay=wd_dec\n )\n elif optim_dec == \"adam\":\n optim_dec = torch.optim.Adam(param_dec, lr=lr_dec, weight_decay=wd_dec)\n else:\n raise ValueError(\"Unknown Decoder Optimiser: {}\".format(optim_dec))\n return optim_enc, optim_dec\n"
] | [
[
"torch.optim.Adam",
"torch.optim.SGD"
]
] |
rpalovics/Alpenglow | [
"7a15d5c57b511787379f095e7310e67423159fa0"
] | [
"examples/external_models/turicreate/run_turicreate.py"
] | [
"import os\nos.environ[\"OMP_NUM_THREADS\"] = \"10\"\n\nimport sys\nimport pandas as pd\nimport numpy as np\nimport turicreate as tc\n\n\nfor i in range(1, 14):\n print(\"running batch %d\" % i)\n batch = pd.read_csv(\"batches/batch_%d_train.dat\" % i)\n test_users = pd.read_csv(\"batches/batch_%d_test.dat\" % i)\n model = tc.ranking_factorization_recommender.create(\n tc.SFrame(batch),\n 'user',\n 'item',\n num_factors=10,\n verbose=True,\n solver='ials',\n max_iterations=50,\n ials_confidence_scaling_factor=30\n )\n results = model.recommend(users=test_users.user.values, k=100, exclude_known=True, verbose=False)\n results.to_dataframe()[['user', 'item', 'rank']].to_csv('batches/batch_%d_predictions.dat' % i, sep=' ', header=False, index=False)"
] | [
[
"pandas.read_csv"
]
] |
smarsu/facenet | [
"a0fa3ffe32e295b4cc980a4a178593cc7f1bad12"
] | [
"compare.py"
] | [
"# --------------------------------------------------------\n# SMNet FaceNet\n# Licensed under The MIT License [see LICENSE for details]\n# Copyright 2019 smarsu. All Rights Reserved.\n# --------------------------------------------------------\n\nimport os.path as osp\nimport numpy as np\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom euclidean import euclidean_distance\n\nEPS = 1e-12\n\n\ndef load_feature_map_from_txt(path_txt):\n \"\"\"\"\"\"\n with open(path_txt, 'r') as fb:\n lines = fb.readlines()\n feature_map = {}\n for line in lines:\n line = line.strip().split()\n name = line[0]\n feature = [float(v) for v in line[1:]]\n feature_map[name] = np.array(feature, dtype=np.float64)\n return feature_map\n\n\ndef load_pairs(pair_path):\n with open(pair_path, 'r') as fb:\n lfw_root = '/datasets/lfw_detected'\n lines = fb.readlines()\n pairs = []\n for line in lines:\n fst, snd, match = line.strip().split()\n fst = osp.join(lfw_root, fst)\n snd = osp.join(lfw_root, snd)\n pairs.append([fst, snd, int(match)])\n return pairs\n\n\ndef l2_norm(x):\n \"\"\"\n Args:\n x: ndarray, [n, feature_len]\n \"\"\"\n x = np.array(x, dtype=np.float64)\n return x / (np.sqrt(np.sum(np.square(x), axis=-1, keepdims=True)) + EPS)\n\n\ndef cosine_similarity(a, b):\n \"\"\"\n Args:\n a: ndarray, [feature_len]\n b: ndarray, [feature_len]\n \"\"\"\n a = np.array(a, dtype=np.float64)\n b = np.array(b, dtype=np.float64)\n return np.sum(a * b)\n\n\ndef auc(scores, labels):\n \"\"\"\"\"\"\n return metrics.roc_auc_score(labels, scores)\n\n\ndef roc(scores, labels):\n \"\"\"\"\"\"\n fpr, tpr, thresholds = metrics.roc_curve(labels, scores)\n plt.plot(fpr, tpr)\n plt.savefig('roc.png')\n\n\ndef main():\n feature_map = load_feature_map_from_txt('features.txt')\n feature_map = {k: l2_norm(v) for k, v in feature_map.items()}\n\n pairs = load_pairs('parsed_pair.txt')\n scores = []\n labels = []\n for fst, snd, match in pairs:\n labels.append(match)\n if fst not in feature_map:\n scores.append(1)\n print('WARNING: not found', fst)\n continue\n elif snd not in feature_map:\n scores.append(1)\n print('WARNING: not found', snd)\n continue\n score = 2 - euclidean_distance(feature_map[fst], feature_map[snd])\n scores.append(score)\n\n print(scores)\n print(labels)\n\n print(min(scores))\n print(max(scores))\n \n print(auc(scores, labels))\n roc(scores, labels)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.sum",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.savefig",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.square"
]
] |
Ombarus/python_env | [
"4615976a51aa4f02206f5e03fc091b088d3273fc",
"4615976a51aa4f02206f5e03fc091b088d3273fc"
] | [
"python35/Lib/site-packages/sklearn/linear_model/base.py",
"python35/Lib/site-packages/sklearn/linear_model/ridge.py"
] | [
"\"\"\"\nGeneralized Linear models.\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Olivier Grisel <[email protected]>\n# Vincent Michel <[email protected]>\n# Peter Prettenhofer <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Lars Buitinck\n# Maryan Morel <[email protected]>\n# Giorgio Patrini <[email protected]>\n# License: BSD 3 clause\n\nfrom __future__ import division\nfrom abc import ABCMeta, abstractmethod\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy import linalg\nfrom scipy import sparse\n\nfrom ..externals import six\nfrom ..externals.joblib import Parallel, delayed\nfrom ..base import BaseEstimator, ClassifierMixin, RegressorMixin\nfrom ..utils import check_array, check_X_y, deprecated, as_float_array\nfrom ..utils.validation import FLOAT_DTYPES\nfrom ..utils import check_random_state\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale\nfrom ..utils.fixes import sparse_lsqr\nfrom ..utils.seq_dataset import ArrayDataset, CSRDataset\nfrom ..utils.validation import check_is_fitted\nfrom ..exceptions import NotFittedError\nfrom ..preprocessing.data import normalize as f_normalize\n\n# TODO: bayesian_ridge_regression and bayesian_regression_ard\n# should be squashed into its respective objects.\n\nSPARSE_INTERCEPT_DECAY = 0.01\n# For sparse data intercept updates are scaled by this decay factor to avoid\n# intercept oscillation.\n\n\ndef make_dataset(X, y, sample_weight, random_state=None):\n \"\"\"Create ``Dataset`` abstraction for sparse and dense inputs.\n\n This also returns the ``intercept_decay`` which is different\n for sparse datasets.\n \"\"\"\n\n rng = check_random_state(random_state)\n # seed should never be 0 in SequentialDataset\n seed = rng.randint(1, np.iinfo(np.int32).max)\n\n if sp.issparse(X):\n dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight,\n seed=seed)\n intercept_decay = SPARSE_INTERCEPT_DECAY\n else:\n dataset = ArrayDataset(X, y, sample_weight, seed=seed)\n intercept_decay = 1.0\n\n return dataset, intercept_decay\n\n\n@deprecated(\"sparse_center_data was deprecated in version 0.18 and will be \"\n \"removed in 0.20. Use utilities in preprocessing.data instead\")\ndef sparse_center_data(X, y, fit_intercept, normalize=False):\n \"\"\"\n Compute information needed to center data to have mean zero along\n axis 0. Be aware that X will not be centered since it would break\n the sparsity, but will be normalized if asked so.\n \"\"\"\n if fit_intercept:\n # we might require not to change the csr matrix sometimes\n # store a copy if normalize is True.\n # Change dtype to float64 since mean_variance_axis accepts\n # it that way.\n if sp.isspmatrix(X) and X.getformat() == 'csr':\n X = sp.csr_matrix(X, copy=normalize, dtype=np.float64)\n else:\n X = sp.csc_matrix(X, copy=normalize, dtype=np.float64)\n\n X_offset, X_var = mean_variance_axis(X, axis=0)\n if normalize:\n # transform variance to std in-place\n X_var *= X.shape[0]\n X_std = np.sqrt(X_var, X_var)\n del X_var\n X_std[X_std == 0] = 1\n inplace_column_scale(X, 1. / X_std)\n else:\n X_std = np.ones(X.shape[1])\n y_offset = y.mean(axis=0)\n y = y - y_offset\n else:\n X_offset = np.zeros(X.shape[1])\n X_std = np.ones(X.shape[1])\n y_offset = 0. 
if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)\n\n return X, y, X_offset, y_offset, X_std\n\n\n@deprecated(\"center_data was deprecated in version 0.18 and will be removed in \"\n \"0.20. Use utilities in preprocessing.data instead\")\ndef center_data(X, y, fit_intercept, normalize=False, copy=True,\n sample_weight=None):\n \"\"\"\n Centers data to have mean zero along axis 0. This is here because\n nearly all linear models will want their data to be centered.\n If sample_weight is not None, then the weighted mean of X and y\n is zero, and not the mean itself\n \"\"\"\n X = as_float_array(X, copy)\n if fit_intercept:\n if isinstance(sample_weight, numbers.Number):\n sample_weight = None\n if sp.issparse(X):\n X_offset = np.zeros(X.shape[1])\n X_std = np.ones(X.shape[1])\n else:\n X_offset = np.average(X, axis=0, weights=sample_weight)\n X -= X_offset\n # XXX: currently scaled to variance=n_samples\n if normalize:\n X_std = np.sqrt(np.sum(X ** 2, axis=0))\n X_std[X_std == 0] = 1\n X /= X_std\n else:\n X_std = np.ones(X.shape[1])\n y_offset = np.average(y, axis=0, weights=sample_weight)\n y = y - y_offset\n else:\n X_offset = np.zeros(X.shape[1])\n X_std = np.ones(X.shape[1])\n y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)\n return X, y, X_offset, y_offset, X_std\n\n\ndef _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,\n sample_weight=None, return_mean=False):\n \"\"\"\n Centers data to have mean zero along axis 0. If fit_intercept=False or if\n the X is a sparse matrix, no centering is done, but normalization can still\n be applied. The function returns the statistics necessary to reconstruct\n the input data, which are X_offset, y_offset, X_scale, such that the output\n\n X = (X - X_offset) / X_scale\n\n X_scale is the L2 norm of X - X_offset. If sample_weight is not None,\n then the weighted mean of X and y is zero, and not the mean itself. If\n return_mean=True, the mean, eventually weighted, is returned, independently\n of whether X was centered (option used for optimization with sparse data in\n coordinate_descend).\n\n This is here because nearly all linear models will want their data to be\n centered.\n \"\"\"\n\n if isinstance(sample_weight, numbers.Number):\n sample_weight = None\n\n X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],\n dtype=FLOAT_DTYPES)\n\n if fit_intercept:\n if sp.issparse(X):\n X_offset, X_var = mean_variance_axis(X, axis=0)\n if not return_mean:\n X_offset = np.zeros(X.shape[1])\n\n if normalize:\n\n # TODO: f_normalize could be used here as well but the function\n # inplace_csr_row_normalize_l2 must be changed such that it\n # can return also the norms computed internally\n\n # transform variance to norm in-place\n X_var *= X.shape[0]\n X_scale = np.sqrt(X_var, X_var)\n del X_var\n X_scale[X_scale == 0] = 1\n inplace_column_scale(X, 1. / X_scale)\n else:\n X_scale = np.ones(X.shape[1])\n\n else:\n X_offset = np.average(X, axis=0, weights=sample_weight)\n X -= X_offset\n if normalize:\n X, X_scale = f_normalize(X, axis=0, copy=False,\n return_norm=True)\n else:\n X_scale = np.ones(X.shape[1])\n y_offset = np.average(y, axis=0, weights=sample_weight)\n y = y - y_offset\n else:\n X_offset = np.zeros(X.shape[1])\n X_scale = np.ones(X.shape[1])\n y_offset = 0. 
if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)\n\n return X, y, X_offset, y_offset, X_scale\n\n\n# TODO: _rescale_data should be factored into _preprocess_data.\n# Currently, the fact that sag implements its own way to deal with\n# sample_weight makes the refactoring tricky.\n\ndef _rescale_data(X, y, sample_weight):\n \"\"\"Rescale data so as to support sample_weight\"\"\"\n n_samples = X.shape[0]\n sample_weight = sample_weight * np.ones(n_samples)\n sample_weight = np.sqrt(sample_weight)\n sw_matrix = sparse.dia_matrix((sample_weight, 0),\n shape=(n_samples, n_samples))\n X = safe_sparse_dot(sw_matrix, X)\n y = safe_sparse_dot(sw_matrix, y)\n return X, y\n\n\nclass LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)):\n \"\"\"Base class for Linear Models\"\"\"\n\n @abstractmethod\n def fit(self, X, y):\n \"\"\"Fit model.\"\"\"\n\n @deprecated(\" and will be removed in 0.19.\")\n def decision_function(self, X):\n \"\"\"Decision function of the linear model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Returns predicted values.\n \"\"\"\n return self._decision_function(X)\n\n def _decision_function(self, X):\n check_is_fitted(self, \"coef_\")\n\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n return safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n\n def predict(self, X):\n \"\"\"Predict using the linear model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape = (n_samples,)\n Returns predicted values.\n \"\"\"\n return self._decision_function(X)\n\n _preprocess_data = staticmethod(_preprocess_data)\n\n def _set_intercept(self, X_offset, y_offset, X_scale):\n \"\"\"Set the intercept_\n \"\"\"\n if self.fit_intercept:\n self.coef_ = self.coef_ / X_scale\n self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)\n else:\n self.intercept_ = 0.\n\n\n# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.\n# Maybe the n_features checking can be moved to LinearModel.\nclass LinearClassifierMixin(ClassifierMixin):\n \"\"\"Mixin for linear classifiers.\n\n Handles prediction for sparse and dense X.\n \"\"\"\n\n def decision_function(self, X):\n \"\"\"Predict confidence scores for samples.\n\n The confidence score for a sample is the signed distance of that\n sample to the hyperplane.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n Samples.\n\n Returns\n -------\n array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)\n Confidence scores per (sample, class) combination. 
In the binary\n case, confidence score for self.classes_[1] where >0 means this\n class would be predicted.\n \"\"\"\n if not hasattr(self, 'coef_') or self.coef_ is None:\n raise NotFittedError(\"This %(name)s instance is not fitted \"\n \"yet\" % {'name': type(self).__name__})\n\n X = check_array(X, accept_sparse='csr')\n\n n_features = self.coef_.shape[1]\n if X.shape[1] != n_features:\n raise ValueError(\"X has %d features per sample; expecting %d\"\n % (X.shape[1], n_features))\n\n scores = safe_sparse_dot(X, self.coef_.T,\n dense_output=True) + self.intercept_\n return scores.ravel() if scores.shape[1] == 1 else scores\n\n def predict(self, X):\n \"\"\"Predict class labels for samples in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Samples.\n\n Returns\n -------\n C : array, shape = [n_samples]\n Predicted class label per sample.\n \"\"\"\n scores = self.decision_function(X)\n if len(scores.shape) == 1:\n indices = (scores > 0).astype(np.int)\n else:\n indices = scores.argmax(axis=1)\n return self.classes_[indices]\n\n def _predict_proba_lr(self, X):\n \"\"\"Probability estimation for OvR logistic regression.\n\n Positive class probabilities are computed as\n 1. / (1. + np.exp(-self.decision_function(X)));\n multiclass is handled by normalizing that over all classes.\n \"\"\"\n prob = self.decision_function(X)\n prob *= -1\n np.exp(prob, prob)\n prob += 1\n np.reciprocal(prob, prob)\n if prob.ndim == 1:\n return np.vstack([1 - prob, prob]).T\n else:\n # OvR normalization, like LibLinear's predict_probability\n prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n return prob\n\n\nclass SparseCoefMixin(object):\n \"\"\"Mixin for converting coef_ to and from CSR format.\n\n L1-regularizing estimators should inherit this.\n \"\"\"\n\n def densify(self):\n \"\"\"Convert coefficient matrix to dense array format.\n\n Converts the ``coef_`` member (back) to a numpy.ndarray. This is the\n default format of ``coef_`` and is required for fitting, so calling\n this method is only required on models that have previously been\n sparsified; otherwise, it is a no-op.\n\n Returns\n -------\n self: estimator\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before densifying.\"\n check_is_fitted(self, \"coef_\", msg=msg)\n if sp.issparse(self.coef_):\n self.coef_ = self.coef_.toarray()\n return self\n\n def sparsify(self):\n \"\"\"Convert coefficient matrix to sparse format.\n\n Converts the ``coef_`` member to a scipy.sparse matrix, which for\n L1-regularized models can be much more memory- and storage-efficient\n than the usual numpy.ndarray representation.\n\n The ``intercept_`` member is not converted.\n\n Notes\n -----\n For non-sparse models, i.e. when there are not many zeros in ``coef_``,\n this may actually *increase* memory usage, so use this method with\n care. 
A rule of thumb is that the number of zero elements, which can\n be computed with ``(coef_ == 0).sum()``, must be more than 50% for this\n to provide significant benefits.\n\n After calling this method, further fitting with the partial_fit\n method (if any) will not work until you call densify.\n\n Returns\n -------\n self: estimator\n \"\"\"\n msg = \"Estimator, %(name)s, must be fitted before sparsifying.\"\n check_is_fitted(self, \"coef_\", msg=msg)\n self.coef_ = sp.csr_matrix(self.coef_)\n return self\n\n\nclass LinearRegression(LinearModel, RegressorMixin):\n \"\"\"\n Ordinary least squares Linear Regression.\n\n Parameters\n ----------\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n n_jobs : int, optional, default 1\n The number of jobs to use for the computation.\n If -1 all CPUs are used. This will only provide speedup for\n n_targets > 1 and sufficient large problems.\n\n Attributes\n ----------\n coef_ : array, shape (n_features, ) or (n_targets, n_features)\n Estimated coefficients for the linear regression problem.\n If multiple targets are passed during the fit (y 2D), this\n is a 2D array of shape (n_targets, n_features), while if only\n one target is passed, this is a 1D array of length n_features.\n\n residues_ : array, shape (n_targets,) or (1,) or empty\n Sum of residuals. Squared Euclidean 2-norm for each target passed\n during the fit. If the linear regression problem is under-determined\n (the number of linearly independent rows of the training matrix is less\n than its number of linearly independent columns), this is an empty\n array. If the target vector passed during the fit is 1-dimensional,\n this is a (1,) shape array.\n\n intercept_ : array\n Independent term in the linear model.\n\n Notes\n -----\n From the implementation point of view, this is just plain Ordinary\n Least Squares (scipy.linalg.lstsq) wrapped as a predictor object.\n\n \"\"\"\n\n def __init__(self, fit_intercept=True, normalize=False, copy_X=True,\n n_jobs=1):\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.n_jobs = n_jobs\n\n @property\n @deprecated(\"``residues_`` is deprecated and will be removed in 0.19\")\n def residues_(self):\n \"\"\"Get the residues of the fitted model.\"\"\"\n return self._residues\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Fit linear model.\n\n Parameters\n ----------\n X : numpy array or sparse matrix of shape [n_samples,n_features]\n Training data\n\n y : numpy array of shape [n_samples, n_targets]\n Target values\n\n sample_weight : numpy array of shape [n_samples]\n Individual weights for each sample\n\n .. 
versionadded:: 0.17\n parameter *sample_weight* support to LinearRegression.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n\n n_jobs_ = self.n_jobs\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],\n y_numeric=True, multi_output=True)\n\n if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,\n copy=self.copy_X, sample_weight=sample_weight)\n\n if sample_weight is not None:\n # Sample weight can be implemented via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n if sp.issparse(X):\n if y.ndim < 2:\n out = sparse_lsqr(X, y)\n self.coef_ = out[0]\n self._residues = out[3]\n else:\n # sparse_lstsq cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(sparse_lsqr)(X, y[:, j].ravel())\n for j in range(y.shape[1]))\n self.coef_ = np.vstack(out[0] for out in outs)\n self._residues = np.vstack(out[3] for out in outs)\n else:\n self.coef_, self._residues, self.rank_, self.singular_ = \\\n linalg.lstsq(X, y)\n self.coef_ = self.coef_.T\n\n if y.ndim == 1:\n self.coef_ = np.ravel(self.coef_)\n self._set_intercept(X_offset, y_offset, X_scale)\n return self\n\n\ndef _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy):\n \"\"\"Aux function used at beginning of fit in linear models\"\"\"\n n_samples, n_features = X.shape\n\n if sparse.isspmatrix(X):\n precompute = False\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize,\n return_mean=True)\n else:\n # copy was done in fit if necessary\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy)\n if hasattr(precompute, '__array__') and (\n fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or\n normalize and not np.allclose(X_scale, np.ones(n_features))):\n warnings.warn(\"Gram matrix was provided but X was centered\"\n \" to fit intercept, \"\n \"or X was normalized : recomputing Gram matrix.\",\n UserWarning)\n # recompute Gram\n precompute = 'auto'\n Xy = None\n\n # precompute if n_samples > n_features\n if isinstance(precompute, six.string_types) and precompute == 'auto':\n precompute = (n_samples > n_features)\n\n if precompute is True:\n # make sure that the 'precompute' array is contiguous.\n precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,\n order='C')\n np.dot(X.T, X, out=precompute)\n\n if not hasattr(precompute, '__array__'):\n Xy = None # cannot use Xy if precompute is not Gram\n\n if hasattr(precompute, '__array__') and Xy is None:\n common_dtype = np.find_common_type([X.dtype, y.dtype], [])\n if y.ndim == 1:\n # Xy is 1d, make sure it is contiguous.\n Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')\n np.dot(X.T, y, out=Xy)\n else:\n # Make sure that Xy is always F contiguous even if X or y are not\n # contiguous: the goal is to make it fast to extract the data for a\n # specific target.\n n_targets = y.shape[1]\n Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,\n order='F')\n np.dot(y.T, X, out=Xy.T)\n\n return X, y, X_offset, y_offset, X_scale, precompute, Xy\n",
"\"\"\"\nRidge regression\n\"\"\"\n\n# Author: Mathieu Blondel <[email protected]>\n# Reuben Fletcher-Costin <[email protected]>\n# Fabian Pedregosa <[email protected]>\n# Michael Eickenberg <[email protected]>\n# License: BSD 3 clause\n\n\nfrom abc import ABCMeta, abstractmethod\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom scipy.sparse import linalg as sp_linalg\n\nfrom .base import LinearClassifierMixin, LinearModel, _rescale_data\nfrom .sag import sag_solver\nfrom ..base import RegressorMixin\nfrom ..utils.extmath import safe_sparse_dot\nfrom ..utils.extmath import row_norms\nfrom ..utils import check_X_y\nfrom ..utils import check_array\nfrom ..utils import check_consistent_length\nfrom ..utils import compute_sample_weight\nfrom ..utils import column_or_1d\nfrom ..preprocessing import LabelBinarizer\nfrom ..model_selection import GridSearchCV\nfrom ..externals import six\nfrom ..metrics.scorer import check_scoring\n\n\ndef _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):\n n_samples, n_features = X.shape\n X1 = sp_linalg.aslinearoperator(X)\n coefs = np.empty((y.shape[1], n_features))\n\n if n_features > n_samples:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.matvec(X1.rmatvec(x)) + curr_alpha * x\n return _mv\n else:\n def create_mv(curr_alpha):\n def _mv(x):\n return X1.rmatvec(X1.matvec(x)) + curr_alpha * x\n return _mv\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n\n mv = create_mv(alpha[i])\n if n_features > n_samples:\n # kernel ridge\n # w = X.T * inv(X X^t + alpha*Id) y\n C = sp_linalg.LinearOperator(\n (n_samples, n_samples), matvec=mv, dtype=X.dtype)\n coef, info = sp_linalg.cg(C, y_column, tol=tol)\n coefs[i] = X1.rmatvec(coef)\n else:\n # linear ridge\n # w = inv(X^t X + alpha*Id) * X.T y\n y_column = X1.rmatvec(y_column)\n C = sp_linalg.LinearOperator(\n (n_features, n_features), matvec=mv, dtype=X.dtype)\n coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,\n tol=tol)\n if info < 0:\n raise ValueError(\"Failed with error code %d\" % info)\n\n if max_iter is None and info > 0 and verbose:\n warnings.warn(\"sparse_cg did not converge after %d iterations.\" %\n info)\n\n return coefs\n\n\ndef _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):\n n_samples, n_features = X.shape\n coefs = np.empty((y.shape[1], n_features))\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n\n # According to the lsqr documentation, alpha = damp^2.\n sqrt_alpha = np.sqrt(alpha)\n\n for i in range(y.shape[1]):\n y_column = y[:, i]\n info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],\n atol=tol, btol=tol, iter_lim=max_iter)\n coefs[i] = info[0]\n n_iter[i] = info[2]\n\n return coefs, n_iter\n\n\ndef _solve_cholesky(X, y, alpha):\n # w = inv(X^t X + alpha*Id) * X.T y\n n_samples, n_features = X.shape\n n_targets = y.shape[1]\n\n A = safe_sparse_dot(X.T, X, dense_output=True)\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\n\n one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])\n\n if one_alpha:\n A.flat[::n_features + 1] += alpha[0]\n return linalg.solve(A, Xy, sym_pos=True,\n overwrite_a=True).T\n else:\n coefs = np.empty([n_targets, n_features])\n for coef, target, current_alpha in zip(coefs, Xy.T, alpha):\n A.flat[::n_features + 1] += current_alpha\n coef[:] = linalg.solve(A, target, sym_pos=True,\n overwrite_a=False).ravel()\n A.flat[::n_features + 1] -= current_alpha\n return coefs\n\n\ndef _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):\n # dual_coef = inv(X X^t + 
alpha*Id) y\n n_samples = K.shape[0]\n n_targets = y.shape[1]\n\n if copy:\n K = K.copy()\n\n alpha = np.atleast_1d(alpha)\n one_alpha = (alpha == alpha[0]).all()\n has_sw = isinstance(sample_weight, np.ndarray) \\\n or sample_weight not in [1.0, None]\n\n if has_sw:\n # Unlike other solvers, we need to support sample_weight directly\n # because K might be a pre-computed kernel.\n sw = np.sqrt(np.atleast_1d(sample_weight))\n y = y * sw[:, np.newaxis]\n K *= np.outer(sw, sw)\n\n if one_alpha:\n # Only one penalty, we can solve multi-target problems in one time.\n K.flat[::n_samples + 1] += alpha[0]\n\n try:\n # Note: we must use overwrite_a=False in order to be able to\n # use the fall-back solution below in case a LinAlgError\n # is raised\n dual_coef = linalg.solve(K, y, sym_pos=True,\n overwrite_a=False)\n except np.linalg.LinAlgError:\n warnings.warn(\"Singular matrix in solving dual problem. Using \"\n \"least-squares solution instead.\")\n dual_coef = linalg.lstsq(K, y)[0]\n\n # K is expensive to compute and store in memory so change it back in\n # case it was user-given.\n K.flat[::n_samples + 1] -= alpha[0]\n\n if has_sw:\n dual_coef *= sw[:, np.newaxis]\n\n return dual_coef\n else:\n # One penalty per target. We need to solve each target separately.\n dual_coefs = np.empty([n_targets, n_samples])\n\n for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):\n K.flat[::n_samples + 1] += current_alpha\n\n dual_coef[:] = linalg.solve(K, target, sym_pos=True,\n overwrite_a=False).ravel()\n\n K.flat[::n_samples + 1] -= current_alpha\n\n if has_sw:\n dual_coefs *= sw[np.newaxis, :]\n\n return dual_coefs.T\n\n\ndef _solve_svd(X, y, alpha):\n U, s, Vt = linalg.svd(X, full_matrices=False)\n idx = s > 1e-15 # same default value as scipy.linalg.pinv\n s_nnz = s[idx][:, np.newaxis]\n UTy = np.dot(U.T, y)\n d = np.zeros((s.size, alpha.size))\n d[idx] = s_nnz / (s_nnz ** 2 + alpha)\n d_UT_y = d * UTy\n return np.dot(Vt.T, d_UT_y).T\n\n\ndef ridge_regression(X, y, alpha, sample_weight=None, solver='auto',\n max_iter=None, tol=1e-3, verbose=0, random_state=None,\n return_n_iter=False, return_intercept=False):\n \"\"\"Solve the ridge equation by the method of normal equations.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix, LinearOperator},\n shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n alpha : {float, array-like},\n shape = [n_targets] if array-like\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as \n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample. If sample_weight is not None and\n solver='auto', the solver will be set to 'cholesky'.\n\n .. 
versionadded:: 0.17\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution via a Cholesky decomposition of\n dot(X.T, X)\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest but may not be available\n in old scipy versions. It also uses an iterative procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent. It also uses an\n iterative procedure, and is often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' fast\n convergence is only guaranteed on features with approximately the\n same scale. You can preprocess the data with a scaler from\n sklearn.preprocessing.\n\n All last four solvers support both dense and sparse data. However,\n only 'sag' supports sparse input when `fit_intercept` is True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n\n tol : float\n Precision of the solution.\n\n verbose : int\n Verbosity level. Setting verbose > 0 will display additional\n information depending on the solver used.\n\n random_state : int seed, RandomState instance, or None (default)\n The seed of the pseudo random number generator to use when\n shuffling the data. Used only in 'sag' solver.\n\n return_n_iter : boolean, default False\n If True, the method also returns `n_iter`, the actual number of\n iteration performed by the solver.\n\n .. versionadded:: 0.17\n\n return_intercept : boolean, default False\n If True and if X is sparse, the method also returns the intercept,\n and the solver is automatically changed to 'sag'. This is only a\n temporary fix for fitting the intercept with sparse data. For dense\n data, use sklearn.linear_model._preprocess_data before your regression.\n\n .. versionadded:: 0.17\n\n Returns\n -------\n coef : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n n_iter : int, optional\n The actual number of iteration performed by the solver.\n Only returned if `return_n_iter` is True.\n\n intercept : float or array, shape = [n_targets]\n The intercept of the model. Only returned if `return_intercept`\n is True and if X is a scipy sparse array.\n\n Notes\n -----\n This function won't compute the intercept.\n \"\"\"\n if return_intercept and sparse.issparse(X) and solver != 'sag':\n if solver != 'auto':\n warnings.warn(\"In Ridge, only 'sag' solver can currently fit the \"\n \"intercept when X is sparse. 
Solver has been \"\n \"automatically changed into 'sag'.\")\n solver = 'sag'\n\n # SAG needs X and y columns to be C-contiguous and np.float64\n if solver == 'sag':\n X = check_array(X, accept_sparse=['csr'],\n dtype=np.float64, order='C')\n y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')\n else:\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],\n dtype=np.float64)\n y = check_array(y, dtype='numeric', ensure_2d=False)\n check_consistent_length(X, y)\n\n n_samples, n_features = X.shape\n\n if y.ndim > 2:\n raise ValueError(\"Target y has the wrong shape %s\" % str(y.shape))\n\n ravel = False\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n ravel = True\n\n n_samples_, n_targets = y.shape\n\n if n_samples != n_samples_:\n raise ValueError(\"Number of samples in X and y does not correspond:\"\n \" %d != %d\" % (n_samples, n_samples_))\n\n has_sw = sample_weight is not None\n\n if solver == 'auto':\n # cholesky if it's a dense array and cg in any other case\n if not sparse.issparse(X) or has_sw:\n solver = 'cholesky'\n else:\n solver = 'sparse_cg'\n\n elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):\n warnings.warn(\"\"\"lsqr not available on this machine, falling back\n to sparse_cg.\"\"\")\n solver = 'sparse_cg'\n\n if has_sw:\n if np.atleast_1d(sample_weight).ndim > 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if solver != 'sag':\n # SAG supports sample_weight directly. For other solvers,\n # we implement sample_weight via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n # There should be either 1 or n_targets penalties\n alpha = np.asarray(alpha).ravel()\n if alpha.size not in [1, n_targets]:\n raise ValueError(\"Number of targets and number of penalties \"\n \"do not correspond: %d != %d\"\n % (alpha.size, n_targets))\n\n if alpha.size == 1 and n_targets > 1:\n alpha = np.repeat(alpha, n_targets)\n\n if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):\n raise ValueError('Solver %s not understood' % solver)\n\n n_iter = None\n if solver == 'sparse_cg':\n coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)\n\n elif solver == 'lsqr':\n coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)\n\n elif solver == 'cholesky':\n if n_features > n_samples:\n K = safe_sparse_dot(X, X.T, dense_output=True)\n try:\n dual_coef = _solve_cholesky_kernel(K, y, alpha)\n\n coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n else:\n try:\n coef = _solve_cholesky(X, y, alpha)\n except linalg.LinAlgError:\n # use SVD solver if matrix is singular\n solver = 'svd'\n\n elif solver == 'sag':\n # precompute max_squared_sum for all targets\n max_squared_sum = row_norms(X, squared=True).max()\n\n coef = np.empty((y.shape[1], n_features))\n n_iter = np.empty(y.shape[1], dtype=np.int32)\n intercept = np.zeros((y.shape[1], ))\n for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):\n init = {'coef': np.zeros((n_features + int(return_intercept), 1))}\n coef_, n_iter_, _ = sag_solver(\n X, target.ravel(), sample_weight, 'squared', alpha_i,\n max_iter, tol, verbose, random_state, False, max_squared_sum,\n init)\n if return_intercept:\n coef[i] = coef_[:-1]\n intercept[i] = coef_[-1]\n else:\n coef[i] = coef_\n n_iter[i] = n_iter_\n\n if intercept.shape[0] == 1:\n intercept = intercept[0]\n coef = np.asarray(coef)\n\n if solver == 'svd':\n if sparse.issparse(X):\n raise TypeError('SVD solver does not support sparse'\n ' inputs 
currently')\n coef = _solve_svd(X, y, alpha)\n\n if ravel:\n # When y was passed as a 1d-array, we flatten the coefficients.\n coef = coef.ravel()\n\n if return_n_iter and return_intercept:\n return coef, n_iter, intercept\n elif return_intercept:\n return coef, intercept\n elif return_n_iter:\n return coef, n_iter\n else:\n return coef\n\n\nclass _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):\n\n @abstractmethod\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n self.alpha = alpha\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.max_iter = max_iter\n self.tol = tol\n self.solver = solver\n self.random_state = random_state\n\n def fit(self, X, y, sample_weight=None):\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,\n multi_output=True, y_numeric=True)\n\n if ((sample_weight is not None) and\n np.atleast_1d(sample_weight).ndim > 1):\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n X, y, X_offset, y_offset, X_scale = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n # temporary fix for fitting the intercept with sparse data using 'sag'\n if sparse.issparse(X) and self.fit_intercept:\n self.coef_, self.n_iter_, self.intercept_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=True)\n self.intercept_ += y_offset\n else:\n self.coef_, self.n_iter_ = ridge_regression(\n X, y, alpha=self.alpha, sample_weight=sample_weight,\n max_iter=self.max_iter, tol=self.tol, solver=self.solver,\n random_state=self.random_state, return_n_iter=True,\n return_intercept=False)\n self._set_intercept(X_offset, y_offset, X_scale)\n\n return self\n\n\nclass Ridge(_BaseRidge, RegressorMixin):\n \"\"\"Linear least squares with l2 regularization.\n\n This model solves a regression model where the loss function is\n the linear least squares function and regularization is given by\n the l2-norm. Also known as Ridge Regression or Tikhonov regularization.\n This estimator has built-in support for multi-variate regression\n (i.e., when y is a 2d-array of shape [n_samples, n_targets]).\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : {float, array-like}, shape (n_targets)\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as \n LogisticRegression or LinearSVC. If an array is passed, penalties are\n assumed to be specific to the targets. Hence they must correspond in\n number.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n For 'sparse_cg' and 'lsqr' solvers, the default value is determined\n by scipy.sparse.linalg. 
For 'sag' solver, the default value is 1000.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest but may not be available\n in old scipy versions. It also uses an iterative procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent. It also uses an\n iterative procedure, and is often faster than other solvers when\n both n_samples and n_features are large. Note that 'sag' fast\n convergence is only guaranteed on features with approximately the\n same scale. You can preprocess the data with a scaler from\n sklearn.preprocessing.\n\n All last four solvers support both dense and sparse data. However,\n only 'sag' supports sparse input when `fit_intercept` is True.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n\n tol : float\n Precision of the solution.\n\n random_state : int seed, RandomState instance, or None (default)\n The seed of the pseudo random number generator to use when\n shuffling the data. Used only in 'sag' solver.\n\n .. versionadded:: 0.17\n *random_state* to support Stochastic Average Gradient.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_targets, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n .. 
versionadded:: 0.17\n\n See also\n --------\n RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`\n\n Examples\n --------\n >>> from sklearn.linear_model import Ridge\n >>> import numpy as np\n >>> n_samples, n_features = 10, 5\n >>> np.random.seed(0)\n >>> y = np.random.randn(n_samples)\n >>> X = np.random.randn(n_samples, n_features)\n >>> clf = Ridge(alpha=1.0)\n >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE\n Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n\n \"\"\"\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, solver=\"auto\",\n random_state=None):\n super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,\n normalize=normalize, copy_X=copy_X,\n max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n sample_weight : float or numpy array of shape [n_samples]\n Individual weights for each sample\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n return super(Ridge, self).fit(X, y, sample_weight=sample_weight)\n\n\nclass RidgeClassifier(LinearClassifierMixin, _BaseRidge):\n \"\"\"Classifier using Ridge regression.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alpha : float\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as \n LogisticRegression or LinearSVC.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set to false, no\n intercept will be used in calculations (e.g. data is expected to be\n already centered).\n\n max_iter : int, optional\n Maximum number of iterations for conjugate gradient solver.\n The default value is determined by scipy.sparse.linalg.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. 
The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}\n Solver to use in the computational routines:\n\n - 'auto' chooses the solver automatically based on the type of data.\n\n - 'svd' uses a Singular Value Decomposition of X to compute the Ridge\n coefficients. More stable for singular matrices than\n 'cholesky'.\n\n - 'cholesky' uses the standard scipy.linalg.solve function to\n obtain a closed-form solution.\n\n - 'sparse_cg' uses the conjugate gradient solver as found in\n scipy.sparse.linalg.cg. As an iterative algorithm, this solver is\n more appropriate than 'cholesky' for large-scale data\n (possibility to set `tol` and `max_iter`).\n\n - 'lsqr' uses the dedicated regularized least-squares routine\n scipy.sparse.linalg.lsqr. It is the fastest but may not be available\n in old scipy versions. It also uses an iterative procedure.\n\n - 'sag' uses a Stochastic Average Gradient descent. It also uses an\n iterative procedure, and is faster than other solvers when both\n n_samples and n_features are large.\n\n .. versionadded:: 0.17\n Stochastic Average Gradient descent solver.\n\n tol : float\n Precision of the solution.\n\n random_state : int seed, RandomState instance, or None (default)\n The seed of the pseudo random number generator to use when\n shuffling the data. Used in 'sag' solver.\n\n Attributes\n ----------\n coef_ : array, shape (n_features,) or (n_classes, n_features)\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n n_iter_ : array or None, shape (n_targets,)\n Actual number of iterations for each target. Available only for\n sag and lsqr solvers. Other solvers will return None.\n\n See also\n --------\n Ridge, RidgeClassifierCV\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,\n copy_X=True, max_iter=None, tol=1e-3, class_weight=None,\n solver=\"auto\", random_state=None):\n super(RidgeClassifier, self).__init__(\n alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,\n copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,\n random_state=random_state)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples,n_features]\n Training data\n\n y : array-like, shape = [n_samples]\n Target values\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n .. 
versionadded:: 0.17\n *sample_weight* support to Classifier.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n else:\n # we don't (yet) support multi-label classification in Ridge\n raise ValueError(\n \"%s doesn't support multi-label classification\" % (\n self.__class__.__name__))\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n\n\nclass _RidgeGCV(LinearModel):\n \"\"\"Ridge regression with built-in Generalized Cross-Validation\n\n It allows efficient Leave-One-Out cross-validation.\n\n This class is not intended to be used directly. Use RidgeCV instead.\n\n Notes\n -----\n\n We want to solve (K + alpha*Id)c = y,\n where K = X X^T is the kernel matrix.\n\n Let G = (K + alpha*Id)^-1.\n\n Dual solution: c = Gy\n Primal solution: w = X^T c\n\n Compute eigendecomposition K = Q V Q^T.\n Then G = Q (V + alpha*Id)^-1 Q^T,\n where (V + alpha*Id) is diagonal.\n It is thus inexpensive to inverse for many alphas.\n\n Let loov be the vector of prediction values for each example\n when the model was fitted with all examples but this example.\n\n loov = (KGY - diag(KG)Y) / diag(I-KG)\n\n Let looe be the vector of prediction errors for each example\n when the model was fitted with all examples but this example.\n\n looe = y - loov = c / diag(G)\n\n References\n ----------\n http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf\n http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf\n \"\"\"\n\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False,\n scoring=None, copy_X=True,\n gcv_mode=None, store_cv_values=False):\n self.alphas = np.asarray(alphas)\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.copy_X = copy_X\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def _pre_compute(self, X, y):\n # even if X is very sparse, K is usually very dense\n K = safe_sparse_dot(X, X.T, dense_output=True)\n v, Q = linalg.eigh(K)\n QT_y = np.dot(Q.T, y)\n return v, Q, QT_y\n\n def _decomp_diag(self, v_prime, Q):\n # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))\n return (v_prime * Q ** 2).sum(axis=-1)\n\n def _diag_dot(self, D, B):\n # compute dot(diag(D), B)\n if len(B.shape) > 1:\n # handle case where B is > 1-d\n D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]\n return D * B\n\n def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):\n \"\"\"Helper function to avoid code duplication between self._errors and\n self._values.\n\n Notes\n -----\n We don't construct matrix G, instead compute action on y & diagonal.\n \"\"\"\n w = 1.0 / (v + alpha)\n c = np.dot(Q, self._diag_dot(w, QT_y))\n G_diag = self._decomp_diag(w, Q)\n # handle case where y is 2-d\n if len(y.shape) != 1:\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors(self, alpha, y, v, Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return (c / G_diag) ** 2, c\n\n def _values(self, alpha, y, v, 
Q, QT_y):\n G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)\n return y - (c / G_diag), c\n\n def _pre_compute_svd(self, X, y):\n if sparse.issparse(X):\n raise TypeError(\"SVD not supported for sparse matrices\")\n U, s, _ = linalg.svd(X, full_matrices=0)\n v = s ** 2\n UT_y = np.dot(U.T, y)\n return v, U, UT_y\n\n def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):\n \"\"\"Helper function to avoid code duplication between self._errors_svd\n and self._values_svd.\n \"\"\"\n w = ((v + alpha) ** -1) - (alpha ** -1)\n c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y\n G_diag = self._decomp_diag(w, U) + (alpha ** -1)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n G_diag = G_diag[:, np.newaxis]\n return G_diag, c\n\n def _errors_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return (c / G_diag) ** 2, c\n\n def _values_svd(self, alpha, y, v, U, UT_y):\n G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)\n return y - (c / G_diag), c\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : Returns self.\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,\n multi_output=True, y_numeric=True)\n\n n_samples, n_features = X.shape\n\n X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X,\n sample_weight=sample_weight)\n\n gcv_mode = self.gcv_mode\n with_sw = len(np.shape(sample_weight))\n\n if gcv_mode is None or gcv_mode == 'auto':\n if sparse.issparse(X) or n_features > n_samples or with_sw:\n gcv_mode = 'eigen'\n else:\n gcv_mode = 'svd'\n elif gcv_mode == \"svd\" and with_sw:\n # FIXME non-uniform sample weights not yet supported\n warnings.warn(\"non-uniform sample weights unsupported for svd, \"\n \"forcing usage of eigen\")\n gcv_mode = 'eigen'\n\n if gcv_mode == 'eigen':\n _pre_compute = self._pre_compute\n _errors = self._errors\n _values = self._values\n elif gcv_mode == 'svd':\n # assert n_samples >= n_features\n _pre_compute = self._pre_compute_svd\n _errors = self._errors_svd\n _values = self._values_svd\n else:\n raise ValueError('bad gcv_mode \"%s\"' % gcv_mode)\n\n v, Q, QT_y = _pre_compute(X, y)\n n_y = 1 if len(y.shape) == 1 else y.shape[1]\n cv_values = np.zeros((n_samples * n_y, len(self.alphas)))\n C = []\n\n scorer = check_scoring(self, scoring=self.scoring, allow_none=True)\n error = scorer is None\n\n for i, alpha in enumerate(self.alphas):\n weighted_alpha = (sample_weight * alpha\n if sample_weight is not None\n else alpha)\n if error:\n out, c = _errors(weighted_alpha, y, v, Q, QT_y)\n else:\n out, c = _values(weighted_alpha, y, v, Q, QT_y)\n cv_values[:, i] = out.ravel()\n C.append(c)\n\n if error:\n best = cv_values.mean(axis=0).argmin()\n else:\n # The scorer want an object that will make the predictions but\n # they are already computed efficiently by _RidgeGCV. 
This\n # identity_estimator will just return them\n def identity_estimator():\n pass\n identity_estimator.decision_function = lambda y_predict: y_predict\n identity_estimator.predict = lambda y_predict: y_predict\n\n out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])\n for i in range(len(self.alphas))]\n best = np.argmax(out)\n\n self.alpha_ = self.alphas[best]\n self.dual_coef_ = C[best]\n self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)\n\n self._set_intercept(X_offset, y_offset, X_scale)\n\n if self.store_cv_values:\n if len(y.shape) == 1:\n cv_values_shape = n_samples, len(self.alphas)\n else:\n cv_values_shape = n_samples, n_y, len(self.alphas)\n self.cv_values_ = cv_values.reshape(cv_values_shape)\n\n return self\n\n\nclass _BaseRidgeCV(LinearModel):\n def __init__(self, alphas=(0.1, 1.0, 10.0),\n fit_intercept=True, normalize=False, scoring=None,\n cv=None, gcv_mode=None,\n store_cv_values=False):\n self.alphas = alphas\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.scoring = scoring\n self.cv = cv\n self.gcv_mode = gcv_mode\n self.store_cv_values = store_cv_values\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit Ridge regression model\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training data\n\n y : array-like, shape = [n_samples] or [n_samples, n_targets]\n Target values\n\n sample_weight : float or array-like of shape [n_samples]\n Sample weight\n\n Returns\n -------\n self : Returns self.\n \"\"\"\n if self.cv is None:\n estimator = _RidgeGCV(self.alphas,\n fit_intercept=self.fit_intercept,\n normalize=self.normalize,\n scoring=self.scoring,\n gcv_mode=self.gcv_mode,\n store_cv_values=self.store_cv_values)\n estimator.fit(X, y, sample_weight=sample_weight)\n self.alpha_ = estimator.alpha_\n if self.store_cv_values:\n self.cv_values_ = estimator.cv_values_\n else:\n if self.store_cv_values:\n raise ValueError(\"cv!=None and store_cv_values=True \"\n \" are incompatible\")\n parameters = {'alpha': self.alphas}\n fit_params = {'sample_weight': sample_weight}\n gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),\n parameters, fit_params=fit_params, cv=self.cv)\n gs.fit(X, y)\n estimator = gs.best_estimator_\n self.alpha_ = gs.best_estimator_.alpha\n\n self.coef_ = estimator.coef_\n self.intercept_ = estimator.intercept_\n\n return self\n\n\nclass RidgeCV(_BaseRidgeCV, RegressorMixin):\n \"\"\"Ridge regression with built-in cross-validation.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as \n LogisticRegression or LinearSVC. \n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. 
data is expected to be already centered).\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`sklearn.model_selection.StratifiedKFold` is used, else, \n :class:`sklearn.model_selection.KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n gcv_mode : {None, 'auto', 'svd', eigen'}, optional\n Flag indicating which strategy to use when performing\n Generalized Cross-Validation. Options are::\n\n 'auto' : use svd if n_samples > n_features or when X is a sparse\n matrix, otherwise use eigen\n 'svd' : force computation via singular value decomposition of X\n (does not work for sparse matrices)\n 'eigen' : force computation via eigendecomposition of X^T X\n\n The 'auto' mode is the default and is intended to pick the cheaper\n option of the two depending upon the shape and format of the training\n data.\n\n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n each alpha should be stored in the `cv_values_` attribute (see\n below). This flag is only compatible with `cv=None` (i.e. using\n Generalized Cross-Validation).\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_targets, n_alphas], optional\n Cross-validation values for each alpha (if `store_cv_values=True` and \\\n `cv=None`). After `fit()` has been called, this attribute will \\\n contain the mean squared errors (by default) or the values of the \\\n `{loss,score}_func` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter.\n\n See also\n --------\n Ridge: Ridge regression\n RidgeClassifier: Ridge classifier\n RidgeClassifierCV: Ridge classifier with built-in cross validation\n \"\"\"\n pass\n\n\nclass RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n \"\"\"Ridge classifier with built-in cross-validation.\n\n By default, it performs Generalized Cross-Validation, which is a form of\n efficient Leave-One-Out cross-validation. 
Currently, only the n_features >\n n_samples case is handled efficiently.\n\n Read more in the :ref:`User Guide <ridge_regression>`.\n\n Parameters\n ----------\n alphas : numpy array of shape [n_alphas]\n Array of alpha values to try.\n Regularization strength; must be a positive float. Regularization\n improves the conditioning of the problem and reduces the variance of\n the estimates. Larger values specify stronger regularization.\n Alpha corresponds to ``C^-1`` in other linear models such as \n LogisticRegression or LinearSVC. \n\n fit_intercept : boolean\n Whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n scoring : string, callable or None, optional, default: None\n A string (see model evaluation documentation) or\n a scorer callable object / function with signature\n ``scorer(estimator, X, y)``.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the efficient Leave-One-Out cross-validation\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n class_weight : dict or 'balanced', optional\n Weights associated with classes in the form ``{class_label: weight}``.\n If not given, all classes are supposed to have weight one.\n\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_responses, n_alphas], optional\n Cross-validation values for each alpha (if `store_cv_values=True` and\n `cv=None`). After `fit()` has been called, this attribute will contain \\\n the mean squared errors (by default) or the values of the \\\n `{loss,score}_func` function (if provided in the constructor).\n\n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n\n intercept_ : float | array, shape = (n_targets,)\n Independent term in decision function. Set to 0.0 if\n ``fit_intercept = False``.\n\n alpha_ : float\n Estimated regularization parameter\n\n See also\n --------\n Ridge: Ridge regression\n RidgeClassifier: Ridge classifier\n RidgeCV: Ridge regression with built-in cross validation\n\n Notes\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. 
Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n normalize=False, scoring=None, cv=None, class_weight=None):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n scoring=scoring, cv=cv)\n self.class_weight = class_weight\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vectors, where n_samples is the number of samples\n and n_features is the number of features.\n\n y : array-like, shape (n_samples,)\n Target values.\n\n sample_weight : float or numpy array of shape (n_samples,)\n Sample weight.\n\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)\n Y = self._label_binarizer.fit_transform(y)\n if not self._label_binarizer.y_type_.startswith('multilabel'):\n y = column_or_1d(y, warn=True)\n\n if self.class_weight:\n if sample_weight is None:\n sample_weight = 1.\n # modify the sample weights with the corresponding class weight\n sample_weight = (sample_weight *\n compute_sample_weight(self.class_weight, y))\n\n _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)\n return self\n\n @property\n def classes_(self):\n return self._label_binarizer.classes_\n"
] | [
[
"numpy.ones",
"numpy.sum",
"scipy.linalg.lstsq",
"numpy.reciprocal",
"numpy.vstack",
"scipy.sparse.dia_matrix",
"scipy.sparse.csc_matrix",
"numpy.average",
"numpy.zeros",
"numpy.dot",
"numpy.empty",
"scipy.sparse.issparse",
"scipy.sparse.csr_matrix",
"numpy.exp",
"numpy.atleast_1d",
"numpy.ravel",
"numpy.iinfo",
"scipy.sparse.isspmatrix",
"numpy.sqrt",
"numpy.find_common_type"
],
[
"scipy.linalg.lstsq",
"numpy.asarray",
"scipy.sparse.linalg.LinearOperator",
"scipy.sparse.linalg.aslinearoperator",
"scipy.linalg.eigh",
"numpy.zeros",
"scipy.linalg.solve",
"numpy.repeat",
"numpy.argmax",
"scipy.sparse.linalg.lsqr",
"scipy.sparse.linalg.cg",
"numpy.empty",
"scipy.sparse.issparse",
"numpy.atleast_1d",
"numpy.shape",
"numpy.sqrt",
"numpy.dot",
"numpy.outer",
"scipy.linalg.svd"
]
] |
StephenLouis/ISIC_2019 | [
"340ece42915c770e68bc13da64698a7a8987420e"
] | [
"Data_Loader.py"
] | [
"import os\nimport torch\nimport csv\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom PIL import Image\n\ndef split_csv(file):\n data = []\n a_train_file = r'/home/huangyinyue/ISIC_2019/train.csv'\n a_test_file = r'/home/huangyinyue/ISIC_2019/test.csv'\n\n seed = 3\n np.random.seed(seed)\n train_indices = np.random.choice(25331, 20265, replace=False) # 设置随机数生成从0-150中随机挑选120个随机数\n test_indices = np.array(list(set(range(25331)) - set(train_indices)))\n # test_indices = np.random.choice(len(residue), 30, replace=False) # 如果训练集和测试集综合的数据加起来就是一整个数据集则不需要这个操作\n\n with open(file)as afile:\n a_reader = csv.reader(afile) # 从原始数据集中将所有数据读取出来并保存到a_reader中\n labels = next(a_reader) # 提取第一行设置为labels\n for row in a_reader: # 将a_reader中每一行的数据提取出来并保存到data的列表中\n data.append(row)\n\n # 生成训练数据集\n if not os.path.exists(a_train_file):\n with open(a_train_file, \"w\", newline='') as a_trian:\n writer = csv.writer(a_trian)\n writer.writerows([labels]) # 第一行为标签行\n writer.writerows(np.array(data)[train_indices])\n a_trian.close()\n\n # 生成测试数据集\n if not os.path.exists(a_test_file):\n with open(a_test_file, \"w\", newline='')as a_test:\n writer = csv.writer(a_test)\n writer.writerows([labels]) # 第一行为标签行\n writer.writerows(np.array(data)[test_indices])\n a_test.close()\n\n\ndef read_labels_csv(file,header=True):\n images = []\n num_categories = 0\n with open(file, 'r') as f:\n reader = csv.reader(f)\n rownum = 0\n for row in reader:\n if header and rownum == 0:\n header = row\n else:\n if num_categories == 0:\n num_categories = len(row) - 1\n name = row[0]\n labels = (np.asarray(row[1:num_categories + 1])).astype(np.float32)\n labels = torch.from_numpy(labels)\n item = (name, labels)\n images.append(item)\n rownum += 1\n return images\n\nclass ISICDataset(Dataset):\n def __init__(self,csv_file,image_path,transform=None):\n self.images = read_labels_csv(csv_file)\n self.root_dir = image_path\n self.transform = transform\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, index):\n image_name,target = self.images[index]\n # print(os.path.join(self.root_dir,image_name+'.jpg'))\n image = Image.open(os.path.join(self.root_dir,image_name+'.jpg')).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n\n return image,target\n\n\nif __name__ == '__main__':\n split_csv(file=r\"/home/huangyinyue/ISIC_2019/ISIC_2019_Training_GroundTruth.csv\")\n"
] | [
[
"numpy.random.seed",
"numpy.asarray",
"numpy.random.choice",
"torch.from_numpy",
"numpy.array"
]
] |
daniyaljamal/Personality-prediction-based-on-video-using-CNN | [
"0f1052d09fe14c73e38ac529ad35e4e98a8d859e"
] | [
"data preprocessing/MTCNN2.py"
] | [
"# extract and plot each detected face in a photograph\nfrom facenet_pytorch import MTCNN\nfrom cv2 import cv2\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tqdm.notebook import tqdm\nimport os\nimport tensorflow as tf\nfrom torchvision import models\nimport torch\nfrom torchvision import transforms\nfrom pathlib import Path\n\ndef getface_from_video(path):\n \n # Create face detector\n mtcnn = MTCNN(margin=20, post_process=False)\n\n # Load a video\n v_cap = cv2.VideoCapture(path)\n v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Loop through video, taking a handful of frames to form a batch\n frames = []\n for i in tqdm(range(v_len)):\n \n # Load frame\n success = v_cap.grab()\n if i % 50 == 0:\n success, frame = v_cap.retrieve()\n else:\n continue\n if not success:\n continue\n \n # Add to batch\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frames.append(Image.fromarray(frame))\n\n # Detect faces in batch\n try:\n faces = mtcnn(frames)\n for i in range(len(faces)):\n plt.imshow(faces[i].permute(1, 2, 0).int().numpy())\n plt.axis('off')\n #plt.show()\n except:\n print(\"Error in detection\")\n return plt\n\ndir(models)\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\nTRAIN_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\train\\\\Extracted\\\\train videos\\\\\"\nTEST_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\test\\\\Extracted\\\\test videos\\\\\"\nVAL_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\validate\\\\Extracted\\\\validation videos\\\\\"\nPIC_TRAIN_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\train\\\\Extracted\\\\face\\\\\"\nPIC_TEST_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\test\\\\Extracted\\\\face\\\\\"\nPIC_VAL_DIR = \"E:\\\\F Y P\\\\Personality prediction using image processing\\\\datasets\\\\First Impressions V2 (CVPR'17)\\\\validate\\\\Extracted\\\\face\\\\\"\n\ntrain_videos = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]\ntest_videos = [TEST_DIR+i for i in os.listdir(TEST_DIR)]\nval_videos = [VAL_DIR+i for i in os.listdir(VAL_DIR)]\n\ni=0\nwhile (i<len(val_videos)):\n #print(train_videos[i])\n fig = getface_from_video(val_videos[i])\n fig.savefig(os.path.splitext(PIC_VAL_DIR+Path(val_videos[i]).name)[0] +\".jpg\", bbox_inches='tight')\n i+=1\n"
] | [
[
"matplotlib.pyplot.axis"
]
] |
Igor-ID/Image-Compression | [
"e54881b62f258260baa7036cdd3b264b0d8adf05"
] | [
"wavelet_compress.py"
] | [
"import pywt\nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nimport numpy as np\n\n\"\"\"Image compression using discrete Wavelet transform.\"\"\"\n\nplt.rcParams['figure.figsize'] = [8, 8]\nplt.rcParams.update({'font.size': 18})\n\nim = imread('data/dog.jpg')\nim_gray = np.mean(im, -1) # convert RGB to gray scale\n\n# Wavelet Compression\nn = 4\n# Use Daubechies 1 wavelet family.\nw = 'db1'\ncoeffs = pywt.wavedec2(im_gray, wavelet=w, level=n)\ncoeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs)\nCsort = np.sort(np.abs(coeff_arr.reshape(-1)))\n\nfor keep in (0.1, 0.05, 0.01, 0.005):\n thresh = Csort[int(np.floor((1 - keep) * len(Csort)))]\n ind = np.abs(coeff_arr) > thresh\n Cfilt = coeff_arr * ind # Threshold small indices\n\n coeffs_filt = pywt.array_to_coeffs(Cfilt, coeff_slices, output_format='wavedec2')\n\n # Plot reconstruction\n Arecon = pywt.waverec2(coeffs_filt, wavelet=w)\n plt.figure()\n plt.imshow(Arecon.astype('uint8'), cmap='gray')\n plt.axis('off')\n plt.title('keep = ' + str(keep))\n\nplt.show()\n# Conclusion. As we can see, image compression works batter when we using Wavelets in compare with FFT\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"numpy.abs",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
"matplotlib.image.imread",
"numpy.mean"
]
] |
drigols/Studies | [
"9c293156935b491ded24be6b511daac67fd43538"
] | [
"modules/ai-codes/modules/knn/src/iris-v1.py"
] | [
"########################################################\n# Rodrigo Leite - drigols #\n# Last update: 31/10/2021 #\n########################################################\n\nfrom sklearn.datasets import load_iris\nimport pandas as pd\n\niris = load_iris()\n\nx = pd.DataFrame(iris.data, columns=[iris.feature_names])\ny = pd.Series(iris.target)\n\nprint(\"Load Iris dataset dimensions: {0}\".format(x.shape))\nprint(\"Load Iris dataset features:\\n\", x.head(10))\n\n"
] | [
[
"pandas.Series",
"pandas.DataFrame",
"sklearn.datasets.load_iris"
]
] |
tableClothed/face-filters | [
"8b236643b4e22a925df6a1c299f3887fdedb3e8e"
] | [
"flask/camera.py"
] | [
"import cv2\nimport numpy as np\nimport dlib\nfrom imutils import face_utils, translate\n\nclass Camera(object):\n\tdef __init__(self):\n\t\tself.camera = cv2.VideoCapture(0)\n\n\t\tp = \"../data/shape_predictor_68_face_landmarks.dat\"\n\t\tself.detector = dlib.get_frontal_face_detector()\n\t\tself.predictor = dlib.shape_predictor(p)\n\t\tself.effect = \"contours\"\n\n\n\tdef __del__(self):\n\t\tself.camera.release()\n\n\n\tdef return_jpg(self, frame):\n\t\tret, jpeg = cv2.imencode('.jpeg', frame)\n\t\treturn jpeg.tobytes()\n\n\n\n\tdef return_effect(self):\n\t\tif self.effect == \"contours\":\n\t\t\tframe = self.effect_canny()\n\n\t\telif self.effect == \"baby\":\n\t\t\tframe = self.effect_baby_face()\n\n\t\telif self.effect == \"blurr\":\n\t\t\tframe = self.effect_bluring_face()\n\n\t\telif self.effect == \"cartoon\":\n\t\t\tframe = self.effect_cartoon()\n\n\t\telif self.effect == \"doggy\":\t\n\t\t\tframe = self.effect_dog_face()\n\n\t\telif self.effect == \"large\":\t\n\t\t\tframe = self.effect_enlarged()\n\n\t\telif self.effect == \"mirrors\":\t\n\t\t\tframe = self.effect_mirror()\n\n\t\telif self.effect == \"triangle\":\t\n\t\t\tframe = self.effect_delaunay_triangle()\n\n\t\telif self.effect == \"glasses\":\t\n\t\t\tframe = self.effect_glasses()\n\n\t\treturn frame\n\n\n\n\t# ---------------\n\t# BABY FACE\n\t# ---------------\n\tdef effect_baby_face(self):\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\t\toffset = 4\n\t\tscale = 1.3\n\n\t\tframe_2 = frame.copy()\n\t\tmask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tmask = np.zeros(frame.shape, frame.dtype)\n\n\t\teye_mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\teye_mask = np.zeros(frame.shape, frame.dtype)\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\trects = self.detector(gray, 0)\n\n\t\tfor rect in rects:\n\t\t\tshape = self.predictor(gray, rect)\n\t\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t\tl_eye, r_eye = shape[36:42], shape[42:48]\n\n\t\t\t(lx, ly, lw, lh) = cv2.boundingRect(l_eye)\n\t\t\t(rx, ry, rw, rh) = cv2.boundingRect(r_eye)\n\n\t\t\tl_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset]\n\t\t\tr_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset]\n\n\t\t\tcenter_ly = lx + int(lw / 2)\n\t\t\tcenter_lx = ly + int(lh / 2) + 20\n\t\t\tcenter_ry = rx + int(rw / 2)\n\t\t\tcenter_rx = ry + int(rh / 2) + 20\n\n\t\t\tmouth = shape[48:69]\n\n\t\t\t(mx, my, mw, mh) = cv2.boundingRect(mouth)\n\t\t\tmouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset]\n\n\t\t\tcenter_my = mx + int(mw / 2)\n\t\t\tcenter_mx = my + int(mh / 2)\n\n\t\t\tly_scaled = int((l_eye.shape[1]*scale)/2)\n\t\t\tlx_scaled = int((l_eye.shape[0]*scale)/2)\n\t\t\try_scaled = int((r_eye.shape[1]*scale)/2)\n\t\t\trx_scaled = int((r_eye.shape[0]*scale)/2)\n\n\t\t\tl_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA)\n\t\t\tr_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA)\n\n\t\t\tframe[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye\n\t\t\tmask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255\n\t\t\tframe[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye\n\t\t\tmask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255\n\n\t\t\tfinal_center_x = int(np.mean([center_lx, center_rx]))\n\t\t\tfinal_center_y = int(np.mean([center_ly, center_ry]))\n\n\t\t\tframe = 
cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE)\n\n\t\treturn self.return_jpg(frame)\n\n\n\t# ------------------\n\t# ENLARGED EYES\n\t# ------------------\n\tdef effect_enlarged(self):\n\t\toffset = 4\n\t\tscale = 2\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\t\tframe_2 = frame.copy()\n\n\t\tmask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tmask = np.zeros(frame.shape, frame.dtype)\n\n\t\tl_eye, r_eye = 0, 0\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\trects = self.detector(gray, 0)\n\n\t\tfor rect in rects:\n\t\t\tshape = self.predictor(gray, rect)\n\t\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t\tl_eye, r_eye = shape[36:42], shape[42:48]\n\n\t\t\t(lx, ly, lw, lh) = cv2.boundingRect(l_eye)\n\t\t\t(rx, ry, rw, rh) = cv2.boundingRect(r_eye)\n\n\t\t\tl_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset]\n\t\t\tr_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset]\n\n\t\t\tcenter_ly = lx + int(lw / 2)\n\t\t\tcenter_lx = ly + int(lh / 2) + 20\n\t\t\tcenter_ry = rx + int(rw / 2)\n\t\t\tcenter_rx = ry + int(rh / 2) + 20\n\n\t\t\tmouth = shape[48:69]\n\n\t\t\t(mx, my, mw, mh) = cv2.boundingRect(mouth)\n\t\t\tmouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset]\n\n\t\t\tcenter_my = mx + int(mw / 2)\n\t\t\tcenter_mx = my + int(mh / 2)\n\n\t\t\tly_scaled = int((l_eye.shape[1]*1.7)/2)\n\t\t\tlx_scaled = int((l_eye.shape[0]*1.7)/2)\n\t\t\try_scaled = int((r_eye.shape[1]*1.7)/2)\n\t\t\trx_scaled = int((r_eye.shape[0]*1.7)/2)\n\n\t\t\tl_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA)\n\t\t\tr_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA)\n\n\t\t\tmy_scaled = int((mouth.shape[1]*scale)/2)\n\t\t\tmx_scaled = int((mouth.shape[0]*scale)/2)\n\n\t\t\tmouth = cv2.resize(mouth, (my_scaled*2, mx_scaled*2), interpolation = cv2.INTER_AREA)\n\n\t\t\tframe[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = mouth\n\t\t\tmask[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = 255\n\n\t\t\tframe[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye\n\t\t\tmask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255\n\t\t\tframe[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye\n\t\t\tmask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255\n\n\t\t\tfinal_center_x = int(np.mean([center_lx, center_mx, center_rx]))\n\t\t\tfinal_center_y = int(np.mean([center_ly, center_my, center_ry]))\n\n\t\t\tframe = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE)\n\n\t\treturn self.return_jpg(frame)\n\n\t\n\t# ------------------\n\t# BLURRING FACE\n\t# ------------------\n\tdef effect_bluring_face(self):\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\t\tface = 0\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\trects = self.detector(gray, 0)\n\n\t\tfor rect in rects:\n\t\t\tshape = self.predictor(gray, rect)\n\t\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t\t(x, y, w, h) = face_utils.rect_to_bb(rect)\n\n\t\t\tface = frame[y:y+h, x:x+w]\n\t\t\tface = blurr_face(face)\n\t\t\tface = pixel_face(face)\n\n\t\t\tframe[y:y+h, x:x+w] = face\n\t\t\n\t\treturn self.return_jpg(frame)\n\n\n\t# ------------------------\n\t# DELAUNAY TRIANGLE\n\t# 
------------------------\n\tdef effect_delaunay_triangle(self):\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\n\t\tjaw = [0, 17]\n\t\tr_eyebrow, l_eyebrow = [18, 22], [23, 27]\n\t\tnose = [28, 36]\n\t\tr_eye, l_eye = [37, 42], [43, 48]\n\t\tmouth = [49, 68]\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tmask = np.zeros_like(gray)\n\n\t\tfaces = self.detector(gray, 0)\n\t\tfor face in faces:\n\t\t landmark = self.predictor(gray, face)\n\t\t landmark_points = []\n\t\t for n in range(68):\n\t\t x = landmark.part(n).x\n\t\t y = landmark.part(n).y\n\t\t landmark_points.append((x, y))\n\n\t\t points = np.array(landmark_points, np.int32)\n\t\t convexhull = cv2.convexHull(points)\n\n\t\t cv2.fillConvexPoly(mask, convexhull, 255)\n\n\t\t face = cv2.bitwise_and(frame, frame, mask=mask)\n\n\t\t gray = delaunay_traingle(convexhull, landmark_points, gray, landmark_points)\n\n\t\treturn self.return_jpg(gray)\n\n\n\t# --------------\n\t# DOG FACE\n\t# --------------\n\tdef effect_dog_face(self):\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\t\tdog_nose = cv2.imread(\"../images/nose.png\", -1)\n\t\tdog_ears = cv2.imread(\"../images/ears.png\", -1)\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\trects = self.detector(gray, 0)\n\n\t\tfor rect in rects:\n\t\t\tshape = self.predictor(gray, rect)\n\t\t\tshape = face_utils.shape_to_np(shape)\n\n\t\t\tears_width = int(abs(shape[0][0] - shape[16][0]) * 1.5)\n\t\t\tears_height = int(ears_width * 0.4)\n\n\t\t\tears_x = int((shape[22][0] + shape[23][0])/2)\n\t\t\tears_y = shape[20][1] - 50\n\n\t\t\thalf_width = int(ears_width/2.0)\n\t\t\thalf_height = int(ears_height/2.0)\n\n\t\t\ty1, y2 = ears_y - half_height, ears_y + half_height\n\t\t\tx1, x2 = ears_x - half_width, ears_x + half_width\n\n\t\t\tdog_ears = cv2.resize(dog_ears, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)\n\n\t\t\talpha_s = dog_ears[:, :, 3] / 255.0\n\t\t\talpha_l = 1.0 - alpha_s\n\n\t\t\tfor c in range(0, 3):\n\t\t\t frame[y1:y2, x1:x2, c] = (alpha_s * dog_ears[:, :, c] + \n\t\t\t alpha_l * frame[y1:y2, x1:x2, c])\n\n\t\t\tnose_width = int(abs(shape[36][0] - shape[32][0]) * 1.7)\n\t\t\tnose_height = int(nose_width * 0.7)\n\n\t\t\t(nose_x, nose_y) = shape[30]\n\n\t\t\thalf_width = int(nose_width/2.0)\n\t\t\thalf_height = int(nose_height/2.0)\n\n\t\t\ty1, y2 = nose_y - half_height, nose_y + half_height\n\t\t\tx1, x2 = nose_x - half_width, nose_x + half_width\n\n\t\t\tdog_nose = cv2.resize(dog_nose, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)\n\n\t\t\talpha_s = dog_nose[:, :, 3] / 255.0\n\t\t\talpha_l = 1.0 - alpha_s\n\n\t\t\tfor c in range(0, 3):\n\t\t\t\tframe[y1:y2, x1:x2, c] = (alpha_s * dog_nose[:, :, c] + \n\t\t\t alpha_l * frame[y1:y2, x1:x2, c])\n\n\t\treturn self.return_jpg(frame)\n\n\n\t# -----------------\n\t# FUNNY GLASSES\n\t# -----------------\n\tdef effect_glasses(self):\n\t\tret, frame = self.camera.read()\n\t\tif not ret:\n\t\t\treturn False\n\t\tglasses = cv2.imread(\"../images/glasses.png\", -1)\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\trects = self.detector(gray, 0)\n\n\t\tfor rect in rects:\n\t\t\tshape = self.predictor(gray, rect)\n\t\t\tshape = face_utils.shape_to_np(shape)\n\n\t\tglasses_width = int(abs(shape[36][0] - shape[32][0]) * 4)\n\t\tglasses_height = int(glasses_width * 0.7)\n\n\t\t(glasses_x, glasses_y) = shape[30]\n\t\tglasses_y -= 20\n\n\t\thalf_width = int(glasses_width/2.0)\n\t\thalf_height = int(glasses_height/2.0)\n\n\t\ty1, y2 
= glasses_y - half_height, glasses_y + half_height\n\t\tx1, x2 = glasses_x - half_width, glasses_x + half_width\n\n\t\tglasses = cv2.resize(glasses, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA)\n\n\t\talpha_s = glasses[:, :, 3] / 255.0\n\t\talpha_l = 1.0 - alpha_s\n\n\t\tfor c in range(0, 3):\n\t\t\tframe[y1:y2, x1:x2, c] = (alpha_s * glasses[:, :, c] + \n\t\t alpha_l * frame[y1:y2, x1:x2, c])\n\n\n\t\treturn self.return_jpg(frame)\n\n\n\t# ----------------------\n\t# CARTOON-ISH\n\t# ----------------------\n\tdef effect_cartoon(self):\n\t\tret, frame = self.camera.read()\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tgray = cv2.medianBlur(gray, 5)\n\t\tedges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 6)\n\n\t\tcolor = cv2.bilateralFilter(frame, 9, 150, 0.25)\n\t\tcartoon = cv2.bitwise_and(color, color, mask=edges)\n\n\t\treturn self.return_jpg(cartoon)\n\n\n\t# ------------\n\t# CANNY\n\t# ------------\n\tdef effect_canny(self):\n\t\tret, frame = self.camera.read()\n\n\t\tgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\tblurred = cv2.GaussianBlur(gray, (3, 3), 0)\n\n\t\tmedian = np.median(blurred)\n\t\tl_edge = int(max(0, 0.77 * median))\n\t\tu_edge = int(max(0, 1.33 * median))\n\n\t\tcanny = cv2.Canny(blurred, l_edge, u_edge)\n\n\t\treturn self.return_jpg(canny)\n\n\n\t# ------------\n\t# MIRRORS\n\t# ------------\n\tdef effect_mirror(self):\n\t\tret, frame = self.camera.read()\n\n\t\tsplit = frame.shape[1] // 2\n\t\tone_half = frame[:, :split, :]\n\t\tsec_half = cv2.flip(one_half, 1)\n\n\t\tframe = np.hstack((one_half, sec_half))\n\n\t\treturn self.return_jpg(frame)\n\n\n\n# ---------------------\n# ADDITIONAL FUNCTIONS\n# ---------------------\n\ndef blurr_face(image):\n\t(h, w) = image.shape[:2]\n\n\tkernel_w = int(w/3.0)\n\tkernel_h = int(h/3.0)\n\n\tif kernel_w % 2 == 0:\n\t\tkernel_w -= 1\n\telse: kernel_w = 5\n\n\tif kernel_h % 2 == 0:\n\t\tkernel_h -= 1\n\telse: kernel_h = 5\n\n\timg = cv2.GaussianBlur(image, (kernel_w, kernel_h), 0)\n\treturn img\n\n\ndef pixel_face(image):\n\tblocks = 16\n\t(h, w) = image.shape[:2]\n\txSteps = np.linspace(0, w, blocks+1, dtype=\"int\")\n\tySteps = np.linspace(0, h, blocks+1, dtype=\"int\")\n\n\tfor i in range(1, len(ySteps)):\n\t\tfor j in range(1, len(xSteps)):\n\t\t\tstartX = xSteps[j - 1]\n\t\t\tstartY = ySteps[i - 1]\n\t\t\tendX = xSteps[j]\n\t\t\tendY = ySteps[i]\n\n\t\t\troi = image[startY:endY, startX:endX]\n\t\t\t(B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]\n\t\t\tcv2.rectangle(image, (startX, startY), (endX, endY),\n\t\t\t\t(B, G, R), -1)\n\n\treturn image\n\n\n\ndef delaunay_traingle(convexHull, points, frame, landmark_points):\n rect = cv2.boundingRect(convexHull)\n\n subdiv = cv2.Subdiv2D(rect)\n subdiv.insert(landmark_points)\n\n triangles = subdiv.getTriangleList()\n triangles = np.array(triangles, dtype=np.int32)\n\n\n for t in triangles:\n A, B, C = (t[0], t[1]), (t[2], t[3]), (t[4], t[5])\n\n cv2.line(frame, A, B, (255, 255, 255), 1, cv2.LINE_AA, 0)\n cv2.line(frame, B, C, (255, 255, 255), 1, cv2.LINE_AA, 0)\n cv2.line(frame, A, C, (255, 255, 255), 1, cv2.LINE_AA, 0)\n\n return frame"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"numpy.median",
"numpy.hstack",
"numpy.array",
"numpy.linspace",
"numpy.mean"
]
] |
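Aside: the face-effect record above pastes RGBA props (dog nose, ears, glasses) onto the camera frame by weighting each colour channel with the overlay's alpha channel. Below is a minimal, self-contained sketch of that compositing step only; the synthetic red patch and the helper name paste_rgba are illustrative assumptions, not taken from the source file, and like the original it does not clamp the paste region to the frame bounds.

import numpy as np

def paste_rgba(frame, overlay_rgba, x1, y1):
    """Blend a 4-channel BGRA overlay onto a 3-channel BGR frame at (x1, y1)."""
    h, w = overlay_rgba.shape[:2]
    roi = frame[y1:y1 + h, x1:x1 + w]
    alpha = overlay_rgba[:, :, 3:4] / 255.0            # per-pixel opacity in [0, 1]
    blended = alpha * overlay_rgba[:, :, :3] + (1.0 - alpha) * roi
    frame[y1:y1 + h, x1:x1 + w] = blended.astype(np.uint8)
    return frame

if __name__ == "__main__":
    frame = np.zeros((100, 100, 3), dtype=np.uint8)    # dummy black frame (stand-in for a camera read)
    overlay = np.zeros((20, 20, 4), dtype=np.uint8)    # synthetic prop instead of ../images/nose.png
    overlay[..., 2] = 255                              # solid red patch (BGR channel order)
    overlay[..., 3] = 128                              # roughly 50% opaque
    paste_rgba(frame, overlay, 40, 40)
    print(frame[50, 50])                               # approximately [0 0 128]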
RudyVenguswamy/DALI | [
"1456689cbb06a6d6f2c46c3fd231d1c296808e00"
] | [
"dali/test/python/test_operator_gaussian_blur.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.types as types\nimport nvidia.dali.fn as fn\n\nimport numpy as np\nimport cv2\nfrom scipy.ndimage import convolve1d\nimport os\nfrom nose.tools import raises\nfrom nose.plugins.attrib import attr\n\nfrom test_utils import get_dali_extra_path, check_batch, compare_pipelines, RandomlyShapedDataIterator, dali_type\n\ndata_root = get_dali_extra_path()\nimages_dir = os.path.join(data_root, 'db', 'single', 'jpeg')\n\ntest_iters = 4\n\nshape_layout_axes_cases = [((20, 20, 30, 3), \"DHWC\", 3), ((20, 20, 30), \"\", 3),\n ((20, 30, 3), \"HWC\", 2), ((20, 30), \"HW\", 2),\n ((3, 30, 20), \"CWH\", 2), ((5, 20, 30, 3), \"FHWC\", 2),\n ((5, 10, 10, 7, 3), \"FDHWC\", 3), ((5, 3, 20, 30), \"FCHW\", 2),\n ((3, 5, 10, 10, 7), \"CFDHW\", 3)]\n\ndef to_batch(tl, batch_size):\n return [np.array(tl[i]) for i in range(batch_size)]\n\n\ndef to_cv_sigma(sigma, axes=2):\n if sigma is None:\n return (0,) * axes\n elif isinstance(sigma, (int, float)):\n return (sigma,) * axes\n elif (isinstance(sigma, np.ndarray) and len(sigma.shape) == 0):\n return (float(sigma),) * axes\n elif len(sigma) == 1:\n return (sigma[0],) * axes\n return tuple(reversed(sigma))\n\n\ndef to_cv_win_size(window_size, axes=2, sigma=None):\n if window_size is None:\n # when using cv2.getGaussianKernel we need to always provide window size\n if sigma is not None:\n sigma = to_cv_sigma(sigma, axes)\n return tuple([int(3 * s + 0.5) * 2 + 1 for s in sigma])\n return (0,) * axes\n elif isinstance(window_size, int):\n return (int(window_size),) * axes\n elif (isinstance(window_size, np.ndarray) and len(window_size.shape) == 0):\n return (int(window_size),) * axes\n elif len(window_size) == 1:\n return (int(window_size[0]),) * axes\n # OpenCV shape is the other way round: (width, height)\n return tuple(int(x) for x in reversed(window_size))\n\n\ndef gaussian_cv(image, sigma, window_size):\n sigma_x, sigma_y = to_cv_sigma(sigma)\n window_size_cv = to_cv_win_size(window_size)\n # compute on floats and round like a sane person (in mathematically complicit way)\n blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y)\n return np.uint8(blurred + 0.5)\n\n\ndef gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8):\n sigma_xyz = to_cv_sigma(sigma, axes)\n win_xyz = to_cv_win_size(window_size, axes, sigma)\n filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)]\n filters = [np.float32(f).squeeze() for f in filters]\n filters.reverse()\n for i in reversed(range(axes)):\n axis = i + skip_axes\n image = convolve1d(np.float32(image), filters[i], axis, mode=\"mirror\")\n if dtype == np.float32:\n return image\n else:\n return dtype(image + 0.5)\n\n\ndef get_gaussian_pipe(batch_size, sigma, window_size, op_type):\n pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)\n with pipe:\n 
input, _ = fn.file_reader(file_root=images_dir, shard_id=0, num_shards=1)\n decoded = fn.image_decoder(input, device=\"cpu\", output_type=types.RGB)\n if op_type == \"gpu\":\n decoded = decoded.gpu()\n blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size)\n pipe.set_outputs(blurred, decoded)\n return pipe\n\n\ndef check_gaussian_blur(batch_size, sigma, window_size, op_type=\"cpu\"):\n pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type)\n pipe.build()\n for _ in range(test_iters):\n result, input = pipe.run()\n if op_type == \"gpu\":\n result = result.as_cpu()\n input = input.as_cpu()\n input = to_batch(input, batch_size)\n baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input]\n check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout=\"HWC\")\n\n\ndef test_image_gaussian_blur():\n for dev in [\"cpu\", \"gpu\"]:\n for sigma in [1.0]:\n for window_size in [3, 5, None]:\n if sigma is None and window_size is None:\n continue\n yield check_gaussian_blur, 10, sigma, window_size, dev\n # OpenCv uses fixed values for small windows that are different that Gaussian funcion\n yield check_gaussian_blur, 10, None, 11, dev\n\n\n@attr('slow')\ndef test_image_gaussian_blur_slow():\n for dev in [\"cpu\", \"gpu\"]:\n for sigma in [1.0, [1.0, 2.0]]:\n for window_size in [3, 5, [7, 5], [5, 9], None]:\n if sigma is None and window_size is None:\n continue\n yield check_gaussian_blur, 10, sigma, window_size, dev\n # OpenCv uses fixed values for small windows that are different that Gaussian funcion\n for window_size in [15, [17, 31]]:\n yield check_gaussian_blur, 10, None, window_size, dev\n\n\ndef check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size):\n cpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, \"cpu\")\n gpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, \"gpu\")\n compare_pipelines(cpu_pipe, gpu_pipe, batch_size, 16, max_allowed_error=1)\n\n\ndef test_gaussian_blur_cpu_gpu():\n for window_size in [5, [7, 13]]:\n yield check_gaussian_blur_cpu_gpu, 10, None, window_size\n\n@attr('slow')\ndef test_gaussian_blur_cpu_gpu_slow():\n for sigma in [1.0, [1.0, 2.0], None]:\n for window_size in [3, 5, [7, 5], [5, 9], 11, 15, 31, None]:\n if sigma is None and window_size is None:\n continue\n yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size\n\n\ndef count_skip_axes(layout):\n if layout.startswith(\"FC\") or layout.startswith(\"CF\"):\n return 2\n elif layout.startswith(\"F\") or layout.startswith(\"C\"):\n return 1\n else:\n return 0\n\n\ndef check_generic_gaussian_blur(\n batch_size, sigma, window_size, shape, layout, axes, op_type=\"cpu\", in_dtype=np.uint8,\n out_dtype=types.NO_TYPE):\n pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)\n data = RandomlyShapedDataIterator(batch_size, max_shape=shape, dtype=in_dtype)\n # Extract the numpy type from DALI, we can have float32 or the same as input\n if out_dtype == types.NO_TYPE:\n result_type = in_dtype\n elif dali_type(in_dtype) == out_dtype:\n result_type = in_dtype\n else:\n result_type = np.float32\n with pipe:\n input = fn.external_source(data, layout=layout)\n if op_type == \"gpu\":\n input = input.gpu()\n blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma,\n window_size=window_size, dtype=out_dtype)\n pipe.set_outputs(blurred, input)\n pipe.build()\n\n for _ in range(test_iters):\n result, input = pipe.run()\n if op_type == \"gpu\":\n result = result.as_cpu()\n input = input.as_cpu()\n input = 
to_batch(input, batch_size)\n skip_axes = count_skip_axes(layout)\n baseline = [\n gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type)\n for img in input]\n max_error = 1 if result_type != np.float32 else 1e-04\n check_batch(result, baseline, batch_size, max_allowed_error=max_error, expected_layout=layout)\n\n\n# Generate tests for single or per-axis sigma and window_size arguments\ndef generate_generic_cases(dev, t_in, t_out):\n for shape, layout, axes in shape_layout_axes_cases:\n for sigma in [1.0, [1.0, 2.0, 3.0]]:\n for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]:\n if isinstance(sigma, list):\n sigma = sigma[0:axes]\n if isinstance(window_size, list):\n window_size = window_size[0:axes]\n yield check_generic_gaussian_blur, 10, sigma, window_size, shape, layout, axes, dev, t_in, t_out\n for window_size in [11, 15]:\n yield check_generic_gaussian_blur, 10, None, window_size, shape, layout, axes, dev, t_in, t_out\n\n\ndef test_generic_gaussian_blur():\n for dev in [\"cpu\", \"gpu\"]:\n for (t_in, t_out) in [(np.uint8, types.NO_TYPE), (np.float32, types.FLOAT), (np.uint8, types.FLOAT)]:\n yield from generate_generic_cases(dev, t_in, t_out)\n\n\n@attr('slow')\ndef test_generic_gaussian_blur_slow():\n for dev in [\"cpu\", \"gpu\"]:\n for t_in in [np.uint8, np.int32, np.float32]:\n for t_out in [types.NO_TYPE, types.FLOAT, dali_type(t_in)]:\n yield from generate_generic_cases(dev, t_in, t_out)\n\n\ndef check_per_sample_gaussian_blur(\n batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type=\"cpu\"):\n pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)\n data = RandomlyShapedDataIterator(batch_size, max_shape=shape)\n with pipe:\n if sigma_dim is not None:\n sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim])\n sigma_arg = sigma\n else:\n # placeholder, so we can return something\n sigma = fn.coin_flip(probability=0)\n sigma_arg = None\n\n if window_size_dim is not None:\n window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim])\n window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1\n window_arg = window_size\n else:\n window_size = fn.coin_flip(probability=0)\n window_arg = None\n\n input = fn.external_source(data, layout=layout)\n if op_type == \"gpu\":\n input = input.gpu()\n blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg)\n pipe.set_outputs(blurred, input, sigma, window_size)\n pipe.build()\n\n for _ in range(test_iters):\n result, input, sigma, window_size = pipe.run()\n if op_type == \"gpu\":\n result = result.as_cpu()\n input = input.as_cpu()\n input = to_batch(input, batch_size)\n sigma = to_batch(sigma, batch_size)\n window_size = to_batch(window_size, batch_size)\n baseline = []\n for i in range(batch_size):\n sigma_arg = sigma[i] if sigma is not None else None\n window_arg = window_size[i] if window_size_dim is not None else None\n skip_axes = count_skip_axes(layout)\n baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes))\n check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout)\n\n\n# TODO(klecki): consider checking mixed ArgumentInput/Scalar value cases\ndef test_per_sample_gaussian_blur():\n for dev in [\"cpu\", \"gpu\"]:\n for shape, layout, axes in shape_layout_axes_cases:\n for sigma_dim in [None, 1, axes]:\n for window_size_dim in [None, 1, axes]:\n if sigma_dim is None and window_size_dim is None:\n continue\n yield check_per_sample_gaussian_blur, 10, sigma_dim, 
window_size_dim, shape, layout, axes, dev\n\n\n@raises(RuntimeError)\ndef check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype=np.uint8, out_dtype=types.NO_TYPE):\n check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype, out_dtype)\n\n\ndef test_fail_gaussian_blur():\n for dev in [\"cpu\", \"gpu\"]:\n # Check layout and channel placement errors\n for shape, layout, axes in [((20, 20, 30, 3), \"DHCW\", 3), ((5, 20, 30, 3), \"HFWC\", 2),\n ((5, 10, 10, 10, 7, 3), \"FWXYZC\", 4),\n ((5, 3, 20, 3, 30), \"FCHCW\", 2),\n ((5, 3, 20, 3, 30), \"FCCHW\", 2)]:\n yield check_fail_gaussian_blur, 10, 1.0, 11, shape, layout, axes, dev\n # Negative, disallowed or both unspecified values of sigma and window size\n yield check_fail_gaussian_blur, 10, 0.0, 0, (100, 20, 3), \"HWC\", 3, dev\n yield check_fail_gaussian_blur, 10, -1.0, 0, (100, 20, 3), \"HWC\", 3, dev\n yield check_fail_gaussian_blur, 10, 0.0, -11, (100, 20, 3), \"HWC\", 3, dev\n yield check_fail_gaussian_blur, 10, 0.0, 2, (100, 20, 3), \"HWC\", 3, dev\n"
] | [
[
"numpy.array",
"numpy.float32",
"numpy.uint8"
]
] |
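Aside: the DALI test record above checks fn.gaussian_blur against a separable baseline, i.e. a 1-D kernel from cv2.getGaussianKernel convolved along each spatial axis with scipy.ndimage.convolve1d, which should agree with cv2.GaussianBlur up to rounding. The sketch below reproduces that idea in isolation; the helper name separable_gaussian and the fixed sigma/window values are illustrative assumptions rather than code lifted from the test.

import numpy as np
import cv2
from scipy.ndimage import convolve1d

def separable_gaussian(image, sigma, window_size):
    """Blur by convolving one 1-D Gaussian kernel along each spatial axis."""
    kernel = cv2.getGaussianKernel(window_size, sigma).astype(np.float32).squeeze()
    out = np.float32(image)
    for axis in (0, 1):                                # rows, then columns; channel axis untouched
        out = convolve1d(out, kernel, axis=axis, mode="mirror")
    return np.uint8(out + 0.5)                         # round to uint8, as the test's baseline does

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
    ours = separable_gaussian(img, sigma=1.5, window_size=7)
    ref = np.uint8(cv2.GaussianBlur(np.float32(img), (7, 7), sigmaX=1.5, sigmaY=1.5) + 0.5)
    print(int(np.abs(ours.astype(int) - ref.astype(int)).max()))  # expected to be 0 or 1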
stephenpascoe/arrow | [
"3efd08f0cbaa40d0d3a329b8613fb80ac022b985"
] | [
"python/pyarrow/tests/test_convert_pandas.py"
] | [
"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport decimal\nimport json\nfrom collections import OrderedDict\nfrom datetime import date, datetime, time, timedelta\n\nimport numpy as np\nimport numpy.testing as npt\nimport pandas as pd\nimport pandas.util.testing as tm\nimport pytest\n\nimport pyarrow as pa\nimport pyarrow.types as patypes\nfrom pyarrow.compat import PY2\n\nfrom .pandas_examples import dataframe_with_arrays, dataframe_with_lists\n\n\ndef _alltypes_example(size=100):\n return pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms,\n # us, ns\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n\n\ndef _check_pandas_roundtrip(df, expected=None, use_threads=False,\n expected_schema=None,\n check_dtype=True, schema=None,\n preserve_index=False,\n as_batch=False):\n klass = pa.RecordBatch if as_batch else pa.Table\n table = klass.from_pandas(df, schema=schema,\n preserve_index=preserve_index,\n nthreads=2 if use_threads else 1)\n result = table.to_pandas(use_threads=use_threads)\n\n if expected_schema:\n # all occurences of _check_pandas_roundtrip passes expected_schema\n # without the pandas generated key-value metadata, so we need to\n # add it before checking schema equality\n expected_schema = expected_schema.add_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n if expected is None:\n expected = df\n\n tm.assert_frame_equal(result, expected, check_dtype=check_dtype,\n check_index_type=('equiv' if preserve_index\n else False))\n\n\ndef _check_series_roundtrip(s, type_=None, expected_pa_type=None):\n arr = pa.array(s, from_pandas=True, type=type_)\n\n if type_ is not None and expected_pa_type is None:\n expected_pa_type = type_\n\n if expected_pa_type is not None:\n assert arr.type == expected_pa_type\n\n result = pd.Series(arr.to_pandas(), name=s.name)\n if patypes.is_timestamp(arr.type) and arr.type.tz is not None:\n result = (result.dt.tz_localize('utc')\n .dt.tz_convert(arr.type.tz))\n\n tm.assert_series_equal(s, result)\n\n\ndef _check_array_roundtrip(values, expected=None, mask=None,\n 
type=None):\n arr = pa.array(values, from_pandas=True, mask=mask, type=type)\n result = arr.to_pandas()\n\n values_nulls = pd.isnull(values)\n if mask is None:\n assert arr.null_count == values_nulls.sum()\n else:\n assert arr.null_count == (mask | values_nulls).sum()\n\n if mask is None:\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n else:\n expected = pd.Series(np.ma.masked_array(values, mask=mask))\n tm.assert_series_equal(pd.Series(result), expected,\n check_names=False)\n\n\ndef _check_array_from_pandas_roundtrip(np_array):\n arr = pa.array(np_array, from_pandas=True)\n result = arr.to_pandas()\n npt.assert_array_equal(result, np_array)\n\n\nclass TestConvertMetadata(object):\n \"\"\"\n Conversion tests for Pandas metadata & indices.\n \"\"\"\n\n def test_non_string_columns(self):\n df = pd.DataFrame({0: [1, 2, 3]})\n table = pa.Table.from_pandas(df)\n assert table.column(0).name == '0'\n\n def test_from_pandas_with_columns(self):\n df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]})\n\n table = pa.Table.from_pandas(df, columns=[0, 1])\n expected = pa.Table.from_pandas(df[[0, 1]])\n assert expected.equals(table)\n\n record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])\n record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])\n assert record_batch_expected.equals(record_batch_table)\n\n def test_column_index_names_are_preserved(self):\n df = pd.DataFrame({'data': [1, 2, 3]})\n df.columns.names = ['a']\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns(self):\n columns = pd.MultiIndex.from_arrays([\n ['one', 'two'], ['X', 'Y']\n ])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_with_dtypes(self):\n columns = pd.MultiIndex.from_arrays(\n [\n ['one', 'two'],\n pd.DatetimeIndex(['2017-08-01', '2017-08-02']),\n ],\n names=['level_1', 'level_2'],\n )\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_multiindex_columns_unicode(self):\n columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_integer_index_column(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_index_metadata_field_name(self):\n # test None case, and strangely named non-index columns\n df = pd.DataFrame(\n [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],\n index=pd.MultiIndex.from_arrays(\n [['c', 'b', 'a'], [3, 2, 1]],\n names=[None, 'foo']\n ),\n columns=['a', None, '__index_level_0__'],\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n col1, col2, col3, idx0, foo = js['columns']\n\n assert col1['name'] == 'a'\n assert col1['name'] == col1['field_name']\n\n assert col2['name'] is None\n assert col2['field_name'] == 'None'\n\n assert col3['name'] == '__index_level_0__'\n assert col3['name'] == col3['field_name']\n\n idx0_name, foo_name = js['index_columns']\n assert idx0_name == '__index_level_0__'\n assert idx0['field_name'] == idx0_name\n assert idx0['name'] is None\n\n assert foo_name == 'foo'\n assert foo['field_name'] == foo_name\n assert foo['name'] == foo_name\n\n def test_categorical_column_index(self):\n df = 
pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), dtype='category')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'categorical'\n assert column_indexes['numpy_type'] == 'int8'\n\n md = column_indexes['metadata']\n assert md['num_categories'] == 3\n assert md['ordered'] is False\n\n def test_string_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.Index(list('def'), name='stringz')\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] == 'stringz'\n assert column_indexes['name'] == column_indexes['field_name']\n assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')\n assert column_indexes['numpy_type'] == 'object'\n\n md = column_indexes['metadata']\n\n if not PY2:\n assert len(md) == 1\n assert md['encoding'] == 'UTF-8'\n else:\n assert md is None or 'encoding' not in md\n\n def test_datetimetz_column_index(self):\n df = pd.DataFrame(\n [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],\n columns=pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n )\n t = pa.Table.from_pandas(df, preserve_index=True)\n raw_metadata = t.schema.metadata\n js = json.loads(raw_metadata[b'pandas'].decode('utf8'))\n\n column_indexes, = js['column_indexes']\n assert column_indexes['name'] is None\n assert column_indexes['pandas_type'] == 'datetimetz'\n assert column_indexes['numpy_type'] == 'datetime64[ns]'\n\n md = column_indexes['metadata']\n assert md['timezone'] == 'America/New_York'\n\n def test_datetimetz_row_index(self):\n df = pd.DataFrame({\n 'a': pd.date_range(\n start='2017-01-01', periods=3, tz='America/New_York'\n )\n })\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_categorical_row_index(self):\n df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})\n df['a'] = df.a.astype('category')\n df = df.set_index('a')\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_duplicate_column_names_does_not_crash(self):\n df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))\n with pytest.raises(ValueError):\n pa.Table.from_pandas(df)\n\n def test_dictionary_indices_boundscheck(self):\n # ARROW-1658. 
No validation of indices leads to segfaults in pandas\n indices = [[0, 1], [0, -1]]\n\n for inds in indices:\n arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)\n batch = pa.RecordBatch.from_arrays([arr], ['foo'])\n table = pa.Table.from_batches([batch, batch, batch])\n\n with pytest.raises(pa.ArrowInvalid):\n arr.to_pandas()\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas()\n\n def test_unicode_with_unicode_column_and_index(self):\n df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])\n\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_mixed_unicode_column_names(self):\n df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])\n\n # TODO(phillipc): Should this raise?\n with pytest.raises(AssertionError):\n _check_pandas_roundtrip(df, preserve_index=True)\n\n def test_binary_column_name(self):\n column_data = [u'い']\n key = u'あ'.encode('utf8')\n data = {key: column_data}\n df = pd.DataFrame(data)\n\n # we can't use _check_pandas_roundtrip here because our metdata\n # is always decoded as utf8: even if binary goes in, utf8 comes out\n t = pa.Table.from_pandas(df, preserve_index=True)\n df2 = t.to_pandas()\n assert df.values[0] == df2.values[0]\n assert df.index.values[0] == df2.index.values[0]\n assert df.columns[0] == key\n\n def test_multiindex_duplicate_values(self):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n\n table = pa.Table.from_pandas(df)\n result_df = table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n def test_metadata_with_mixed_types(self):\n df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})\n table = pa.Table.from_pandas(df)\n metadata = table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'bytes'\n assert data_column['numpy_type'] == 'object'\n\n def test_list_metadata(self):\n df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})\n schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])\n table = pa.Table.from_pandas(df, schema=schema)\n metadata = table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'list[int64]'\n assert data_column['numpy_type'] == 'object'\n\n def test_decimal_metadata(self):\n expected = pd.DataFrame({\n 'decimals': [\n decimal.Decimal('394092382910493.12341234678'),\n -decimal.Decimal('314292388910493.12343437128'),\n ]\n })\n table = pa.Table.from_pandas(expected)\n metadata = table.schema.metadata\n assert b'mixed' not in metadata[b'pandas']\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n data_column = js['columns'][0]\n assert data_column['pandas_type'] == 'decimal'\n assert data_column['numpy_type'] == 'object'\n assert data_column['metadata'] == {'precision': 26, 'scale': 11}\n\n def test_table_column_subset_metadata(self):\n # ARROW-1883\n df = pd.DataFrame({\n 'a': [1, 2, 3],\n 'b': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n tm.assert_frame_equal(result, df[['a']])\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, 
df[['a']])\n\n # non-default index\n for index in [\n pd.Index(['a', 'b', 'c'], name='index'),\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')]:\n df = pd.DataFrame({'a': [1, 2, 3],\n 'b': [.1, .2, .3]}, index=index)\n table = pa.Table.from_pandas(df)\n\n table_subset = table.remove_column(1)\n result = table_subset.to_pandas()\n tm.assert_frame_equal(result, df[['a']])\n\n table_subset2 = table_subset.remove_column(1)\n result = table_subset2.to_pandas()\n tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))\n\n def test_empty_list_metadata(self):\n # Create table with array of empty lists, forced to have type\n # list(string) in pyarrow\n c1 = [[\"test\"], [\"a\", \"b\"], None]\n c2 = [[], [], []]\n arrays = OrderedDict([\n ('c1', pa.array(c1, type=pa.list_(pa.string()))),\n ('c2', pa.array(c2, type=pa.list_(pa.string()))),\n ])\n rb = pa.RecordBatch.from_arrays(\n list(arrays.values()),\n list(arrays.keys())\n )\n tbl = pa.Table.from_batches([rb])\n\n # First roundtrip changes schema, because pandas cannot preserve the\n # type of empty lists\n df = tbl.to_pandas()\n tbl2 = pa.Table.from_pandas(df, preserve_index=True)\n md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))\n\n # Second roundtrip\n df2 = tbl2.to_pandas()\n expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))\n\n tm.assert_frame_equal(df2, expected)\n\n assert md2['columns'] == [\n {\n 'name': 'c1',\n 'field_name': 'c1',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[unicode]',\n },\n {\n 'name': 'c2',\n 'field_name': 'c2',\n 'metadata': None,\n 'numpy_type': 'object',\n 'pandas_type': 'list[empty]',\n },\n {\n 'name': None,\n 'field_name': '__index_level_0__',\n 'metadata': None,\n 'numpy_type': 'int64',\n 'pandas_type': 'int64',\n }\n ]\n\n\nclass TestConvertPrimitiveTypes(object):\n \"\"\"\n Conversion tests for primitive (e.g. 
numeric) types.\n \"\"\"\n\n def test_float_no_nulls(self):\n data = {}\n fields = []\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n num_values = 100\n\n for numpy_dtype, arrow_dtype in dtypes:\n values = np.random.randn(num_values)\n data[numpy_dtype] = values.astype(numpy_dtype)\n fields.append(pa.field(numpy_dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_float_nulls(self):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n dtypes = [('f2', pa.float16()),\n ('f4', pa.float32()),\n ('f8', pa.float64())]\n names = ['f2', 'f4', 'f8']\n expected_cols = []\n\n arrays = []\n fields = []\n for name, arrow_dtype in dtypes:\n values = np.random.randn(num_values).astype(name)\n\n arr = pa.array(values, from_pandas=True, mask=null_mask)\n arrays.append(arr)\n fields.append(pa.field(name, arrow_dtype))\n values[null_mask] = np.nan\n\n expected_cols.append(values)\n\n ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),\n columns=names)\n\n table = pa.Table.from_arrays(arrays, names)\n assert table.schema.equals(pa.schema(fields))\n result = table.to_pandas()\n tm.assert_frame_equal(result, ex_frame)\n\n def test_float_nulls_to_ints(self):\n # ARROW-2135\n df = pd.DataFrame({\"a\": [1.0, 2.0, pd.np.NaN]})\n schema = pa.schema([pa.field(\"a\", pa.int16(), nullable=True)])\n table = pa.Table.from_pandas(df, schema=schema)\n assert table[0].to_pylist() == [1, 2, None]\n tm.assert_frame_equal(df, table.to_pandas())\n\n def test_integer_no_nulls(self):\n data = OrderedDict()\n fields = []\n\n numpy_dtypes = [\n ('i1', pa.int8()), ('i2', pa.int16()),\n ('i4', pa.int32()), ('i8', pa.int64()),\n ('u1', pa.uint8()), ('u2', pa.uint16()),\n ('u4', pa.uint32()), ('u8', pa.uint64()),\n ('longlong', pa.int64()), ('ulonglong', pa.uint64())\n ]\n num_values = 100\n\n for dtype, arrow_dtype in numpy_dtypes:\n info = np.iinfo(dtype)\n values = np.random.randint(max(info.min, np.iinfo(np.int_).min),\n min(info.max, np.iinfo(np.int_).max),\n size=num_values)\n data[dtype] = values.astype(dtype)\n fields.append(pa.field(dtype, arrow_dtype))\n\n df = pd.DataFrame(data)\n schema = pa.schema(fields)\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_integer_types(self):\n # Test all Numpy integer aliases\n data = OrderedDict()\n numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',\n 'int_', 'uint', 'longlong', 'ulonglong']\n for dtype in numpy_dtypes:\n data[dtype] = np.arange(12, dtype=dtype)\n df = pd.DataFrame(data)\n _check_pandas_roundtrip(df)\n\n def test_integer_with_nulls(self):\n # pandas requires upcast to float dtype\n\n int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n\n expected_cols = []\n arrays = []\n for name in int_dtypes:\n values = np.random.randint(0, 100, size=num_values)\n\n arr = pa.array(values, mask=null_mask)\n arrays.append(arr)\n\n expected = values.astype('f8')\n expected[null_mask] = np.nan\n\n expected_cols.append(expected)\n\n ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),\n columns=int_dtypes)\n\n table = pa.Table.from_arrays(arrays, int_dtypes)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_array_from_pandas_type_cast(self):\n arr = np.arange(10, dtype='int64')\n\n target_type = 
pa.int8()\n\n result = pa.array(arr, type=target_type)\n expected = pa.array(arr.astype('int8'))\n assert result.equals(expected)\n\n def test_boolean_no_nulls(self):\n num_values = 100\n\n np.random.seed(0)\n\n df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_boolean_nulls(self):\n # pandas requires upcast to object dtype\n num_values = 100\n np.random.seed(0)\n\n mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 10, size=num_values) < 5\n\n arr = pa.array(values, mask=mask)\n\n expected = values.astype(object)\n expected[mask] = None\n\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n ex_frame = pd.DataFrame({'bools': expected})\n\n table = pa.Table.from_arrays([arr], ['bools'])\n assert table.schema.equals(schema)\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, ex_frame)\n\n def test_float_object_nulls(self):\n arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)\n df = pd.DataFrame({'floats': arr})\n expected = pd.DataFrame({'floats': pd.to_numeric(arr)})\n field = pa.field('floats', pa.float64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_int_object_nulls(self):\n arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)\n df = pd.DataFrame({'ints': arr})\n expected = pd.DataFrame({'ints': pd.to_numeric(arr)})\n field = pa.field('ints', pa.int64())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected=expected,\n expected_schema=schema)\n\n def test_boolean_object_nulls(self):\n arr = np.array([False, None, True] * 100, dtype=object)\n df = pd.DataFrame({'bools': arr})\n field = pa.field('bools', pa.bool_())\n schema = pa.schema([field])\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_all_nulls_cast_numeric(self):\n arr = np.array([None], dtype=object)\n\n def _check_type(t):\n a2 = pa.array(arr, type=t)\n assert a2.type == t\n assert a2[0].as_py() is None\n\n _check_type(pa.int32())\n _check_type(pa.float64())\n\n def test_half_floats_from_numpy(self):\n arr = np.array([1.5, np.nan], dtype=np.float16)\n a = pa.array(arr, type=pa.float16())\n x, y = a.to_pylist()\n assert isinstance(x, np.float16)\n assert x == 1.5\n assert isinstance(y, np.float16)\n assert np.isnan(y)\n\n a = pa.array(arr, type=pa.float16(), from_pandas=True)\n x, y = a.to_pylist()\n assert isinstance(x, np.float16)\n assert x == 1.5\n assert y is None\n\n\[email protected]('dtype',\n ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_array_integer_object_nulls_option(dtype):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n array = pa.array(values, mask=null_mask)\n\n if null_mask.any():\n expected = values.astype('O')\n expected[null_mask] = None\n else:\n expected = values\n\n result = array.to_pandas(integer_object_nulls=True)\n\n np.testing.assert_equal(result, expected)\n\n\[email protected]('dtype',\n ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])\ndef test_table_integer_object_nulls_option(dtype):\n num_values = 100\n\n null_mask = np.random.randint(0, 10, size=num_values) < 3\n values = np.random.randint(0, 100, size=num_values, dtype=dtype)\n\n array = pa.array(values, mask=null_mask)\n\n if null_mask.any():\n expected = values.astype('O')\n expected[null_mask] 
= None\n else:\n expected = values\n\n expected = pd.DataFrame({dtype: expected})\n\n table = pa.Table.from_arrays([array], [dtype])\n result = table.to_pandas(integer_object_nulls=True)\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestConvertDateTimeLikeTypes(object):\n \"\"\"\n Conversion tests for datetime- and timestamp-like types (date64, etc.).\n \"\"\"\n\n def test_timestamps_notimezone_no_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_notimezone_nulls(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n field = pa.field('datetime64', pa.timestamp('ns'))\n schema = pa.schema([field])\n _check_pandas_roundtrip(\n df,\n expected_schema=schema,\n )\n\n def test_timestamps_with_timezone(self):\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123',\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n })\n df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')\n .to_frame())\n _check_pandas_roundtrip(df)\n\n _check_series_roundtrip(df['datetime64'])\n\n # drop-in a null and ns instead of ms\n df = pd.DataFrame({\n 'datetime64': np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n })\n df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')\n .to_frame())\n\n _check_pandas_roundtrip(df)\n\n def test_python_datetime(self):\n # ARROW-2106\n date_array = [datetime.today() + timedelta(days=x) for x in range(10)]\n df = pd.DataFrame({\n 'datetime': pd.Series(date_array, dtype=object)\n })\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].data.chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\n 'datetime': date_array\n })\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_datetime_subclass(self):\n\n class MyDatetime(datetime):\n # see https://github.com/pandas-dev/pandas/issues/21142\n nanosecond = 0.0\n\n date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]\n df = pd.DataFrame({\"datetime\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].data.chunk(0), pa.TimestampArray)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame({\"datetime\": date_array})\n\n # https://github.com/pandas-dev/pandas/issues/21142\n expected_df[\"datetime\"] = pd.to_datetime(expected_df[\"datetime\"])\n\n tm.assert_frame_equal(expected_df, result)\n\n def test_python_date_subclass(self):\n\n class MyDate(date):\n pass\n\n date_array = [MyDate(2000, 1, 1)]\n df = pd.DataFrame({\"date\": pd.Series(date_array, dtype=object)})\n\n table = pa.Table.from_pandas(df)\n assert isinstance(table[0].data.chunk(0), pa.Date32Array)\n\n result = table.to_pandas()\n expected_df = pd.DataFrame(\n {\"date\": np.array([\"2000-01-01\"], dtype=\"datetime64[ns]\")}\n )\n tm.assert_frame_equal(expected_df, result)\n\n def test_datetime64_to_date32(self):\n # ARROW-1718\n arr = pa.array([date(2017, 10, 23), None])\n c = pa.Column.from_array(\"d\", arr)\n s = c.to_pandas()\n\n arr2 = 
pa.Array.from_pandas(s, type=pa.date32())\n\n assert arr2.equals(arr.cast('date32'))\n\n @pytest.mark.parametrize('mask', [\n None,\n np.ones(3),\n np.array([True, False, False]),\n ])\n def test_pandas_datetime_to_date64(self, mask):\n s = pd.to_datetime([\n '2018-05-10T00:00:00',\n '2018-05-11T00:00:00',\n '2018-05-12T00:00:00',\n ])\n arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)\n\n data = np.array([\n date(2018, 5, 10),\n date(2018, 5, 11),\n date(2018, 5, 12)\n ])\n expected = pa.array(data, mask=mask, type=pa.date64())\n\n assert arr.equals(expected)\n\n @pytest.mark.parametrize('mask', [\n None,\n np.ones(3),\n np.array([True, False, False])\n ])\n def test_pandas_datetime_to_date64_failures(self, mask):\n s = pd.to_datetime([\n '2018-05-10T10:24:01',\n '2018-05-11T10:24:01',\n '2018-05-12T10:24:01',\n ])\n\n expected_msg = 'Timestamp value had non-zero intraday milliseconds'\n with pytest.raises(pa.ArrowInvalid, match=expected_msg):\n pa.Array.from_pandas(s, type=pa.date64(), mask=mask)\n\n def test_date_infer(self):\n df = pd.DataFrame({\n 'date': [date(2000, 1, 1),\n None,\n date(1970, 1, 1),\n date(2040, 2, 26)]})\n table = pa.Table.from_pandas(df, preserve_index=False)\n field = pa.field('date', pa.date32())\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n result = table.to_pandas()\n expected = df.copy()\n expected['date'] = pd.to_datetime(df['date'])\n tm.assert_frame_equal(result, expected)\n\n def test_date_mask(self):\n arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],\n dtype='datetime64[D]')\n mask = [True, False]\n result = pa.array(arr, mask=np.array(mask))\n expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')\n expected = pa.array(expected, from_pandas=True)\n assert expected.equals(result)\n\n def test_date_objects_typed(self):\n arr = np.array([\n date(2017, 4, 3),\n None,\n date(2017, 4, 4),\n date(2017, 4, 5)], dtype=object)\n\n arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')\n arr_i8 = arr_i4.astype('int64') * 86400000\n mask = np.array([False, True, False, False])\n\n t32 = pa.date32()\n t64 = pa.date64()\n\n a32 = pa.array(arr, type=t32)\n a64 = pa.array(arr, type=t64)\n\n a32_expected = pa.array(arr_i4, mask=mask, type=t32)\n a64_expected = pa.array(arr_i8, mask=mask, type=t64)\n\n assert a32.equals(a32_expected)\n assert a64.equals(a64_expected)\n\n # Test converting back to pandas\n colnames = ['date32', 'date64']\n table = pa.Table.from_arrays([a32, a64], colnames)\n table_pandas = table.to_pandas()\n\n ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',\n '2017-04-05'],\n dtype='datetime64[D]')\n .astype('datetime64[ns]'))\n ex_values[1] = pd.NaT.value\n expected_pandas = pd.DataFrame({'date32': ex_values,\n 'date64': ex_values},\n columns=colnames)\n tm.assert_frame_equal(table_pandas, expected_pandas)\n\n def test_dates_from_integers(self):\n t1 = pa.date32()\n t2 = pa.date64()\n\n arr = np.array([17259, 17260, 17261], dtype='int32')\n arr2 = arr.astype('int64') * 86400000\n\n a1 = pa.array(arr, type=t1)\n a2 = pa.array(arr2, type=t2)\n\n expected = date(2017, 4, 3)\n assert a1[0].as_py() == expected\n assert a2[0].as_py() == expected\n\n @pytest.mark.xfail(reason=\"not supported ATM\",\n raises=NotImplementedError)\n def test_timedelta(self):\n # TODO(jreback): Pandas only support ns resolution\n # Arrow supports ??? 
for resolution\n df = pd.DataFrame({\n 'timedelta': np.arange(start=0, stop=3 * 86400000,\n step=86400000,\n dtype='timedelta64[ms]')\n })\n pa.Table.from_pandas(df)\n\n def test_pytime_from_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356)]\n\n # microseconds\n t1 = pa.time64('us')\n\n aobjs = np.array(pytimes + [None], dtype=object)\n parr = pa.array(aobjs)\n assert parr.type == t1\n assert parr[0].as_py() == pytimes[0]\n assert parr[1].as_py() == pytimes[1]\n assert parr[2] is pa.NA\n\n # DataFrame\n df = pd.DataFrame({'times': aobjs})\n batch = pa.RecordBatch.from_pandas(df)\n assert batch[0].equals(parr)\n\n # Test ndarray of int64 values\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n a1 = pa.array(arr, type=pa.time64('us'))\n assert a1[0].as_py() == pytimes[0]\n\n a2 = pa.array(arr * 1000, type=pa.time64('ns'))\n assert a2[0].as_py() == pytimes[0]\n\n a3 = pa.array((arr / 1000).astype('i4'),\n type=pa.time32('ms'))\n assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)\n\n a4 = pa.array((arr / 1000000).astype('i4'),\n type=pa.time32('s'))\n assert a4[0].as_py() == pytimes[0].replace(microsecond=0)\n\n def test_arrow_time_to_pandas(self):\n pytimes = [time(1, 2, 3, 1356),\n time(4, 5, 6, 1356),\n time(0, 0, 0)]\n\n expected = np.array(pytimes[:2] + [None])\n expected_ms = np.array([x.replace(microsecond=1000)\n for x in pytimes[:2]] +\n [None])\n expected_s = np.array([x.replace(microsecond=0)\n for x in pytimes[:2]] +\n [None])\n\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n arr = np.array([_pytime_to_micros(v) for v in pytimes],\n dtype='int64')\n\n null_mask = np.array([False, False, True], dtype=bool)\n\n a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))\n a2 = pa.array(arr * 1000, mask=null_mask,\n type=pa.time64('ns'))\n\n a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,\n type=pa.time32('ms'))\n a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,\n type=pa.time32('s'))\n\n names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']\n batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)\n arr = a1.to_pandas()\n assert (arr == expected).all()\n\n arr = a2.to_pandas()\n assert (arr == expected).all()\n\n arr = a3.to_pandas()\n assert (arr == expected_ms).all()\n\n arr = a4.to_pandas()\n assert (arr == expected_s).all()\n\n df = batch.to_pandas()\n expected_df = pd.DataFrame({'time64[us]': expected,\n 'time64[ns]': expected,\n 'time32[ms]': expected_ms,\n 'time32[s]': expected_s},\n columns=names)\n\n tm.assert_frame_equal(df, expected_df)\n\n def test_numpy_datetime64_columns(self):\n datetime64_ns = np.array([\n '2007-07-13T01:23:34.123456789',\n None,\n '2006-01-13T12:34:56.432539784',\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ns]')\n _check_array_from_pandas_roundtrip(datetime64_ns)\n\n datetime64_us = np.array([\n '2007-07-13T01:23:34.123456',\n None,\n '2006-01-13T12:34:56.432539',\n '2010-08-13T05:46:57.437699'],\n dtype='datetime64[us]')\n _check_array_from_pandas_roundtrip(datetime64_us)\n\n datetime64_ms = np.array([\n '2007-07-13T01:23:34.123',\n None,\n '2006-01-13T12:34:56.432',\n '2010-08-13T05:46:57.437'],\n dtype='datetime64[ms]')\n _check_array_from_pandas_roundtrip(datetime64_ms)\n\n datetime64_s = np.array([\n '2007-07-13T01:23:34',\n None,\n '2006-01-13T12:34:56',\n '2010-08-13T05:46:57'],\n dtype='datetime64[s]')\n _check_array_from_pandas_roundtrip(datetime64_s)\n\n def test_numpy_datetime64_day_unit(self):\n datetime64_d = 
np.array([\n '2007-07-13',\n None,\n '2006-01-15',\n '2010-08-19'],\n dtype='datetime64[D]')\n _check_array_from_pandas_roundtrip(datetime64_d)\n\n def test_array_from_pandas_date_with_mask(self):\n m = np.array([True, False, True])\n data = pd.Series([\n date(1990, 1, 1),\n date(1991, 1, 1),\n date(1992, 1, 1)\n ])\n\n result = pa.Array.from_pandas(data, mask=m)\n\n expected = pd.Series([None, date(1991, 1, 1), None])\n assert pa.Array.from_pandas(expected).equals(result)\n\n def test_fixed_offset_timezone(self):\n df = pd.DataFrame({\n 'a': [\n pd.Timestamp('2012-11-11 00:00:00+01:00'),\n pd.NaT\n ]\n })\n _check_pandas_roundtrip(df)\n _check_serialize_components_roundtrip(df)\n\n\nclass TestConvertStringLikeTypes(object):\n \"\"\"\n Conversion tests for string and binary types.\n \"\"\"\n\n def test_unicode(self):\n repeats = 1000\n values = [u'foo', None, u'bar', u'mañana', np.nan]\n df = pd.DataFrame({'strings': values * repeats})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n\n _check_pandas_roundtrip(df, expected_schema=schema)\n\n def test_bytes_to_binary(self):\n values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]\n df = pd.DataFrame({'strings': values})\n\n table = pa.Table.from_pandas(df)\n assert table[0].type == pa.binary()\n\n values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]\n expected = pd.DataFrame({'strings': values2})\n _check_pandas_roundtrip(df, expected)\n\n @pytest.mark.large_memory\n def test_bytes_exceed_2gb(self):\n v1 = b'x' * 100000000\n v2 = b'x' * 147483646\n\n # ARROW-2227, hit exactly 2GB on the nose\n df = pd.DataFrame({\n 'strings': [v1] * 20 + [v2] + ['x'] * 20\n })\n arr = pa.array(df['strings'])\n assert isinstance(arr, pa.ChunkedArray)\n assert arr.num_chunks == 2\n arr = None\n\n table = pa.Table.from_pandas(df)\n assert table[0].data.num_chunks == 2\n\n def test_fixed_size_bytes(self):\n values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n table = pa.Table.from_pandas(df, schema=schema)\n assert table.schema[0].type == schema[0].type\n assert table.schema[0].name == schema[0].name\n result = table.to_pandas()\n tm.assert_frame_equal(result, df)\n\n def test_fixed_size_bytes_does_not_accept_varying_lengths(self):\n values = [b'foo', None, b'ba', None, None, b'hey']\n df = pd.DataFrame({'strings': values})\n schema = pa.schema([pa.field('strings', pa.binary(3))])\n with pytest.raises(pa.ArrowInvalid):\n pa.Table.from_pandas(df, schema=schema)\n\n def test_variable_size_bytes(self):\n s = pd.Series([b'123', b'', b'a', None])\n _check_series_roundtrip(s, type_=pa.binary())\n\n def test_binary_from_bytearray(self):\n s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),\n None])\n # Explicitly set type\n _check_series_roundtrip(s, type_=pa.binary())\n # Infer type from bytearrays\n _check_series_roundtrip(s, expected_pa_type=pa.binary())\n\n def test_table_empty_str(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result1 = table.to_pandas(strings_to_categorical=False)\n expected1 = pd.DataFrame({'strings': values})\n tm.assert_frame_equal(result1, expected1, check_dtype=True)\n\n result2 = table.to_pandas(strings_to_categorical=True)\n expected2 = pd.DataFrame({'strings': pd.Categorical(values)})\n 
tm.assert_frame_equal(result2, expected2, check_dtype=True)\n\n def test_selective_categoricals(self):\n values = ['', '', '', '', '']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n expected_str = pd.DataFrame({'strings': values})\n expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})\n\n result1 = table.to_pandas(categories=['strings'])\n tm.assert_frame_equal(result1, expected_cat, check_dtype=True)\n result2 = table.to_pandas(categories=[])\n tm.assert_frame_equal(result2, expected_str, check_dtype=True)\n result3 = table.to_pandas(categories=('strings',))\n tm.assert_frame_equal(result3, expected_cat, check_dtype=True)\n result4 = table.to_pandas(categories=tuple())\n tm.assert_frame_equal(result4, expected_str, check_dtype=True)\n\n def test_table_str_to_categorical_without_na(self):\n values = ['a', 'a', 'b', 'b', 'c']\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n def test_table_str_to_categorical_with_na(self):\n values = [None, 'a', 'b', np.nan]\n df = pd.DataFrame({'strings': values})\n field = pa.field('strings', pa.string())\n schema = pa.schema([field])\n table = pa.Table.from_pandas(df, schema=schema)\n\n result = table.to_pandas(strings_to_categorical=True)\n expected = pd.DataFrame({'strings': pd.Categorical(values)})\n tm.assert_frame_equal(result, expected, check_dtype=True)\n\n with pytest.raises(pa.ArrowInvalid):\n table.to_pandas(strings_to_categorical=True,\n zero_copy_only=True)\n\n # Regression test for ARROW-2101\n def test_array_of_bytes_to_strings(self):\n converted = pa.array(np.array([b'x'], dtype=object), pa.string())\n assert converted.type == pa.string()\n\n # Make sure that if an ndarray of bytes is passed to the array\n # constructor and the type is string, it will fail if those bytes\n # cannot be converted to utf-8\n def test_array_of_bytes_to_strings_bad_data(self):\n with pytest.raises(\n pa.lib.ArrowInvalid,\n match=(\"'(utf8|utf-8)' codec can't decode byte 0x80 \"\n \"in position 0: invalid start byte\")):\n pa.array(np.array([b'\\x80\\x81'], dtype=object), pa.string())\n\n def test_numpy_string_array_to_fixed_size_binary(self):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n\n converted = pa.array(arr, type=pa.binary(3))\n expected = pa.array(list(arr), type=pa.binary(3))\n assert converted.equals(expected)\n\n mask = np.array([True, False, True])\n converted = pa.array(arr, type=pa.binary(3), mask=mask)\n expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))\n assert converted.equals(expected)\n\n with pytest.raises(pa.lib.ArrowInvalid,\n match='Got bytestring of length 3 \\(expected 4\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')\n pa.array(arr, type=pa.binary(4))\n\n with pytest.raises(pa.lib.ArrowInvalid,\n match='Got bytestring of length 12 \\(expected 3\\)'):\n arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')\n pa.array(arr, type=pa.binary(3))\n\n\nclass TestConvertDecimalTypes(object):\n \"\"\"\n Conversion test for decimal types.\n \"\"\"\n decimal32 = [\n 
decimal.Decimal('-1234.123'),\n decimal.Decimal('1234.439')\n ]\n decimal64 = [\n decimal.Decimal('-129934.123331'),\n decimal.Decimal('129534.123731')\n ]\n decimal128 = [\n decimal.Decimal('394092382910493.12341234678'),\n decimal.Decimal('-314292388910493.12343437128')\n ]\n\n @pytest.mark.parametrize(('values', 'expected_type'), [\n pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),\n pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),\n pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')\n ])\n def test_decimal_from_pandas(self, values, expected_type):\n expected = pd.DataFrame({'decimals': values})\n table = pa.Table.from_pandas(expected, preserve_index=False)\n field = pa.field('decimals', expected_type)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = pa.schema([field], metadata=table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n @pytest.mark.parametrize('values', [\n pytest.param(decimal32, id='decimal32'),\n pytest.param(decimal64, id='decimal64'),\n pytest.param(decimal128, id='decimal128')\n ])\n def test_decimal_to_pandas(self, values):\n expected = pd.DataFrame({'decimals': values})\n converted = pa.Table.from_pandas(expected)\n df = converted.to_pandas()\n tm.assert_frame_equal(df, expected)\n\n def test_decimal_fails_with_truncation(self):\n data1 = [decimal.Decimal('1.234')]\n type1 = pa.decimal128(10, 2)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data1, type=type1)\n\n data2 = [decimal.Decimal('1.2345')]\n type2 = pa.decimal128(10, 3)\n with pytest.raises(pa.ArrowInvalid):\n pa.array(data2, type=type2)\n\n def test_decimal_with_different_precisions(self):\n data = [\n decimal.Decimal('0.01'),\n decimal.Decimal('0.001'),\n ]\n series = pd.Series(data)\n array = pa.array(series)\n assert array.to_pylist() == data\n assert array.type == pa.decimal128(3, 3)\n\n array = pa.array(data, type=pa.decimal128(12, 5))\n expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]\n assert array.to_pylist() == expected\n\n def test_decimal_with_None_explicit_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n # Test that having all None values still produces decimal array\n series = pd.Series([None] * 2)\n _check_series_roundtrip(series, type_=pa.decimal128(12, 5))\n\n def test_decimal_with_None_infer_type(self):\n series = pd.Series([decimal.Decimal('3.14'), None])\n _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))\n\n\nclass TestListTypes(object):\n \"\"\"\n Conversion tests for list<> types.\n \"\"\"\n\n def test_column_of_arrays(self):\n df, schema = dataframe_with_arrays()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.add_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n field = schema.field_by_name(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_arrays_to_py(self):\n # Test regression in ARROW-1199 not caught in above test\n dtype = 'i1'\n arr = np.array([\n np.arange(10, dtype=dtype),\n np.arange(5, dtype=dtype),\n None,\n np.arange(1, dtype=dtype)\n ])\n type_ = pa.list_(pa.int8())\n parr = pa.array(arr, type=type_)\n\n assert parr[0].as_py() == list(range(10))\n assert parr[1].as_py() == 
list(range(5))\n assert parr[2].as_py() is None\n assert parr[3].as_py() == [0]\n\n def test_column_of_lists(self):\n df, schema = dataframe_with_lists()\n _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)\n table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)\n\n # schema's metadata is generated by from_pandas conversion\n expected_schema = schema.add_metadata(table.schema.metadata)\n assert table.schema.equals(expected_schema)\n\n for column in df.columns:\n field = schema.field_by_name(column)\n _check_array_roundtrip(df[column], type=field.type)\n\n def test_column_of_lists_first_empty(self):\n # ARROW-2124\n num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]\n series = pd.Series([np.array(s, dtype=float) for s in num_lists])\n arr = pa.array(series)\n result = pd.Series(arr.to_pandas())\n tm.assert_series_equal(result, series)\n\n def test_column_of_lists_chunked(self):\n # ARROW-1357\n df = pd.DataFrame({\n 'lists': np.array([\n [1, 2],\n None,\n [2, 3],\n [4, 5],\n [6, 7],\n [8, 9]\n ], dtype=object)\n })\n\n schema = pa.schema([\n pa.field('lists', pa.list_(pa.int64()))\n ])\n\n t1 = pa.Table.from_pandas(df[:2], schema=schema)\n t2 = pa.Table.from_pandas(df[2:], schema=schema)\n\n table = pa.concat_tables([t1, t2])\n result = table.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_column_of_lists_chunked2(self):\n data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],\n [12, 13], [14, 15], [16, 17]]\n data2 = [[8, 9], [18, 19]]\n\n a1 = pa.array(data1)\n a2 = pa.array(data2)\n\n t1 = pa.Table.from_arrays([a1], names=['a'])\n t2 = pa.Table.from_arrays([a2], names=['a'])\n\n concatenated = pa.concat_tables([t1, t2])\n\n result = concatenated.to_pandas()\n expected = pd.DataFrame({'a': data1 + data2})\n\n tm.assert_frame_equal(result, expected)\n\n def test_column_of_lists_strided(self):\n df, schema = dataframe_with_lists()\n df = pd.concat([df] * 6, ignore_index=True)\n\n arr = df['int64'].values[::3]\n assert arr.strides[0] != 8\n\n _check_array_roundtrip(arr)\n\n def test_nested_lists_all_none(self):\n data = np.array([[None, None], None], dtype=object)\n\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n data2 = np.array([None, None, [None, None],\n np.array([None, None], dtype=object)],\n dtype=object)\n arr = pa.array(data2)\n expected = pa.array([None, None, [None, None], [None, None]])\n assert arr.equals(expected)\n\n def test_nested_lists_all_empty(self):\n # ARROW-2128\n data = pd.Series([[], [], []])\n arr = pa.array(data)\n expected = pa.array(list(data))\n assert arr.equals(expected)\n assert arr.type == pa.list_(pa.null())\n\n def test_nested_smaller_ints(self):\n # ARROW-1345, ARROW-2008, there were some type inference bugs happening\n # before\n data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])\n result = pa.array(data)\n result2 = pa.array(data.values)\n expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))\n assert result.equals(expected)\n assert result2.equals(expected)\n\n data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])\n result3 = pa.array(data3)\n expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))\n assert result3.equals(expected3)\n\n def test_infer_lists(self):\n data = OrderedDict([\n ('nan_ints', [[None, 1], [2, 3]]),\n ('ints', [[0, 1], [2, 3]]),\n ('strs', [[None, u'b'], [u'c', u'd']]),\n ('nested_strs', [[[None, u'b'], [u'c', u'd']], None])\n ])\n df = pd.DataFrame(data)\n\n 
expected_schema = pa.schema([\n pa.field('nan_ints', pa.list_(pa.int64())),\n pa.field('ints', pa.list_(pa.int64())),\n pa.field('strs', pa.list_(pa.string())),\n pa.field('nested_strs', pa.list_(pa.list_(pa.string())))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n def test_infer_numpy_array(self):\n data = OrderedDict([\n ('ints', [\n np.array([0, 1], dtype=np.int64),\n np.array([2, 3], dtype=np.int64)\n ])\n ])\n df = pd.DataFrame(data)\n expected_schema = pa.schema([\n pa.field('ints', pa.list_(pa.int64()))\n ])\n\n _check_pandas_roundtrip(df, expected_schema=expected_schema)\n\n @pytest.mark.parametrize('t,data,expected', [\n (\n pa.int64,\n [[1, 2], [3], None],\n [None, [3], None]\n ),\n (\n pa.string,\n [[u'aaa', u'bb'], [u'c'], None],\n [None, [u'c'], None]\n ),\n (\n pa.null,\n [[None, None], [None], None],\n [None, [None], None]\n )\n ])\n def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):\n m = np.array([True, False, True])\n\n s = pd.Series(data)\n result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))\n\n assert pa.Array.from_pandas(expected,\n type=pa.list_(t())).equals(result)\n\n def test_empty_list_roundtrip(self):\n empty_list_array = np.empty((3,), dtype=object)\n empty_list_array.fill([])\n\n df = pd.DataFrame({'a': np.array(['1', '2', '3']),\n 'b': empty_list_array})\n tbl = pa.Table.from_pandas(df)\n\n result = tbl.to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n def test_array_from_nested_arrays(self):\n df, schema = dataframe_with_arrays()\n for field in schema:\n arr = df[field.name].values\n expected = pa.array(list(arr), type=field.type)\n result = pa.array(arr)\n assert result.type == field.type # == list<scalar>\n assert result.equals(expected)\n\n\nclass TestConvertStructTypes(object):\n \"\"\"\n Conversion tests for struct types.\n \"\"\"\n\n def test_to_pandas(self):\n ints = pa.array([None, 2, 3], type=pa.int64())\n strs = pa.array([u'a', None, u'c'], type=pa.string())\n bools = pa.array([True, False, None], type=pa.bool_())\n arr = pa.StructArray.from_arrays(\n [ints, strs, bools],\n ['ints', 'strs', 'bools'])\n\n expected = pd.Series([\n {'ints': None, 'strs': u'a', 'bools': True},\n {'ints': 2, 'strs': None, 'bools': False},\n {'ints': 3, 'strs': u'c', 'bools': None},\n ])\n\n series = pd.Series(arr.to_pandas())\n tm.assert_series_equal(series, expected)\n\n def test_from_numpy(self):\n dt = np.dtype([('x', np.int32),\n (('y_title', 'y'), np.bool_)])\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(42, True), (43, False)], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True},\n {'x': 43, 'y': False}]\n\n # With mask\n arr = pa.array(data, mask=np.bool_([False, True]), type=ty)\n assert arr.to_pylist() == [{'x': 42, 'y': True}, None]\n\n # Trivial struct type\n dt = np.dtype([])\n ty = pa.struct([])\n\n data = np.array([], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([(), ()], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{}, {}]\n\n def test_from_numpy_nested(self):\n dt = np.dtype([('x', np.dtype([('xx', np.int8),\n ('yy', np.bool_)])),\n ('y', np.int16)])\n ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),\n pa.field('yy', pa.bool_())])),\n pa.field('y', pa.int16())])\n\n data = np.array([], dtype=dt)\n arr = 
pa.array(data, type=ty)\n assert arr.to_pylist() == []\n\n data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt)\n arr = pa.array(data, type=ty)\n assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2},\n {'x': {'xx': 3, 'yy': False}, 'y': 4}]\n\n @pytest.mark.large_memory\n def test_from_numpy_large(self):\n # Exercise rechunking + nulls\n target_size = 3 * 1024**3 # 4GB\n dt = np.dtype([('x', np.float64), ('y', 'object')])\n bs = 65536 - dt.itemsize\n block = b'.' * bs\n n = target_size // (bs + dt.itemsize)\n data = np.zeros(n, dtype=dt)\n data['x'] = np.random.random_sample(n)\n data['y'] = block\n # Add implicit nulls\n data['x'][data['x'] < 0.2] = np.nan\n\n ty = pa.struct([pa.field('x', pa.float64()),\n pa.field('y', pa.binary(bs))])\n arr = pa.array(data, type=ty, from_pandas=True)\n assert arr.num_chunks == 2\n\n def iter_chunked_array(arr):\n for chunk in arr.iterchunks():\n for item in chunk:\n yield item\n\n def check(arr, data, mask=None):\n assert len(arr) == len(data)\n xs = data['x']\n ys = data['y']\n for i, obj in enumerate(iter_chunked_array(arr)):\n try:\n d = obj.as_py()\n if mask is not None and mask[i]:\n assert d is None\n else:\n x = xs[i]\n if np.isnan(x):\n assert d['x'] is None\n else:\n assert d['x'] == x\n assert d['y'] == ys[i]\n except Exception:\n print(\"Failed at index\", i)\n raise\n\n check(arr, data)\n del arr\n\n # Now with explicit mask\n mask = np.random.random_sample(n) < 0.2\n arr = pa.array(data, type=ty, mask=mask, from_pandas=True)\n assert arr.num_chunks == 2\n\n check(arr, data, mask)\n del arr\n\n def test_from_numpy_bad_input(self):\n ty = pa.struct([pa.field('x', pa.int32()),\n pa.field('y', pa.bool_())])\n dt = np.dtype([('x', np.int32),\n ('z', np.bool_)])\n\n data = np.array([], dtype=dt)\n with pytest.raises(TypeError,\n match=\"Missing field 'y'\"):\n pa.array(data, type=ty)\n data = np.int32([])\n with pytest.raises(TypeError,\n match=\"Expected struct array\"):\n pa.array(data, type=ty)\n\n\nclass TestZeroCopyConversion(object):\n \"\"\"\n Tests that zero-copy conversion works with some types.\n \"\"\"\n\n def test_zero_copy_success(self):\n result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)\n npt.assert_array_equal(result, [0, 1, 2])\n\n def test_zero_copy_dictionaries(self):\n arr = pa.DictionaryArray.from_arrays(\n np.array([0, 0]),\n np.array([5]))\n\n result = arr.to_pandas(zero_copy_only=True)\n values = pd.Categorical([5, 5])\n\n tm.assert_series_equal(pd.Series(result), pd.Series(values),\n check_names=False)\n\n def check_zero_copy_failure(self, arr):\n with pytest.raises(pa.ArrowInvalid):\n arr.to_pandas(zero_copy_only=True)\n\n def test_zero_copy_failure_on_object_types(self):\n self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))\n\n def test_zero_copy_failure_with_int_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0, 1, None]))\n\n def test_zero_copy_failure_with_float_when_nulls(self):\n self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))\n\n def test_zero_copy_failure_on_bool_types(self):\n self.check_zero_copy_failure(pa.array([True, False]))\n\n def test_zero_copy_failure_on_list_types(self):\n arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))\n self.check_zero_copy_failure(arr)\n\n def test_zero_copy_failure_on_timestamp_types(self):\n arr = np.array(['2007-07-13'], dtype='datetime64[ns]')\n self.check_zero_copy_failure(pa.array(arr))\n\n\nclass TestConvertMisc(object):\n \"\"\"\n Miscellaneous conversion tests.\n \"\"\"\n\n type_pairs = [\n (np.int8, 
pa.int8()),\n (np.int16, pa.int16()),\n (np.int32, pa.int32()),\n (np.int64, pa.int64()),\n (np.uint8, pa.uint8()),\n (np.uint16, pa.uint16()),\n (np.uint32, pa.uint32()),\n (np.uint64, pa.uint64()),\n (np.float16, pa.float16()),\n (np.float32, pa.float32()),\n (np.float64, pa.float64()),\n # XXX unsupported\n # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),\n (np.object, pa.string()),\n (np.object, pa.binary()),\n (np.object, pa.binary(10)),\n (np.object, pa.list_(pa.int64())),\n ]\n\n def test_all_none_objects(self):\n df = pd.DataFrame({'a': [None, None, None]})\n _check_pandas_roundtrip(df)\n\n def test_all_none_category(self):\n df = pd.DataFrame({'a': [None, None, None]})\n df['a'] = df['a'].astype('category')\n _check_pandas_roundtrip(df)\n\n def test_empty_arrays(self):\n for dtype, pa_type in self.type_pairs:\n arr = np.array([], dtype=dtype)\n _check_array_roundtrip(arr, type=pa_type)\n\n def test_threaded_conversion(self):\n df = _alltypes_example()\n _check_pandas_roundtrip(df, use_threads=True)\n _check_pandas_roundtrip(df, use_threads=True, as_batch=True)\n\n def test_category(self):\n repeats = 5\n v1 = ['foo', None, 'bar', 'qux', np.nan]\n v2 = [4, 5, 6, 7, 8]\n v3 = [b'foo', None, b'bar', b'qux', np.nan]\n df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats),\n 'cat_ints': pd.Categorical(v2 * repeats),\n 'cat_binary': pd.Categorical(v3 * repeats),\n 'cat_strings_ordered': pd.Categorical(\n v1 * repeats, categories=['bar', 'qux', 'foo'],\n ordered=True),\n 'ints': v2 * repeats,\n 'ints2': v2 * repeats,\n 'strings': v1 * repeats,\n 'strings2': v1 * repeats,\n 'strings3': v3 * repeats})\n _check_pandas_roundtrip(df)\n\n arrays = [\n pd.Categorical(v1 * repeats),\n pd.Categorical(v2 * repeats),\n pd.Categorical(v3 * repeats)\n ]\n for values in arrays:\n _check_array_roundtrip(values)\n\n def test_empty_category(self):\n # ARROW-2443\n df = pd.DataFrame({'cat': pd.Categorical([])})\n _check_pandas_roundtrip(df)\n\n def test_mixed_types_fails(self):\n data = pd.DataFrame({'a': ['a', 1, 2.0]})\n with pytest.raises(pa.ArrowTypeError):\n pa.Table.from_pandas(data)\n\n data = pd.DataFrame({'a': [1, True]})\n with pytest.raises(pa.ArrowTypeError):\n pa.Table.from_pandas(data)\n\n def test_strided_data_import(self):\n cases = []\n\n columns = ['a', 'b', 'c']\n N, K = 100, 3\n random_numbers = np.random.randn(N, K).copy() * 100\n\n numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',\n 'f4', 'f8']\n\n for type_name in numeric_dtypes:\n cases.append(random_numbers.astype(type_name))\n\n # strings\n cases.append(np.array([tm.rands(10) for i in range(N * K)],\n dtype=object)\n .reshape(N, K).copy())\n\n # booleans\n boolean_objects = (np.array([True, False, True] * N, dtype=object)\n .reshape(N, K).copy())\n\n # add some nulls, so dtype comes back as objects\n boolean_objects[5] = None\n cases.append(boolean_objects)\n\n cases.append(np.arange(\"2016-01-01T00:00:00.001\", N * K,\n dtype='datetime64[ms]')\n .reshape(N, K).copy())\n\n strided_mask = (random_numbers > 0).astype(bool)[:, 0]\n\n for case in cases:\n df = pd.DataFrame(case, columns=columns)\n col = df['a']\n\n _check_pandas_roundtrip(df)\n _check_array_roundtrip(col)\n _check_array_roundtrip(col, mask=strided_mask)\n\n def test_all_nones(self):\n def _check_series(s):\n converted = pa.array(s)\n assert isinstance(converted, pa.NullArray)\n assert len(converted) == 3\n assert converted.null_count == 3\n assert converted[0] is pa.NA\n\n _check_series(pd.Series([None] * 3, 
dtype=object))\n _check_series(pd.Series([np.nan] * 3, dtype=object))\n _check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object))\n\n def test_partial_schema(self):\n data = OrderedDict([\n ('a', [0, 1, 2, 3, 4]),\n ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),\n ('c', [-10, -5, 0, 5, 10])\n ])\n df = pd.DataFrame(data)\n\n partial_schema = pa.schema([\n pa.field('a', pa.int64()),\n pa.field('b', pa.int32())\n ])\n\n expected_schema = pa.schema([\n pa.field('a', pa.int64()),\n pa.field('b', pa.int32()),\n pa.field('c', pa.int64())\n ])\n\n _check_pandas_roundtrip(df, schema=partial_schema,\n expected_schema=expected_schema)\n\n def test_table_batch_empty_dataframe(self):\n df = pd.DataFrame({})\n _check_pandas_roundtrip(df)\n _check_pandas_roundtrip(df, as_batch=True)\n\n df2 = pd.DataFrame({}, index=[0, 1, 2])\n _check_pandas_roundtrip(df2, preserve_index=True)\n _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)\n\n def test_convert_empty_table(self):\n arr = pa.array([], type=pa.int64())\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64))\n arr = pa.array([], type=pa.string())\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n arr = pa.array([], type=pa.list_(pa.int64()))\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))\n tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))\n\n def test_non_natural_stride(self):\n \"\"\"\n ARROW-2172: converting from a Numpy array with a stride that's\n not a multiple of itemsize.\n \"\"\"\n dtype = np.dtype([('x', np.int32), ('y', np.int16)])\n data = np.array([(42, -1), (-43, 2)], dtype=dtype)\n assert data.strides == (6,)\n arr = pa.array(data['x'], type=pa.int32())\n assert arr.to_pylist() == [42, -43]\n arr = pa.array(data['y'], type=pa.int16())\n assert arr.to_pylist() == [-1, 2]\n\n def test_mixed_integer_columns(self):\n row = [[], []]\n df = pd.DataFrame(data=[row], columns=['foo', 123])\n expected_df = pd.DataFrame(data=[row], columns=['foo', '123'])\n _check_pandas_roundtrip(df, expected=expected_df, preserve_index=True)\n\n\ndef _fully_loaded_dataframe_example():\n from distutils.version import LooseVersion\n\n index = pd.MultiIndex.from_arrays([\n pd.date_range('2000-01-01', periods=5).repeat(2),\n np.tile(np.array(['foo', 'bar'], dtype=object), 5)\n ])\n\n c1 = pd.date_range('2000-01-01', periods=10)\n data = {\n 0: c1,\n 1: c1.tz_localize('utc'),\n 2: c1.tz_localize('US/Eastern'),\n 3: c1[::2].tz_localize('utc').repeat(2).astype('category'),\n 4: ['foo', 'bar'] * 5,\n 5: pd.Series(['foo', 'bar'] * 5).astype('category').values,\n 6: [True, False] * 5,\n 7: np.random.randn(10),\n 8: np.random.randint(0, 100, size=10),\n 9: pd.period_range('2013', periods=10, freq='M')\n }\n\n if LooseVersion(pd.__version__) >= '0.21':\n # There is an issue with pickling IntervalIndex in pandas 0.20.x\n data[10] = pd.interval_range(start=1, freq=1, periods=10)\n\n return pd.DataFrame(data, index=index)\n\n\[email protected]('columns', ([b'foo'], ['foo']))\ndef test_roundtrip_with_bytes_unicode(columns):\n df = pd.DataFrame(columns=columns)\n table1 = pa.Table.from_pandas(df)\n table2 = pa.Table.from_pandas(table1.to_pandas())\n assert table1.equals(table2)\n assert table1.schema.equals(table2.schema)\n assert table1.schema.metadata == table2.schema.metadata\n\n\ndef _check_serialize_components_roundtrip(df):\n ctx = pa.default_serialization_context()\n\n components = 
ctx.serialize(df).to_components()\n deserialized = ctx.deserialize_components(components)\n\n tm.assert_frame_equal(df, deserialized)\n\n\ndef test_serialize_deserialize_pandas():\n # ARROW-1784, serialize and deserialize DataFrame by decomposing\n # BlockManager\n df = _fully_loaded_dataframe_example()\n _check_serialize_components_roundtrip(df)\n\n\ndef _pytime_from_micros(val):\n microseconds = val % 1000000\n val //= 1000000\n seconds = val % 60\n val //= 60\n minutes = val % 60\n hours = val // 60\n return time(hours, minutes, seconds, microseconds)\n\n\ndef _pytime_to_micros(pytime):\n return (pytime.hour * 3600000000 +\n pytime.minute * 60000000 +\n pytime.second * 1000000 +\n pytime.microsecond)\n"
] | [
[
"numpy.ones",
"pandas.Series",
"numpy.testing.assert_equal",
"numpy.dtype",
"numpy.random.seed",
"pandas.Categorical",
"numpy.int64",
"pandas.util.testing.assert_series_equal",
"numpy.float64",
"pandas.period_range",
"numpy.ma.masked_array",
"pandas.to_numeric",
"numpy.testing.assert_array_equal",
"numpy.bool_",
"pandas.util.testing.rands",
"pandas.to_datetime",
"numpy.isnan",
"pandas.isnull",
"pandas.Timestamp",
"numpy.random.randint",
"numpy.sqrt",
"pandas.date_range",
"numpy.zeros",
"numpy.arange",
"numpy.int32",
"pandas.interval_range",
"pandas.concat",
"pandas.Index",
"numpy.random.random_sample",
"pandas.DatetimeIndex",
"numpy.empty",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.iinfo",
"numpy.array",
"pandas.util.testing.assert_frame_equal"
]
] |
msenosain/TMA36_dataanalysis | [
"ba390b40e9ffb2bf8ec39b3bd6e8aa000174c313"
] | [
"src/data_integration/pw_corr.py"
] | [
"import pandas as pd\nimport pingouin as pg\n\ndef pw_corr(data_path=\"data/TMA36_project/Radiomics/processed/rad_healthmyne.csv\", \n cde_path=\"data/TMA36_project/CDE/CDE_TMA36_2020FEB25_SA_MF.csv\"):\n rad_hm = pd.read_csv(data_path, index_col=0)\n cde = pd.read_csv(cde_path, index_col=1)\n cde_sila = pd.DataFrame(cde['SILA'])\n rad_hm_sila = pd.merge(rad_hm, cde_sila, how='left', left_index=True, right_index=True)\n pairwise = rad_hm_sila.pairwise_corr(method='spearman',padjust='holm', columns=['SILA'])\n pairwise_sig = pairwise[pairwise['p-corr']<0.05]\n\n return pairwise_sig"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.merge"
]
] |
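The row above pairs a single source file (src/data_integration/pw_corr.py) with the library calls recorded for it: pandas.read_csv, pandas.DataFrame and pandas.merge. The dump itself does not state how the apis column was produced, so the following is only a minimal sketch of one way such a list could be derived from a row's code string using Python's standard ast module; the function name extract_module_apis and its simplifications (plain `import x as y` aliases only, no from-imports or nested attributes) are editorial assumptions, not part of the dataset.

```python
import ast

def extract_module_apis(code: str) -> list:
    """Collect call names such as 'pandas.read_csv' from a source string.

    Sketch only: it resolves plain 'import x' / 'import x as y' aliases and
    direct attribute calls on those aliases; 'from x import y' is ignored.
    """
    tree = ast.parse(code)
    alias_to_module = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                alias_to_module[alias.asname or alias.name] = alias.name
    found = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            target = node.func.value
            if isinstance(target, ast.Name) and target.id in alias_to_module:
                found.add(alias_to_module[target.id] + "." + node.func.attr)
    return sorted(found)
```

Applied to the pw_corr.py string above, this should return ['pandas.DataFrame', 'pandas.merge', 'pandas.read_csv'], i.e. the same three names listed in that row's apis cell, order aside.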
kyuhyoung/grasping-invisible | [
"2aaaeb9e28995628ec038a79496453be9f26ffff"
] | [
"utils.py"
] | [
"import math\n\nimport numpy as np\nfrom skimage.morphology.convex_hull import convex_hull_image\nfrom scipy.ndimage.morphology import binary_dilation\n\n\ndef check_grasp_margin(target_mask_heightmap, depth_heightmap):\n margin_mask = binary_dilation(target_mask_heightmap, iterations=10).astype(np.float32)-target_mask_heightmap\n margin_depth = margin_mask * depth_heightmap\n margin_depth[np.isnan(margin_depth)] = 0\n margin_depth[margin_depth > 0.3] = 0\n margin_depth[margin_depth < 0.02] = 0\n margin_depth[margin_depth > 0] = 1\n margin_value = np.sum(margin_depth)\n return margin_value/np.sum(margin_mask), margin_value/np.sum(target_mask_heightmap)\n\n\ndef check_push_target_oriented(best_pix_ind, push_end_pix_yx, target_mask_heightmap, mask_count_threshold=5):\n mask_hull = convex_hull_image(target_mask_heightmap)\n mask_count = 0\n x1 = best_pix_ind[2]\n y1 = best_pix_ind[1]\n x2 = push_end_pix_yx[1]\n y2 = push_end_pix_yx[0]\n x_range = abs(x2-x1)\n y_range = abs(y2-y1)\n if x_range > y_range:\n k = (y2-y1)/(x2-x1)\n b = y1-k*x1\n for x in range(min(int(x1), int(x2)), max(int(x1), int(x2))+1):\n y = int(k*x+b)\n try:\n mask_count += mask_hull[y, x]\n except IndexError:\n pass\n else:\n k = (x2-x1)/(y2-y1)\n b = x1-k*y1\n for y in range(min(int(y1), int(y2)), max(int(y1), int(y2))+1):\n x = int(k*y+b)\n try:\n mask_count += mask_hull[y, x]\n except IndexError:\n pass\n if mask_count > mask_count_threshold:\n return True\n else:\n return False\n\n\ndef check_grasp_target_oriented(best_pix_ind, target_mask_heightmap):\n mask_hull = convex_hull_image(target_mask_heightmap)\n if mask_hull[int(best_pix_ind[1]), int(best_pix_ind[2])]:\n return True\n else:\n return False\n\n\ndef get_push_pix(push_maps, num_rotations):\n push_pix_ind = np.unravel_index(np.argmax(push_maps), push_maps.shape)\n push_end_pix_yx = get_push_end_pix_yx(push_pix_ind, num_rotations)\n return push_pix_ind, push_end_pix_yx\n\n\ndef get_push_end_pix_yx(push_pix_ind, num_rotations):\n push_orientation = [1.0, 0.0]\n push_length_pix = 0.1/0.002\n rotation_angle = np.deg2rad(push_pix_ind[0]*(360.0/num_rotations))\n push_direction = np.asarray([push_orientation[0] * np.cos(rotation_angle) - push_orientation[1] * np.sin(rotation_angle),\n push_orientation[0] * np.sin(rotation_angle) + push_orientation[1] * np.cos(rotation_angle)])\n return [push_pix_ind[1] + push_direction[1] * push_length_pix, push_pix_ind[2] + push_direction[0] * push_length_pix]\n\n\ndef check_env_depth_change(prev_depth_heightmap, depth_heightmap, change_threshold=300):\n depth_diff = abs(prev_depth_heightmap-depth_heightmap)\n depth_diff[np.isnan(depth_diff)] = 0\n depth_diff[depth_diff > 0.3] = 0\n depth_diff[depth_diff < 0.02] = 0\n depth_diff[depth_diff > 0] = 1\n change_value = np.sum(depth_diff)\n change_detected = change_value > change_threshold\n\n return change_detected, change_value\n\n\ndef check_target_depth_change(prev_depth_heightmap, prev_target_mask_heightmap, depth_heightmap, change_threshold=50):\n prev_mask_hull = binary_dilation(convex_hull_image(prev_target_mask_heightmap), iterations=5)\n depth_diff = prev_mask_hull*(prev_depth_heightmap-depth_heightmap)\n depth_diff[np.isnan(depth_diff)] = 0\n depth_diff[depth_diff > 0.3] = 0\n depth_diff[depth_diff < 0.02] = 0\n depth_diff[depth_diff > 0] = 1\n change_value = np.sum(depth_diff)\n change_detected = change_value > change_threshold\n\n return change_detected, change_value\n\n\ndef process_mask_heightmaps(segment_results, seg_mask_heightmaps):\n names = []\n heightmaps = []\n for i 
in range(len(segment_results['labels'])):\n name = segment_results['labels'][i]\n heightmap = seg_mask_heightmaps[:, :, i]\n if np.sum(heightmap) > 10:\n names.append(name)\n heightmaps.append(heightmap)\n return {'names': names, 'heightmaps': heightmaps}\n\n\ndef get_replay_id(predicted_value_log, label_value_log, reward_value_log, sample_ind, replay_type):\n # Prioritized experience replay, find sample with highest surprise value\n sample_ind = np.asarray(sample_ind)\n predicted_values = np.asarray(predicted_value_log)[sample_ind]\n label_values = np.asarray(label_value_log)[sample_ind]\n reward_values = np.asarray(reward_value_log)[sample_ind]\n if replay_type == 'augment':\n # assume predicted_value for different mask input are close\n label_values = label_values - reward_values + 1.0\n\n sample_surprise_values = np.abs(predicted_values - label_values)\n sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0])\n sorted_sample_ind = sample_ind[sorted_surprise_ind]\n pow_law_exp = 2\n rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1)))\n sample_iteration = sorted_sample_ind[rand_sample_ind]\n print(replay_type.capitalize(), 'replay: iteration %d (surprise value: %f)' %\n (sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))\n return sample_iteration\n\n\ndef get_pointcloud(color_img, depth_img, masks_imgs, camera_intrinsics):\n\n # Get depth image size\n im_h = depth_img.shape[0]\n im_w = depth_img.shape[1]\n\n # Project depth into 3D point cloud in camera coordinates\n pix_x, pix_y = np.meshgrid(np.linspace(0, im_w-1, im_w), np.linspace(0, im_h-1, im_h))\n cam_pts_x = np.multiply(pix_x-camera_intrinsics[0][2],depth_img/camera_intrinsics[0][0])\n cam_pts_y = np.multiply(pix_y-camera_intrinsics[1][2],depth_img/camera_intrinsics[1][1])\n cam_pts_z = depth_img.copy()\n cam_pts_x.shape = (im_h*im_w, 1)\n cam_pts_y.shape = (im_h*im_w, 1)\n cam_pts_z.shape = (im_h*im_w, 1)\n\n # Reshape image into colors for 3D point cloud\n rgb_pts_r = color_img[:, :, 0]\n rgb_pts_g = color_img[:, :, 1]\n rgb_pts_b = color_img[:, :, 2]\n rgb_pts_r.shape = (im_h*im_w, 1)\n rgb_pts_g.shape = (im_h*im_w, 1)\n rgb_pts_b.shape = (im_h*im_w, 1)\n\n num_masks = masks_imgs.shape[2]\n masks_pts = masks_imgs.copy()\n masks_pts = masks_pts.transpose(2, 0, 1).reshape(num_masks, -1)\n\n cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1)\n rgb_pts = np.concatenate((rgb_pts_r, rgb_pts_g, rgb_pts_b), axis=1)\n\n return cam_pts, rgb_pts, masks_pts\n\n\ndef get_heightmap(color_img, depth_img, masks_imgs, cam_intrinsics, cam_pose, workspace_limits, heightmap_resolution):\n\n num_masks = masks_imgs.shape[2]\n\n # Compute heightmap size\n heightmap_size = np.round(((workspace_limits[1][1] - workspace_limits[1][0])/heightmap_resolution, (workspace_limits[0][1] - workspace_limits[0][0])/heightmap_resolution)).astype(int)\n\n # Get 3D point cloud from RGB-D images\n surface_pts, color_pts, masks_pts = get_pointcloud(color_img, depth_img, masks_imgs, cam_intrinsics)\n\n # Transform 3D point cloud from camera coordinates to robot coordinates\n surface_pts = np.transpose(np.dot(cam_pose[0:3,0:3],np.transpose(surface_pts)) + np.tile(cam_pose[0:3,3:],(1,surface_pts.shape[0])))\n\n # Sort surface points by z value\n sort_z_ind = np.argsort(surface_pts[:,2])\n surface_pts = surface_pts[sort_z_ind]\n color_pts = color_pts[sort_z_ind]\n masks_pts = masks_pts[:, sort_z_ind]\n\n # Filter out surface points outside heightmap boundaries\n heightmap_valid_ind 
= np.logical_and(np.logical_and(np.logical_and(np.logical_and(surface_pts[:,0] >= workspace_limits[0][0], surface_pts[:,0] < workspace_limits[0][1]), surface_pts[:,1] >= workspace_limits[1][0]), surface_pts[:,1] < workspace_limits[1][1]), surface_pts[:,2] < workspace_limits[2][1])\n surface_pts = surface_pts[heightmap_valid_ind]\n color_pts = color_pts[heightmap_valid_ind]\n masks_pts = masks_pts[:, heightmap_valid_ind]\n\n # Create orthographic top-down-view RGB-D heightmaps\n color_heightmap_r = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)\n color_heightmap_g = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)\n color_heightmap_b = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8)\n masks_heightmaps = np.zeros((heightmap_size[0], heightmap_size[1], num_masks), dtype=np.uint8)\n depth_heightmap = np.zeros(heightmap_size)\n heightmap_pix_x = np.floor((surface_pts[:,0] - workspace_limits[0][0])/heightmap_resolution).astype(int)\n heightmap_pix_y = np.floor((surface_pts[:,1] - workspace_limits[1][0])/heightmap_resolution).astype(int)\n color_heightmap_r[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [0]]\n color_heightmap_g[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [1]]\n color_heightmap_b[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [2]]\n color_heightmap = np.concatenate((color_heightmap_r, color_heightmap_g, color_heightmap_b), axis=2)\n for c in range(num_masks):\n masks_heightmaps[heightmap_pix_y, heightmap_pix_x, c] = masks_pts[c, :]\n depth_heightmap[heightmap_pix_y, heightmap_pix_x] = surface_pts[:, 2]\n z_bottom = workspace_limits[2][0]\n depth_heightmap = depth_heightmap - z_bottom\n depth_heightmap[depth_heightmap < 0] = 0\n depth_heightmap[depth_heightmap == -z_bottom] = np.nan\n\n return color_heightmap, depth_heightmap, masks_heightmaps\n\n\n# Get rotation matrix from euler angles\ndef euler2rotm(theta):\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]])\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]])\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]])\n R = np.dot(R_z, np.dot(R_y, R_x))\n return R\n"
] | [
[
"numpy.sum",
"numpy.multiply",
"numpy.argsort",
"numpy.asarray",
"scipy.ndimage.morphology.binary_dilation",
"numpy.transpose",
"numpy.logical_and",
"numpy.concatenate",
"numpy.abs",
"numpy.cos",
"numpy.isnan",
"numpy.linspace",
"numpy.random.power",
"numpy.deg2rad",
"numpy.tile",
"numpy.zeros",
"numpy.argmax",
"numpy.floor",
"numpy.round",
"numpy.sin",
"numpy.dot"
]
] |
xiebaiyuan/PaddleLite | [
"6f7280a91741d1c63fcb0296ac5c08c4e81c2a90"
] | [
"lite/tests/unittest_py/auto_scan_base.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport abc\nimport os\nimport enum\nimport time\nimport logging\nimport shutil\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.initializer import NumpyArrayInitializer\nfrom paddle.fluid.core import PassVersionChecker\nimport paddle.fluid.core as core\nfrom paddle import compat as cpt\nimport paddle.inference as paddle_infer\nfrom typing import Optional, List, Callable, Dict, Any, Set\nfrom program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model\n\nfrom itertools import product\nfrom program_config import CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\n\nimport hypothesis\nfrom hypothesis import given, settings, seed\nimport hypothesis.strategies as st\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--target\", choices=['Host', 'X86','CUDA','ARM','OpenCL','FPGA','NPU','MLU','RKNPU','APU','HUAWEI_ASCEND_NPU','INTEL_FPGA'], required=True)\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\nsettings.register_profile(\n \"ci\",\n max_examples=10,\n suppress_health_check=hypothesis.HealthCheck.all(),\n deadline=None,\n print_blob=True,\n derandomize=True,\n report_multiple_bugs=False)\nsettings.load_profile(\"ci\")\n\nclass IgnoreReasonsBase(enum.Enum):\n # Paddle not support, but paddlelite support, we need to add the feature.\n PADDLE_NOT_IMPLEMENTED = 0\n # paddlelite not support.\n PADDLELITE_NOT_SUPPORT = 1\n # Accuracy is abnormal after enabling pass.\n ACCURACY_ERROR = 2\n\n\n\nclass AutoScanBaseTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n self.valid_places = []\n self.thread_num = [1]\n\n np.random.seed(1024)\n paddle.enable_static()\n super(AutoScanBaseTest, self).__init__(*args, **kwargs)\n self.ignore_cases = []\n abs_dir = os.path.abspath(os.path.dirname(__file__))\n self.cache_dir = os.path.join(abs_dir,\n str(self.__module__) + '_cache_dir')\n self.available_passes_in_framework = set()\n self.num_ran_programs = 0\n self.num_invalid_programs = 0\n self.num_ignore_tests = 0\n self.num_predictor_kinds = 0\n\n args = parser.parse_args()\n self.args = args\n\n\n @abc.abstractmethod\n def sample_program_configs(self, draw):\n '''\n Generate all config with the combination of different Input tensor shape and\n different Attr values.\n '''\n raise NotImplementedError\n\n @abc.abstractmethod\n def sample_predictor_configs(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def add_ignore_check_case(\n self,\n teller: [Callable[[ProgramConfig, CxxConfig], bool]],\n reason: IgnoreReasonsBase,\n note: str):\n self.ignore_cases.append((teller, reason, note))\n\n @abc.abstractmethod\n def is_program_valid(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool:\n return True\n\n def run_test_config(self, model, params, prog_config, pred_config,\n feed_data) -> Dict[str, 
np.ndarray]:\n '''\n Test a single case.\n '''\n pred_config.set_model_buffer(model, len(model), params, len(params))\n predictor = paddle_infer.create_predictor(pred_config)\n self.available_passes_in_framework = self.available_passes_in_framework | set(\n pred_config.pass_builder().all_passes())\n\n for name, _ in prog_config.inputs.items():\n input_tensor = predictor.get_input_handle(name)\n input_tensor.copy_from_cpu(feed_data[name]['data'])\n if feed_data[name]['lod'] is not None:\n input_tensor.set_lod(feed_data[name]['lod'])\n predictor.run()\n result = {}\n for out_name, o_name in zip(prog_config.outputs,\n predictor.get_output_names()):\n result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()\n return result\n\n\n @abc.abstractmethod\n def assert_tensors_near(self,\n atol: float,\n rtol: float,\n tensor: Dict[str, np.array],\n baseline: Dict[str, np.array]):\n if len(tensor) == 1 and len(baseline) == 1:\n tensor_key = list(tensor.keys())\n arr = np.array(tensor[tensor_key[0]])\n base_key = list(baseline.keys())\n base = np.array(baseline[base_key[0]])\n self.assertTrue(\n base.shape == arr.shape,\n \"The output shapes are not equal, the baseline shape is \" +\n str(base.shape) + ', but got ' + str(arr.shape))\n self.assertTrue(\n np.allclose(\n base, arr, atol=atol, rtol=rtol),\n \"Output has diff. \")\n else:\n for key in tensor:\n opencl_str = \"/target_trans\"\n index = key.rfind(opencl_str)\n paddlekey=key\n if index > 0:\n paddlekey = key[0: index]\n if (key == \"saved_mean\" or key == \"saved_variance\"):\n # training using data\n continue\n arr = np.array(tensor[key])\n self.assertTrue(\n baseline[paddlekey].shape == arr.shape,\n \"The output shapes are not equal, the baseline shape is \" +\n str(baseline[paddlekey].shape) + ', but got ' + str(arr.shape))\n self.assertTrue(\n np.allclose(\n baseline[paddlekey], arr, atol=atol, rtol=rtol),\n \"Output has diff. 
\")\n\n def generate_op_config(self,\n ops_config: List[Dict[str, Any]]) -> List[OpConfig]:\n ops = []\n for i in range(len(ops_config)):\n op_config = ops_config[i]\n ops.append(\n OpConfig(\n type=op_config['op_type'],\n inputs=op_config['op_inputs'],\n outputs=op_config['op_outputs'],\n attrs=op_config['op_attrs']))\n return ops\n\n @abc.abstractmethod\n def ignore_log(self, msg: str):\n logging.warning(\"SKIP: \" + msg)\n\n @abc.abstractmethod\n def fail_log(self, msg: str):\n logging.fatal(\"FAILE: \" + msg)\n\n @abc.abstractmethod\n def success_log(self, msg: str):\n logging.info(\"SUCCESS: \" + msg)\n\n @abc.abstractmethod\n def create_inference_config(self,\n passes: Optional[List[str]]=None,\n use_gpu: bool=False,\n use_mkldnn: bool=False,\n ir_optim: Optional[bool]=None):\n config = paddle_infer.Config()\n config.switch_ir_debug(True)\n config.disable_glog_info()\n if ir_optim is not None:\n config.switch_ir_optim(ir_optim)\n if use_gpu:\n config.enable_use_gpu(100, 0)\n if use_mkldnn:\n config.enable_mkldnn()\n if passes is not None:\n config.pass_builder().set_passes(passes)\n self.passes = passes\n return config\n\n def run_test(self, quant=False, prog_configs=None):\n status = True\n\n paddlelite_configs, op_list_, (atol_, rtol_) = self.sample_predictor_configs()\n for prog_config in prog_configs:\n # if program is invalid, we should ignore this cases.\n program_valid_ = False\n for paddlelite_config in paddlelite_configs:\n # judge validity of program\n if self.is_program_valid(prog_config, paddlelite_config):\n program_valid_ = True\n if not program_valid_:\n self.num_invalid_programs += 1\n continue\n\n\n self.num_ran_programs += 1\n model, params = create_fake_model(prog_config)\n if quant:\n model, params = create_quant_model(model, params)\n\n feed_data = {}\n for name, tensor_config in prog_config.inputs.items():\n feed_data[name] = {\n 'data': tensor_config.data,\n 'lod': tensor_config.lod\n }\n results: List[Dict[str, np.ndarray]] = []\n\n # baseline: cpu no ir_optim run\n base_config = self.create_inference_config(ir_optim=False)\n logging.info('[ProgramConfig]: ' + str(prog_config))\n results.append(\n self.run_test_config(model, params, prog_config, base_config,\n feed_data))\n\n\n for paddlelite_config in paddlelite_configs:\n # judge validity of program\n if not self.is_program_valid(prog_config, paddlelite_config):\n continue\n\n self.num_predictor_kinds += 1\n # ignore info\n ignore_flag = False\n pred_config = paddlelite_config.value()\n for ignore_info in self.ignore_cases:\n if ignore_info[0](prog_config, paddlelite_config):\n ignore_flag = True\n self.num_ignore_tests += 1\n if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR:\n self.ignore_log(\"[ACCURACY_ERROR] \" +\n ignore_info[2] + ' ' + ' vs ' + self.\n paddlelite_config_str(pred_config))\n else:\n raise NotImplementedError\n break\n if os.path.exists(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n try:\n result, opt_model_bytes = self.run_lite_config(model, params, feed_data, pred_config)\n results.append(result)\n self.assert_tensors_near(atol_, rtol_, results[-1],\n results[0])\n if not ignore_flag and self.passes is not None:\n self.assert_op_list(opt_model_bytes, op_list_)\n except Exception as e:\n self.fail_log(\n self.paddlelite_config_str(pred_config) +\n '\\033[1;31m \\nERROR INFO: {}\\033[0m'.format(str(e)))\n if not ignore_flag:\n status = False\n continue\n self.success_log('PredictorConfig: ' + self.\n 
paddlelite_config_str(pred_config))\n self.assertTrue(status)\n\n def inference_config_str(self, config) -> bool:\n dic = {}\n enable_mkldnn = config.mkldnn_enabled()\n dic['use_mkldnn'] = enable_mkldnn\n enable_gpu = config.use_gpu()\n return str(dic)\n\n def paddlelite_config_str(self, config) -> bool:\n return str(config)\n\n # method for ignoring\n def add_ignore_pass_case(self):\n return\n\n # judge if program contain op_list\n def assert_op_list(self, model_bytes, op_list_after_fusion):\n if not self.passes:\n raise ValueError(\n \"In PassAutoScan you should give a valid pass name.\")\n pg = paddle.static.deserialize_program(model_bytes)\n main_block = pg.desc.block(0)\n after_op_list = list()\n for i in range(main_block.op_size()):\n if main_block.op(i).type() in [\"feed\", \"fetch\"]:\n continue\n after_op_list.append(main_block.op(i).type())\n self.assertTrue(\n op_list_after_fusion == after_op_list,\n \"Expected operator list after fusion is {}, but now it's {}\".format(\n op_list_after_fusion, after_op_list), )\n\n\n def run_and_statis(\n self,\n quant=False,\n max_examples=100,\n reproduce=None,\n min_success_num=25,\n max_duration=180,\n passes=None ):\n if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == \"dev\":\n max_examples *= 10\n min_success_num *= 10\n # while at ce phase, there's no limit on time\n max_duration = -1\n start_time = time.time()\n settings.register_profile(\n \"ci\",\n max_examples=max_examples,\n suppress_health_check=hypothesis.HealthCheck.all(),\n deadline=None,\n print_blob=True,\n derandomize=True,\n report_multiple_bugs=False, )\n settings.load_profile(\"ci\")\n\n self.passes = passes\n self.add_ignore_pass_case()\n\n def program_generator(draw):\n return self.sample_program_configs(draw)\n\n def run_test(prog_config):\n return self.run_test(quant=quant, prog_configs=[prog_config])\n\n # if current unittest is not active on the input target, we will exit directly.\n if not self.is_actived():\n logging.info(\"Error: This test is not actived on \" + self.get_target())\n return\n\n generator = st.composite(program_generator)\n loop_func = given(generator())(run_test)\n if reproduce is not None:\n loop_func = reproduce(loop_func)\n logging.info(\"Start to running test of {}\".format(type(self)))\n loop_func()\n logging.info(\n \"===================Statistical Information===================\")\n logging.info(\"Number of Generated Programs: {}\".format(\n self.num_ran_programs + self.num_invalid_programs))\n logging.info(\"Number of Invalid Programs: {}\".format(\n self.num_invalid_programs))\n logging.info(\"Number of Ran Programs: {}\".format(self.num_ran_programs))\n logging.info(\"Number of Ignored Tests: {}\".format(\n self.num_ignore_tests))\n if self.num_predictor_kinds == 0:\n successful_ran_programs = int(self.num_ran_programs)\n min_success_num = 0\n else:\n successful_ran_programs = int(self.num_ran_programs -\n self.num_ignore_tests /\n self.num_predictor_kinds)\n\n logging.info(\n \"Number of successfully ran programs approximately equal to {}\".\n format(successful_ran_programs))\n if successful_ran_programs < min_success_num:\n logging.warning(\n \"satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds\"\n )\n logging.fatal(\n \"At least {} programs need to ran successfully, but now only about {} programs satisfied.\".\n format(min_success_num, successful_ran_programs))\n assert False\n used_time = time.time() - start_time\n if max_duration > 0 and used_time > max_duration:\n logging.fatal(\n \"The duration exceeds {} 
seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.\".\n format(max_duration))\n assert False\n\n @abc.abstractmethod\n def run_lite_config(self, model, params, feed_data, pred_config) -> Dict[str, np.ndarray]:\n raise NotImplementedError\n\n\n # enable a predictor config\n # configs will be generated automatically according to inputs\n def enable_testing_on_place(self, target=None, precision=None, layout=None, thread=None, places=None) -> None:\n # set thread_num\n if isinstance(thread,list):\n self.thread_num = list(set(self.thread_num + thread))\n if isinstance(thread,int):\n self.thread_num.append(thread)\n self.thread_num = list(self.thread_num)\n\n # if list[Place] is inputed, this will be used directly\n if places is not None:\n assert isinstance(places, list)\n self.valid_places.append(places)\n return\n # otherwise we will generate a list[Place] from the inputed[target\\precision\\layout]\n assert (target is not None)\n target_ = target if isinstance(target,list) else [target]\n precision_ = precision if isinstance(precision, list) else [precision]\n layout_ = layout if isinstance(layout,list) else [layout]\n for tar_, pre_, lay_ in product(target_, precision_, layout_):\n self.valid_places.append([Place(tar_, pre_, lay_)])\n return\n\n\n def get_target(self) -> str:\n return self.args.target\n\n\n def is_actived(self) -> bool:\n for valid_place_ in self.valid_places:\n if self.get_target() in valid_place_[0]:\n return True\n return False\n\n def get_predictor_configs(self) -> List[CxxConfig]:\n return self.target_to_predictor_configs(self, self.get_target())\n\n # get valid test configs\n @staticmethod\n def target_to_predictor_configs(self,target:str) -> List[CxxConfig]:\n configs_ = []\n for elem_ in self.valid_places:\n if target in elem_[0]:\n for thread_ in self.thread_num:\n config_ = CxxConfig()\n config_.set_valid_places(elem_)\n config_.set_threads(thread_)\n configs_.append(config_)\n return configs_\n"
] | [
[
"numpy.array",
"numpy.allclose",
"numpy.random.seed"
]
] |
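The auto_scan_base.py row above is tagged with only three numpy calls (numpy.array, numpy.allclose, numpy.random.seed) even though the file is large, which invites a quick sanity check that every listed name really occurs in its source string. Below is a small sketch of such a check; the record layout (parallel lists of code strings and per-file api lists) mirrors the column headers above, but the loader producing those lists is assumed, not shown in this dump.

```python
def missing_apis(code_blobs, api_lists):
    """Return listed API names whose final attribute (e.g. 'allclose' from
    'numpy.allclose') never appears as text in the matching source string."""
    missing = []
    for code, apis in zip(code_blobs, api_lists):
        for api in apis:
            attr = api.rsplit(".", 1)[-1]
            if attr not in code:
                missing.append(api)
    return missing
```

For the row above this plain-text check comes back empty, since 'array', 'allclose' and 'seed' all appear in auto_scan_base.py; it is deliberately loose (substring matching only) and is no substitute for the ast-based extraction sketched earlier.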
jdehotin/TensorFlow | [
"a6c5f8e4e013e54fed8dfcf49fb6de365f018022",
"a6c5f8e4e013e54fed8dfcf49fb6de365f018022",
"a6c5f8e4e013e54fed8dfcf49fb6de365f018022",
"a6c5f8e4e013e54fed8dfcf49fb6de365f018022"
] | [
"tensorflow/python/summary/impl/io_wrapper.py",
"tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py",
"tensorflow/contrib/slim/python/slim/evaluation_test.py",
"tensorflow/contrib/layers/python/layers/feature_column_ops_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions that wrap both gfile and gcs.\n\nThis module is *not* intended to be a general-purpose IO wrapper library; it\nonly implements the operations that are necessary for loading event files. The\nfunctions either dispatch to the gcs library or to gfile, depending on whether\nthe path is a GCS 'pseudo-path' (i.e., it satisfies gcs.IsGCSPath) or not.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.summary.impl import event_file_loader\nfrom tensorflow.python.summary.impl import gcs\nfrom tensorflow.python.summary.impl import gcs_file_loader\n\n\ndef CreateFileLoader(path):\n \"\"\"Creates a file loader for the given path.\n\n Args:\n path: A string representing either a normal path or a GCS\n Returns:\n An object with a Load() method that yields event_pb2.Event protos.\n \"\"\"\n if gcs.IsGCSPath(path):\n return gcs_file_loader.GCSFileLoader(path)\n else:\n return event_file_loader.EventFileLoader(path)\n\n\ndef ListDirectoryAbsolute(directory):\n \"\"\"Yields all files in the given directory. The paths are absolute.\"\"\"\n if gcs.IsGCSPath(directory):\n return gcs.ListDirectory(directory)\n else:\n return (os.path.join(directory, path)\n for path in gfile.ListDirectory(directory))\n\n\ndef ListRecursively(top):\n \"\"\"Walks a directory tree, yielding (dir_path, file_paths) tuples.\n\n For each of `top` and its subdirectories, yields a tuple containing the path\n to the directory and the path to each of the contained files. Note that\n unlike os.Walk()/gfile.Walk(), this does not list subdirectories and the file\n paths are all absolute.\n\n If the directory does not exist, this yields nothing.\n\n Args:\n top: A path to a directory..\n Yields:\n A list of (dir_path, file_paths) tuples.\n \"\"\"\n if gcs.IsGCSPath(top):\n for x in gcs.ListRecursively(top):\n yield x\n else:\n for dir_path, _, filenames in gfile.Walk(top):\n yield (dir_path, (os.path.join(dir_path, filename)\n for filename in filenames))\n\n\ndef IsDirectory(path):\n \"\"\"Returns true if path exists and is a directory.\"\"\"\n if gcs.IsGCSPath(path):\n return gcs.IsDirectory(path)\n else:\n return gfile.IsDirectory(path)\n\n\ndef Exists(path):\n if gcs.IsGCSPath(path):\n return gcs.Exists(path)\n else:\n return gfile.Exists(path)\n\n\ndef Size(path):\n \"\"\"Returns the number of bytes in the given file. Doesn't work on GCS.\"\"\"\n if gcs.IsGCSPath(path):\n raise NotImplementedError(\"io_wrapper.Size doesn't support GCS paths\")\n else:\n return gfile.Open(path).size()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Dirichlet Multinomial distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distributions.python.ops import distribution\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import special_math_ops\n\n\nclass DirichletMultinomial(distribution.Distribution):\n \"\"\"DirichletMultinomial mixture distribution.\n\n This distribution is parameterized by a vector `alpha` of concentration\n parameters for `k` classes and `n`, the counts per each class..\n\n #### Mathematical details\n\n The Dirichlet Multinomial is a distribution over k-class count data, meaning\n for each k-tuple of non-negative integer `counts = [c_1,...,c_k]`, we have a\n probability of these draws being made from the distribution. The distribution\n has hyperparameters `alpha = (alpha_1,...,alpha_k)`, and probability mass\n function (pmf):\n\n ```pmf(counts) = N! / (n_1!...n_k!) * Beta(alpha + c) / Beta(alpha)```\n\n where above `N = sum_j n_j`, `N!` is `N` factorial, and\n `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the multivariate beta\n function.\n\n This is a mixture distribution in that `M` samples can be produced by:\n 1. Choose class probabilities `p = (p_1,...,p_k) ~ Dir(alpha)`\n 2. Draw integers `m = (n_1,...,n_k) ~ Multinomial(N, p)`\n\n This class provides methods to create indexed batches of Dirichlet\n Multinomial distributions. If the provided `alpha` is rank 2 or higher, for\n every fixed set of leading dimensions, the last dimension represents one\n single Dirichlet Multinomial distribution. When calling distribution\n functions (e.g. `dist.pmf(counts)`), `alpha` and `counts` are broadcast to the\n same shape (if possible). 
In all cases, the last dimension of alpha/counts\n represents single Dirichlet Multinomial distributions.\n\n #### Examples\n\n ```python\n alpha = [1, 2, 3]\n n = 2\n dist = DirichletMultinomial(n, alpha)\n ```\n\n Creates a 3-class distribution, with the 3rd class is most likely to be drawn.\n The distribution functions can be evaluated on counts.\n\n ```python\n # counts same shape as alpha.\n counts = [0, 0, 2]\n dist.pmf(counts) # Shape []\n\n # alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.\n counts = [[1, 1, 0], [1, 0, 1]]\n dist.pmf(counts) # Shape [2]\n\n # alpha will be broadcast to shape [5, 7, 3] to match counts.\n counts = [[...]] # Shape [5, 7, 3]\n dist.pmf(counts) # Shape [5, 7]\n ```\n\n Creates a 2-batch of 3-class distributions.\n\n ```python\n alpha = [[1, 2, 3], [4, 5, 6]] # Shape [2, 3]\n n = [3, 3]\n dist = DirichletMultinomial(n, alpha)\n\n # counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.\n counts = [2, 1, 0]\n dist.pmf(counts) # Shape [2]\n ```\n\n \"\"\"\n\n # TODO(b/27419586) Change docstring for dtype of alpha once int allowed.\n def __init__(self,\n n,\n alpha,\n validate_args=False,\n allow_nan_stats=True,\n name=\"DirichletMultinomial\"):\n \"\"\"Initialize a batch of DirichletMultinomial distributions.\n\n Args:\n n: Non-negative floating point tensor, whose dtype is the same as\n `alpha`. The shape is broadcastable to `[N1,..., Nm]` with `m >= 0`.\n Defines this as a batch of `N1 x ... x Nm` different Dirichlet\n multinomial distributions. Its components should be equal to integer\n values.\n alpha: Positive floating point tensor, whose dtype is the same as\n `n` with shape broadcastable to `[N1,..., Nm, k]` `m >= 0`. Defines\n this as a batch of `N1 x ... x Nm` different `k` class Dirichlet\n multinomial distributions.\n validate_args: `Boolean`, default `False`. Whether to assert valid\n values for parameters `alpha` and `n`, and `x` in `prob` and\n `log_prob`. If `False`, correct behavior is not guaranteed.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: The name to prefix Ops created by this distribution class.\n\n Examples:\n\n ```python\n # Define 1-batch of 2-class Dirichlet multinomial distribution,\n # also known as a beta-binomial.\n dist = DirichletMultinomial(2.0, [1.1, 2.0])\n\n # Define a 2-batch of 3-class distributions.\n dist = DirichletMultinomial([3., 4], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n ```\n\n \"\"\"\n with ops.name_scope(name, values=[n, alpha]) as ns:\n # Broadcasting works because:\n # * The broadcasting convention is to prepend dimensions of size [1], and\n # we use the last dimension for the distribution, wherease\n # the batch dimensions are the leading dimensions, which forces the\n # distribution dimension to be defined explicitly (i.e. it cannot be\n # created automatically by prepending). 
This forces enough\n # explicitivity.\n # * All calls involving `counts` eventually require a broadcast between\n # `counts` and alpha.\n self._alpha = self._assert_valid_alpha(alpha, validate_args)\n self._n = self._assert_valid_n(n, validate_args)\n self._alpha_sum = math_ops.reduce_sum(\n self._alpha, reduction_indices=[-1], keep_dims=False)\n super(DirichletMultinomial, self).__init__(\n dtype=self._alpha.dtype,\n parameters={\"alpha\": self._alpha,\n \"alpha_sum\": self._alpha_sum,\n \"n\": self._n},\n is_continuous=False,\n is_reparameterized=False,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=ns)\n\n @property\n def n(self):\n \"\"\"Parameter defining this distribution.\"\"\"\n return self._n\n\n @property\n def alpha(self):\n \"\"\"Parameter defining this distribution.\"\"\"\n return self._alpha\n\n @property\n def alpha_sum(self):\n \"\"\"Summation of alpha parameter.\"\"\"\n return self._alpha_sum\n\n def _batch_shape(self):\n return array_ops.shape(self.alpha_sum)\n\n def _get_batch_shape(self):\n return self.alpha_sum.get_shape()\n\n def _event_shape(self):\n return array_ops.reverse(array_ops.shape(self.alpha), [True])[0]\n\n def _get_event_shape(self):\n # Event shape depends only on alpha, not \"n\".\n return self.alpha.get_shape().with_rank_at_least(1)[-1:]\n\n def _log_prob(self, counts):\n counts = self._assert_valid_counts(counts)\n ordered_prob = (special_math_ops.lbeta(self.alpha + counts) -\n special_math_ops.lbeta(self.alpha))\n log_prob = ordered_prob + distribution_util.log_combinations(\n self.n, counts)\n return log_prob\n\n def _prob(self, counts):\n return math_ops.exp(self._log_prob(counts))\n\n def _mean(self):\n normalized_alpha = self.alpha / array_ops.expand_dims(self.alpha_sum, -1)\n return array_ops.expand_dims(self.n, -1) * normalized_alpha\n\n def _variance(self):\n alpha_sum = array_ops.expand_dims(self.alpha_sum, -1)\n normalized_alpha = self.alpha / alpha_sum\n variance = -math_ops.batch_matmul(\n array_ops.expand_dims(normalized_alpha, -1),\n array_ops.expand_dims(normalized_alpha, -2))\n variance = array_ops.matrix_set_diag(variance, normalized_alpha *\n (1. 
- normalized_alpha))\n shared_factor = (self.n * (alpha_sum + self.n) /\n (alpha_sum + 1) * array_ops.ones_like(self.alpha))\n variance *= array_ops.expand_dims(shared_factor, -1)\n return variance\n\n def _assert_valid_counts(self, counts):\n \"\"\"Check counts for proper shape, values, then return tensor version.\"\"\"\n counts = ops.convert_to_tensor(counts, name=\"counts\")\n if not self.validate_args:\n return counts\n candidate_n = math_ops.reduce_sum(counts, reduction_indices=[-1])\n return control_flow_ops.with_dependencies([\n check_ops.assert_non_negative(counts),\n check_ops.assert_equal(\n self._n, candidate_n,\n message=\"counts do not sum to n\"),\n distribution_util.assert_integer_form(counts)], counts)\n\n def _assert_valid_alpha(self, alpha, validate_args):\n alpha = ops.convert_to_tensor(alpha, name=\"alpha\")\n if not validate_args:\n return alpha\n return control_flow_ops.with_dependencies(\n [check_ops.assert_rank_at_least(alpha, 1),\n check_ops.assert_positive(alpha)], alpha)\n\n def _assert_valid_n(self, n, validate_args):\n n = ops.convert_to_tensor(n, name=\"n\")\n if not validate_args:\n return n\n return control_flow_ops.with_dependencies(\n [check_ops.assert_non_negative(n),\n distribution_util.assert_integer_form(n)], n)\n\n\n_prob_note = \"\"\"\n\n For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability\n that after sampling `n` draws from this Dirichlet Multinomial\n distribution, the number of draws falling in class `j` is `n_j`. Note that\n different sequences of draws can result in the same counts, thus the\n probability includes a combinatorial coefficient.\n\n Note that input, \"counts\", must be a non-negative tensor with dtype `dtype`\n and whose shape can be broadcast with `self.alpha`. For fixed leading\n dimensions, the last dimension represents counts for the corresponding\n Dirichlet Multinomial distribution in `self.alpha`. `counts` is only legal if\n it sums up to `n` and its components are equal to integer values.\n\"\"\"\ndistribution_util.append_class_fun_doc(DirichletMultinomial.log_prob,\n doc_str=_prob_note)\ndistribution_util.append_class_fun_doc(DirichletMultinomial.prob,\n doc_str=_prob_note)\n\ndistribution_util.append_class_fun_doc(DirichletMultinomial.variance,\n doc_str=\"\"\"\n\n The variance for each batch member is defined as the following:\n\n ```\n Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *\n (n + alpha_0) / (1 + alpha_0)\n ```\n\n where `alpha_0 = sum_j alpha_j`.\n\n The covariance between elements in a batch is defined as:\n\n ```\n Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *\n (n + alpha_0) / (1 + alpha_0)\n ```\n\n\"\"\")\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for slim.evaluation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport glob\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import gfile\n\nslim = tf.contrib.slim\n\nFLAGS = flags.FLAGS\n\n\ndef GenerateTestData(num_classes, batch_size):\n inputs = np.random.rand(batch_size, num_classes)\n\n np.random.seed(0)\n labels = np.random.randint(low=0, high=num_classes, size=batch_size)\n labels = labels.reshape((batch_size,))\n return inputs, labels\n\n\ndef TestModel(inputs):\n scale = tf.Variable(1.0, trainable=False)\n\n # Scaling the outputs wont change the result...\n outputs = tf.mul(inputs, scale)\n return tf.argmax(outputs, 1), scale\n\n\ndef GroundTruthAccuracy(inputs, labels, batch_size):\n predictions = np.argmax(inputs, 1)\n num_correct = np.sum(predictions == labels)\n return float(num_correct) / batch_size\n\n\nclass EvaluationTest(tf.test.TestCase):\n\n def setUp(self):\n super(EvaluationTest, self).setUp()\n\n num_classes = 8\n batch_size = 16\n inputs, labels = GenerateTestData(num_classes, batch_size)\n self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)\n\n self._global_step = slim.get_or_create_global_step()\n self._inputs = tf.constant(inputs, dtype=tf.float32)\n self._labels = tf.constant(labels, dtype=tf.int64)\n self._predictions, self._scale = TestModel(self._inputs)\n\n def testUpdateOpsAreEvaluated(self):\n accuracy, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n\n with self.test_session() as sess:\n slim.evaluation.evaluation(\n sess, initial_op=initial_op, eval_op=update_op)\n self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)\n\n def testFinalOpsIsEvaluated(self):\n _, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n\n with self.test_session() as sess:\n accuracy_value = slim.evaluation.evaluation(\n sess, initial_op=initial_op, final_op=update_op)\n self.assertAlmostEqual(accuracy_value, self._expected_accuracy)\n\n def testFinalOpsOnEvaluationLoop(self):\n value_op, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n init_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n # Create Checkpoint and log directories\n chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')\n gfile.MakeDirs(chkpt_dir)\n logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')\n gfile.MakeDirs(logdir)\n\n # Save initialized variables to checkpoint directory\n 
saver = tf.train.Saver()\n with self.test_session() as sess:\n init_op.run()\n saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))\n\n # Now, run the evaluation loop:\n accuracy_value = slim.evaluation.evaluation_loop(\n '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,\n max_number_of_evaluations=1)\n self.assertAlmostEqual(accuracy_value, self._expected_accuracy)\n\n def _create_names_to_metrics(self, predictions, labels):\n accuracy0, update_op0 = tf.contrib.metrics.streaming_accuracy(\n predictions, labels)\n accuracy1, update_op1 = tf.contrib.metrics.streaming_accuracy(\n predictions+1, labels)\n\n names_to_values = {'Accuracy': accuracy0, 'Another accuracy': accuracy1}\n names_to_updates = {'Accuracy': update_op0, 'Another accuracy': update_op1}\n return names_to_values, names_to_updates\n\n def _verify_summaries(self, output_dir, names_to_values):\n \"\"\"Verifies that the given `names_to_values` are found in the summaries.\n\n Args:\n output_dir: An existing directory where summaries are found.\n names_to_values: A dictionary of strings to values.\n \"\"\"\n # Check that the results were saved. The events file may have additional\n # entries, e.g. the event version stamp, so have to parse things a bit.\n output_filepath = glob.glob(os.path.join(output_dir, '*'))\n self.assertEqual(len(output_filepath), 1)\n\n events = tf.train.summary_iterator(output_filepath[0])\n summaries = [e.summary for e in events if e.summary.value]\n values = []\n for summary in summaries:\n for value in summary.value:\n values.append(value)\n saved_results = {v.tag: v.simple_value for v in values}\n for name in names_to_values:\n self.assertAlmostEqual(names_to_values[name], saved_results[name])\n\n def testSummariesAreFlushedToDisk(self):\n output_dir = os.path.join(self.get_temp_dir(), 'flush_test')\n if tf.gfile.Exists(output_dir): # For running on jenkins.\n tf.gfile.DeleteRecursively(output_dir)\n\n names_to_metrics, names_to_updates = self._create_names_to_metrics(\n self._predictions, self._labels)\n\n for k in names_to_metrics:\n v = names_to_metrics[k]\n tf.scalar_summary(k, v)\n\n summary_writer = tf.train.SummaryWriter(output_dir)\n\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n eval_op = tf.group(*names_to_updates.values())\n\n with self.test_session() as sess:\n slim.evaluation.evaluation(\n sess,\n initial_op=initial_op,\n eval_op=eval_op,\n summary_op=tf.merge_all_summaries(),\n summary_writer=summary_writer,\n global_step=self._global_step)\n\n names_to_values = {name: names_to_metrics[name].eval()\n for name in names_to_metrics}\n self._verify_summaries(output_dir, names_to_values)\n\n def testSummariesAreFlushedToDiskWithoutGlobalStep(self):\n output_dir = os.path.join(self.get_temp_dir(), 'flush_test_no_global_step')\n if tf.gfile.Exists(output_dir): # For running on jenkins.\n tf.gfile.DeleteRecursively(output_dir)\n\n names_to_metrics, names_to_updates = self._create_names_to_metrics(\n self._predictions, self._labels)\n\n for k in names_to_metrics:\n v = names_to_metrics[k]\n tf.scalar_summary(k, v)\n\n summary_writer = tf.train.SummaryWriter(output_dir)\n\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n eval_op = tf.group(*names_to_updates.values())\n\n with self.test_session() as sess:\n slim.evaluation.evaluation(\n sess,\n initial_op=initial_op,\n eval_op=eval_op,\n summary_op=tf.merge_all_summaries(),\n summary_writer=summary_writer)\n\n names_to_values = {name: 
names_to_metrics[name].eval()\n for name in names_to_metrics}\n self._verify_summaries(output_dir, names_to_values)\n\n def testWithFeedDict(self):\n accuracy, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n\n with self.test_session() as sess:\n slim.evaluation.evaluation(\n sess,\n initial_op=initial_op,\n eval_op=update_op,\n eval_op_feed_dict={self._scale: np.ones([], dtype=np.float32)})\n self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)\n\n def testWithQueueRunning(self):\n strings = ['the', 'cat', 'in', 'the', 'hat']\n _ = tf.train.string_input_producer(strings, capacity=5)\n\n accuracy, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n\n initial_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n\n with self.test_session() as sess:\n slim.evaluation.evaluation(\n sess, initial_op=initial_op, eval_op=update_op)\n self.assertAlmostEqual(accuracy.eval(), self._expected_accuracy)\n\n def testLatestCheckpointReturnsNoneAfterTimeout(self):\n start = time.time()\n ret = slim.evaluation.wait_for_new_checkpoint(\n '/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)\n end = time.time()\n self.assertIsNone(ret)\n # We've waited one time.\n self.assertGreater(end, start + 0.5)\n # The timeout kicked in.\n self.assertLess(end, start + 1.1)\n\n def testMonitorCheckpointsLoopTimeout(self):\n ret = list(slim.evaluation.checkpoints_iterator(\n '/non-existent-dir', timeout=0))\n self.assertEqual(ret, [])\n\n\nclass SingleEvaluationTest(tf.test.TestCase):\n\n def setUp(self):\n super(SingleEvaluationTest, self).setUp()\n\n num_classes = 8\n batch_size = 16\n inputs, labels = GenerateTestData(num_classes, batch_size)\n self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)\n\n self._global_step = slim.get_or_create_global_step()\n self._inputs = tf.constant(inputs, dtype=tf.float32)\n self._labels = tf.constant(labels, dtype=tf.int64)\n self._predictions, self._scale = TestModel(self._inputs)\n\n def testErrorRaisedIfCheckpointDoesntExist(self):\n checkpoint_path = os.path.join(self.get_temp_dir(),\n 'this_file_doesnt_exist')\n log_dir = os.path.join(self.get_temp_dir(), 'error_raised')\n with self.assertRaises(ValueError):\n slim.evaluation.evaluate_once('', checkpoint_path, log_dir)\n\n def testRestoredModelPerformance(self):\n checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')\n log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')\n\n # First, save out the current model to a checkpoint:\n init_op = tf.group(tf.initialize_all_variables(),\n tf.initialize_local_variables())\n saver = tf.train.Saver()\n with self.test_session() as sess:\n sess.run(init_op)\n saver.save(sess, checkpoint_path)\n\n # Next, determine the metric to evaluate:\n value_op, update_op = slim.metrics.streaming_accuracy(\n self._predictions, self._labels)\n\n # Run the evaluation and verify the results:\n accuracy_value = slim.evaluation.evaluate_once(\n '',\n checkpoint_path,\n log_dir,\n eval_op=update_op,\n final_op=value_op)\n self.assertAlmostEqual(accuracy_value, self._expected_accuracy)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for layers.feature_column_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib.layers.python.layers import feature_column_ops\nfrom tensorflow.python.ops import init_ops\n\n\nclass TransformerTest(tf.test.TestCase):\n\n def testRealValuedColumnIsIdentityTransformation(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\")\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = feature_column_ops._Transformer(features).transform(real_valued)\n with self.test_session():\n self.assertAllEqual(output.eval(), [[20.], [110], [-3]])\n\n def testBucketizedColumn(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = feature_column_ops._Transformer(features).transform(bucket)\n with self.test_session():\n self.assertAllEqual(output.eval(), [[2], [3], [0]])\n\n def testBucketizedColumnWithMultiDimensions(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20., 110], [110., 20], [-3, -3]])}\n output = feature_column_ops._Transformer(features).transform(bucket)\n with self.test_session():\n self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])\n\n def testCachedTransformation(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n transformer = feature_column_ops._Transformer(features)\n with self.test_session() as sess:\n transformer.transform(bucket)\n num_of_ops = len(sess.graph.get_operations())\n # Verify that the second call to transform the same feature\n # doesn't increase the number of ops.\n transformer.transform(bucket)\n self.assertEqual(num_of_ops, len(sess.graph.get_operations()))\n\n def testSparseColumnWithHashBucket(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n output = feature_column_ops._Transformer(features).transform(hashed_sparse)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))\n self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())\n self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())\n\n 
def testSparseIntColumnWithHashBucket(self):\n \"\"\"Tests a sparse column with int values.\"\"\"\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"wire\", 10, dtype=tf.int64)\n wire_tensor = tf.SparseTensor(values=[101, 201, 301],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n output = feature_column_ops._Transformer(features).transform(hashed_sparse)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))\n self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())\n self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())\n\n def testEmbeddingColumn(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n output = feature_column_ops._Transformer(features).transform(\n tf.contrib.layers.embedding_column(hashed_sparse, 10))\n expected = feature_column_ops._Transformer(features).transform(\n hashed_sparse)\n with self.test_session():\n self.assertAllEqual(output.values.eval(), expected.values.eval())\n self.assertAllEqual(output.indices.eval(), expected.indices.eval())\n self.assertAllEqual(output.shape.eval(), expected.shape.eval())\n\n def testSparseColumnWithKeys(self):\n keys_sparse = tf.contrib.layers.sparse_column_with_keys(\n \"wire\", [\"marlo\", \"omar\", \"stringer\"])\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n output = feature_column_ops._Transformer(features).transform(keys_sparse)\n with self.test_session():\n tf.initialize_all_tables().run()\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertAllEqual(output.values.eval(), [1, 2, 0])\n self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())\n self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())\n\n def testSparseColumnWithHashBucket_IsIntegerized(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_integerized_feature(\n \"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[100, 1, 25],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n output = feature_column_ops._Transformer(features).transform(hashed_sparse)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int32)\n self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))\n self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())\n self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())\n\n def testWeightedSparseColumn(self):\n ids = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"marlo\", \"omar\", \"stringer\"])\n ids_tensor = tf.SparseTensor(values=[\"stringer\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, \"weights\")\n weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"ids\": ids_tensor,\n \"weights\": weights_tensor}\n output = feature_column_ops._Transformer(features).transform(weighted_ids)\n with self.test_session():\n tf.initialize_all_tables().run()\n self.assertAllEqual(output[0].shape.eval(), ids_tensor.shape.eval())\n 
self.assertAllEqual(output[0].indices.eval(), ids_tensor.indices.eval())\n self.assertAllEqual(output[0].values.eval(), [2, 2, 0])\n self.assertAllEqual(output[1].shape.eval(), weights_tensor.shape.eval())\n self.assertAllEqual(output[1].indices.eval(),\n weights_tensor.indices.eval())\n self.assertEqual(output[1].values.dtype, tf.float32)\n self.assertAllEqual(output[1].values.eval(), weights_tensor.values.eval())\n\n def testCrossColumn(self):\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_language = tf.contrib.layers.crossed_column(\n [language, country], hash_bucket_size=15)\n features = {\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1])\n }\n output = feature_column_ops._Transformer(features).transform(\n country_language)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))\n\n def testCrossWithBucketizedColumn(self):\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_price = tf.contrib.layers.crossed_column(\n [country, price_bucket], hash_bucket_size=15)\n features = {\n \"price\": tf.constant([[20.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n output = feature_column_ops._Transformer(features).transform(country_price)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))\n\n def testCrossWithMultiDimensionBucketizedColumn(self):\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n country_price = tf.contrib.layers.crossed_column(\n [country, price_bucket], hash_bucket_size=1000)\n\n with tf.Graph().as_default():\n features = {\"price\": tf.constant([[20., 210.], [110., 50.], [-3., -30.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\", \"US\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 2])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [country_price],\n num_outputs=1))\n\n weights = column_to_variable[country_price][0]\n grad = tf.squeeze(tf.gradients(output, weights)[0].values)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertEqual(len(grad.eval()), 6)\n\n def testCrossWithCrossedColumn(self):\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_price = tf.contrib.layers.crossed_column(\n [country, price_bucket], hash_bucket_size=15)\n wire = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_country_price = tf.contrib.layers.crossed_column(\n [wire, country_price], hash_bucket_size=15)\n features = {\n \"price\": tf.constant([[20.]]),\n 
\"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2]),\n \"wire\": tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [0, 1], [0, 2]],\n shape=[1, 3])\n }\n output = feature_column_ops._Transformer(features).transform(\n wire_country_price)\n with self.test_session():\n self.assertEqual(output.values.dtype, tf.int64)\n self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))\n\n def testIfFeatureTableContainsTransformationReturnIt(self):\n any_column = tf.contrib.layers.sparse_column_with_hash_bucket(\"sparse\", 10)\n features = {any_column: \"any-thing-even-not-a-tensor\"}\n output = feature_column_ops._Transformer(features).transform(any_column)\n self.assertEqual(output, \"any-thing-even-not-a-tensor\")\n\n\nclass CreateInputLayersForDNNsTest(tf.test.TestCase):\n\n def testAllDNNColumns(self):\n sparse_column = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"a\", \"b\", \"c\", \"unseen\"])\n\n real_valued_column = tf.contrib.layers.real_valued_column(\"income\", 2)\n one_hot_column = tf.contrib.layers.one_hot_column(sparse_column)\n embedding_column = tf.contrib.layers.embedding_column(sparse_column, 10)\n features = {\n \"ids\": tf.SparseTensor(\n values=[\"c\", \"b\", \"a\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1]),\n \"income\": tf.constant([[20.3, 10], [110.3, 0.4], [-3.0, 30.4]])\n }\n output = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_column,\n embedding_column,\n real_valued_column])\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual(output.eval().shape, [3, 2 + 4 + 10])\n\n def testRealValuedColumn(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\")\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [real_valued])\n with self.test_session():\n self.assertAllClose(output.eval(), features[\"price\"].eval())\n\n def testRealValuedColumnWithMultiDimensions(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\", 2)\n features = {\"price\": tf.constant([[20., 10.],\n [110, 0.],\n [-3, 30]])}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [real_valued])\n with self.test_session():\n self.assertAllClose(output.eval(), features[\"price\"].eval())\n\n def testRealValuedColumnWithNormalizer(self):\n real_valued = tf.contrib.layers.real_valued_column(\n \"price\", normalizer=lambda x: x - 2)\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [real_valued])\n with self.test_session():\n self.assertAllClose(output.eval(), features[\"price\"].eval() - 2)\n\n def testRealValuedColumnWithMultiDimensionsAndNormalizer(self):\n real_valued = tf.contrib.layers.real_valued_column(\n \"price\", 2, normalizer=lambda x: x - 2)\n features = {\"price\": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [real_valued])\n with self.test_session():\n self.assertAllClose(output.eval(), features[\"price\"].eval() - 2)\n\n def testBucketizedColumnSucceedsForDNN(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = tf.contrib.layers.input_from_feature_columns(features, 
[bucket])\n expected = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]\n with self.test_session():\n self.assertAllClose(output.eval(), expected)\n\n def testBucketizedColumnWithNormalizerSucceedsForDNN(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\n \"price\", normalizer=lambda x: x - 15),\n boundaries=[0., 10., 100.])\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output = tf.contrib.layers.input_from_feature_columns(features, [bucket])\n expected = [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]\n with self.test_session():\n self.assertAllClose(output.eval(), expected)\n\n def testBucketizedColumnWithMultiDimensionsSucceedsForDNN(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n # buckets [2, 3], [3, 2], [0, 0]. dimension = 2\n features = {\"price\": tf.constant([[20., 200],\n [110, 50],\n [-3, -3]])}\n output = tf.contrib.layers.input_from_feature_columns(features, [bucket])\n expected = [[0, 0, 1, 0, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 1, 0],\n [1, 0, 0, 0, 1, 0, 0, 0]]\n with self.test_session():\n self.assertAllClose(output.eval(), expected)\n\n def testOneHotColumnFromWeightedSparseColumnFails(self):\n ids_column = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"a\", \"b\", \"c\", \"unseen\"])\n ids_tensor = tf.SparseTensor(\n values=[\"c\", \"b\", \"a\", \"c\"],\n indices=[[0, 0], [1, 0], [2, 0], [2, 1]],\n shape=[3, 2])\n weighted_ids_column = tf.contrib.layers.weighted_sparse_column(ids_column,\n \"weights\")\n weights_tensor = tf.SparseTensor(\n values=[10.0, 20.0, 30.0, 40.0],\n indices=[[0, 0], [1, 0], [2, 0], [2, 1]],\n shape=[3, 2])\n features = {\"ids\": ids_tensor, \"weights\": weights_tensor}\n one_hot_column = tf.contrib.layers.one_hot_column(weighted_ids_column)\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n with self.assertRaisesRegexp(\n ValueError,\n \"one_hot_column does not yet support weighted_sparse_column\"):\n _ = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_column])\n\n def testOneHotColumnFromSparseColumnWithKeysSucceedsForDNN(self):\n ids_column = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"a\", \"b\", \"c\", \"unseen\"])\n ids_tensor = tf.SparseTensor(\n values=[\"c\", \"b\", \"a\"], indices=[[0, 0], [1, 0], [2, 0]], shape=[3, 1])\n one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)\n features = {\"ids\": ids_tensor}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_sparse])\n\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]],\n output.eval())\n\n def testOneHotColumnFromMultivalentSparseColumnWithKeysSucceedsForDNN(self):\n ids_column = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"a\", \"b\", \"c\", \"unseen\"])\n ids_tensor = tf.SparseTensor(\n values=[\"c\", \"b\", \"a\", \"c\"],\n indices=[[0, 0], [1, 0], [2, 0], [2, 1]],\n shape=[3, 2])\n one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)\n features = {\"ids\": ids_tensor}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_sparse])\n\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],\n output.eval())\n\n def 
testOneHotColumnFromSparseColumnWithIntegerizedFeaturePassesForDNN(self):\n ids_column = tf.contrib.layers.sparse_column_with_integerized_feature(\n \"ids\", bucket_size=4)\n one_hot_sparse = tf.contrib.layers.one_hot_column(ids_column)\n features = {\"ids\": tf.SparseTensor(\n values=[2, 1, 0, 2],\n indices=[[0, 0], [1, 0], [2, 0], [2, 1]],\n shape=[3, 2])}\n output = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 1, 0]],\n output.eval())\n\n def testOneHotColumnFromSparseColumnWithHashBucketSucceedsForDNN(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"feat\", 10)\n wire_tensor = tf.SparseTensor(\n values=[\"a\", \"b\", \"c1\", \"c2\"],\n indices=[[0, 0], [1, 0], [2, 0], [2, 1]],\n shape=[3, 2])\n features = {\"feat\": wire_tensor}\n one_hot_sparse = tf.contrib.layers.one_hot_column(hashed_sparse)\n output = tf.contrib.layers.input_from_feature_columns(features,\n [one_hot_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual([3, 10], output.eval().shape)\n\n def testEmbeddingColumnSucceedsForDNN(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(\n values=[\"omar\", \"stringer\", \"marlo\", \"xx\", \"yy\"],\n indices=[[0, 0], [1, 0], [1, 1], [2, 0], [3, 0]],\n shape=[4, 2])\n features = {\"wire\": wire_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)\n output = tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(output.eval().shape, [4, 10])\n\n def testHashedEmbeddingColumnSucceedsForDNN(self):\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\", \"omar\"],\n indices=[[0, 0], [1, 0], [1, 1], [2, 0]],\n shape=[3, 2])\n\n features = {\"wire\": wire_tensor}\n # Big enough hash space so that hopefully there is no collision\n embedded_sparse = tf.contrib.layers.hashed_embedding_column(\"wire\", 1000, 3)\n output = tf.contrib.layers.input_from_feature_columns(\n features, [embedded_sparse], weight_collections=[\"my_collection\"])\n weights = tf.get_collection(\"my_collection\")\n grad = tf.gradients(output, weights)\n with self.test_session():\n tf.initialize_all_variables().run()\n gradient_values = []\n # Collect the gradient from the different partitions (one in this test)\n for p in range(len(grad)):\n gradient_values.extend(grad[p].values.eval())\n gradient_values.sort()\n self.assertAllEqual(gradient_values, [0.5]*6 + [2]*3)\n\n def testEmbeddingColumnWithInitializerSucceedsForDNN(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n init_value = 133.7\n embeded_sparse = tf.contrib.layers.embedding_column(\n hashed_sparse,\n 10, initializer=tf.constant_initializer(init_value))\n output = tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n\n with self.test_session():\n tf.initialize_all_variables().run()\n output_eval = output.eval()\n self.assertAllEqual(output_eval.shape, [2, 10])\n self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))\n\n def 
testEmbeddingColumnWithMultipleInitializersFails(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n embedded_sparse = tf.contrib.layers.embedding_column(\n hashed_sparse,\n 10,\n initializer=tf.truncated_normal_initializer(mean=42,\n stddev=1337))\n embedded_sparse_alternate = tf.contrib.layers.embedding_column(\n hashed_sparse,\n 10,\n initializer=tf.truncated_normal_initializer(mean=1337,\n stddev=42))\n\n # Makes sure that trying to use different initializers with the same\n # embedding column explicitly fails.\n with self.test_session():\n with self.assertRaisesRegexp(\n ValueError,\n \"Duplicate feature column key found for column: wire_embedding\"):\n tf.contrib.layers.input_from_feature_columns(\n features, [embedded_sparse, embedded_sparse_alternate])\n\n def testEmbeddingColumnWithWeightedSparseColumnSucceedsForDNN(self):\n ids = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"marlo\", \"omar\", \"stringer\"])\n ids_tensor = tf.SparseTensor(values=[\"stringer\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, \"weights\")\n weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"ids\": ids_tensor,\n \"weights\": weights_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(weighted_ids, 10)\n output = tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual(output.eval().shape, [2, 10])\n\n def testEmbeddingColumnWithCrossedColumnSucceedsForDNN(self):\n a = tf.contrib.layers.sparse_column_with_hash_bucket(\"aaa\",\n hash_bucket_size=100)\n b = tf.contrib.layers.sparse_column_with_hash_bucket(\"bbb\",\n hash_bucket_size=100)\n crossed = tf.contrib.layers.crossed_column(\n set([a, b]), hash_bucket_size=10000)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"aaa\": wire_tensor, \"bbb\": wire_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(crossed, 10)\n output = tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(output.eval().shape, [2, 10])\n\n def testSparseColumnFailsForDNN(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n with self.test_session():\n with self.assertRaisesRegexp(\n ValueError, \"Error creating input layer for column: wire\"):\n tf.initialize_all_variables().run()\n tf.contrib.layers.input_from_feature_columns(features, [hashed_sparse])\n\n def testWeightedSparseColumnFailsForDNN(self):\n ids = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"marlo\", \"omar\", \"stringer\"])\n ids_tensor = tf.SparseTensor(values=[\"stringer\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, \"weights\")\n weights_tensor = 
tf.SparseTensor(values=[10.0, 20.0, 30.0],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"ids\": ids_tensor,\n \"weights\": weights_tensor}\n with self.test_session():\n with self.assertRaisesRegexp(\n ValueError,\n \"Error creating input layer for column: ids_weighted_by_weights\"):\n tf.initialize_all_tables().run()\n tf.contrib.layers.input_from_feature_columns(features, [weighted_ids])\n\n def testCrossedColumnFailsForDNN(self):\n a = tf.contrib.layers.sparse_column_with_hash_bucket(\"aaa\",\n hash_bucket_size=100)\n b = tf.contrib.layers.sparse_column_with_hash_bucket(\"bbb\",\n hash_bucket_size=100)\n crossed = tf.contrib.layers.crossed_column(\n set([a, b]), hash_bucket_size=10000)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"aaa\": wire_tensor, \"bbb\": wire_tensor}\n with self.test_session():\n with self.assertRaisesRegexp(\n ValueError, \"Error creating input layer for column: aaa_X_bbb\"):\n tf.initialize_all_variables().run()\n tf.contrib.layers.input_from_feature_columns(features, [crossed])\n\n def testDeepColumnsSucceedForDNN(self):\n real_valued = tf.contrib.layers.real_valued_column(\"income\", 3)\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n features = {\n \"income\": tf.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),\n \"price\": tf.constant([[20., 200], [110, 2], [-20, -30]]),\n \"wire\": tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])\n }\n embeded_sparse = tf.contrib.layers.embedding_column(\n hashed_sparse,\n 10, initializer=tf.constant_initializer(133.7))\n output = tf.contrib.layers.input_from_feature_columns(\n features, [real_valued, bucket, embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n # size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21\n self.assertAllEqual(output.eval().shape, [3, 21])\n\n def testEmbeddingColumnForDNN(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(\n hashed_sparse, 1, combiner=\"sum\", initializer=init_ops.ones_initializer)\n output = tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n # score: (number of values)\n self.assertAllEqual(output.eval(), [[1.], [2.]])\n\n def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):\n ids = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"marlo\", \"omar\", \"stringer\"])\n ids_tensor = tf.SparseTensor(values=[\"stringer\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, \"weights\")\n weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"ids\": ids_tensor,\n \"weights\": weights_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(\n weighted_ids, 1, combiner=\"sum\", initializer=init_ops.ones_initializer)\n output = 
tf.contrib.layers.input_from_feature_columns(features,\n [embeded_sparse])\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n # score: (sum of weights)\n self.assertAllEqual(output.eval(), [[10.], [50.]])\n\n def testInputLayerWithCollectionsForDNN(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\")\n bucket = tf.contrib.layers.bucketized_column(real_valued,\n boundaries=[0., 10., 100.])\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n features = {\n \"price\": tf.constant([[20.], [110], [-3]]),\n \"wire\": tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])\n }\n embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)\n tf.contrib.layers.input_from_feature_columns(\n features, [real_valued, bucket, embeded_sparse],\n weight_collections=[\"my_collection\"])\n weights = tf.get_collection(\"my_collection\")\n # one variable for embeded sparse\n self.assertEqual(1, len(weights))\n\n def testInputLayerWithTrainableArgForDNN(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\")\n bucket = tf.contrib.layers.bucketized_column(real_valued,\n boundaries=[0., 10., 100.])\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n features = {\n \"price\": tf.constant([[20.], [110], [-3]]),\n \"wire\": tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])\n }\n embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)\n tf.contrib.layers.input_from_feature_columns(\n features, [real_valued, bucket, embeded_sparse],\n weight_collections=[\"my_collection\"],\n trainable=False)\n # There should not be any trainable variables\n self.assertEqual(0, len(tf.trainable_variables()))\n\n tf.contrib.layers.input_from_feature_columns(\n features, [real_valued, bucket, embeded_sparse],\n weight_collections=[\"my_collection\"],\n trainable=True)\n # There should one trainable variable for embeded sparse\n self.assertEqual(1, len(tf.trainable_variables()))\n\n\nclass WeightedSumTest(tf.test.TestCase):\n\n def testSparseColumn(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [hashed_sparse], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(logits.eval().shape, [2, 5])\n\n def testSparseIntColumn(self):\n \"\"\"Tests a sparse column with int values.\"\"\"\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"wire\", 10, dtype=tf.int64)\n wire_tensor = tf.SparseTensor(values=[101, 201, 301],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [hashed_sparse], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(logits.eval().shape, [2, 5])\n\n def testWeightedSparseColumn(self):\n ids = tf.contrib.layers.sparse_column_with_keys(\n \"ids\", [\"marlo\", \"omar\", \"stringer\"])\n ids_tensor = tf.SparseTensor(values=[\"stringer\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n 
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, \"weights\")\n weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"ids\": ids_tensor,\n \"weights\": weights_tensor}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [weighted_ids], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n self.assertAllEqual(logits.eval().shape, [2, 5])\n\n def testCrossedColumn(self):\n a = tf.contrib.layers.sparse_column_with_hash_bucket(\"aaa\",\n hash_bucket_size=100)\n b = tf.contrib.layers.sparse_column_with_hash_bucket(\"bbb\",\n hash_bucket_size=100)\n crossed = tf.contrib.layers.crossed_column(\n set([a, b]), hash_bucket_size=10000)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"aaa\": wire_tensor, \"bbb\": wire_tensor}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [crossed], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(logits.eval().shape, [2, 5])\n\n def testEmbeddingColumn(self):\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n wire_tensor = tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [1, 1]],\n shape=[2, 2])\n features = {\"wire\": wire_tensor}\n embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)\n with self.test_session():\n with self.assertRaisesRegexp(\n ValueError, \"Error creating weighted sum for column: wire_embedding\"):\n tf.initialize_all_variables().run()\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [embeded_sparse],\n num_outputs=5)\n\n def testRealValuedColumnWithMultiDimensions(self):\n real_valued = tf.contrib.layers.real_valued_column(\"price\", 2)\n features = {\"price\": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [real_valued], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(logits.eval().shape, [3, 5])\n\n def testBucketizedColumnWithMultiDimensions(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n features = {\"price\": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}\n logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [bucket], num_outputs=5)\n with self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(logits.eval().shape, [3, 5])\n\n def testAllWideColumns(self):\n real_valued = tf.contrib.layers.real_valued_column(\"income\", 2)\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket(\"wire\", 10)\n crossed = tf.contrib.layers.crossed_column([bucket, hashed_sparse], 100)\n features = {\n \"income\": tf.constant([[20., 10], [110, 0], [-3, 30]]),\n \"price\": tf.constant([[20.], [110], [-3]]),\n \"wire\": tf.SparseTensor(values=[\"omar\", \"stringer\", \"marlo\"],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])\n }\n output, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [real_valued, bucket, hashed_sparse, crossed],\n num_outputs=5)\n with 
self.test_session():\n tf.initialize_all_variables().run()\n self.assertAllEqual(output.eval().shape, [3, 5])\n\n def testPredictions(self):\n language = tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"finnish\", \"hindi\"])\n age = tf.contrib.layers.real_valued_column(\"age\")\n with tf.Graph().as_default():\n features = {\n \"age\": tf.constant([[1], [2]]),\n \"language\": tf.SparseTensor(values=[\"hindi\", \"english\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n }\n output, column_to_variable, bias = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [age, language],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n self.assertAllClose(output.eval(), [[0.], [0.]])\n\n sess.run(bias.assign([0.1]))\n self.assertAllClose(output.eval(), [[0.1], [0.1]])\n\n # score: 0.1 + age*0.1\n sess.run(column_to_variable[age][0].assign([[0.2]]))\n self.assertAllClose(output.eval(), [[0.3], [0.5]])\n\n # score: 0.1 + age*0.1 + language_weight[language_index]\n sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))\n self.assertAllClose(output.eval(), [[0.5], [0.6]])\n\n def testJointPredictions(self):\n country = tf.contrib.layers.sparse_column_with_keys(\n column_name=\"country\",\n keys=[\"us\", \"finland\"])\n language = tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"finnish\", \"hindi\"])\n with tf.Graph().as_default():\n features = {\n \"country\": tf.SparseTensor(values=[\"finland\", \"us\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n \"language\": tf.SparseTensor(values=[\"hindi\", \"english\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n }\n output, variables, bias = (\n tf.contrib.layers.joint_weighted_sum_from_feature_columns(\n features, [country, language], num_outputs=1))\n # Assert that only a single weight is created.\n self.assertEqual(len(variables), 1)\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n self.assertAllClose(output.eval(), [[0.], [0.]])\n\n sess.run(bias.assign([0.1]))\n self.assertAllClose(output.eval(), [[0.1], [0.1]])\n\n # shape is [5,1] because 1 class and 2 + 3 features.\n self.assertEquals(variables[0].get_shape().as_list(), [5, 1])\n\n # score: bias + country_weight + language_weight\n sess.run(variables[0].assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))\n self.assertAllClose(output.eval(), [[0.8], [0.5]])\n\n def testJointPredictionsWeightedFails(self):\n language = tf.contrib.layers.weighted_sparse_column(\n tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"finnish\", \"hindi\"]),\n \"weight\")\n with tf.Graph().as_default():\n features = {\n \"weight\": tf.constant([[1], [2]]),\n \"language\": tf.SparseTensor(values=[\"hindi\", \"english\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n }\n with self.assertRaises(AssertionError):\n tf.contrib.layers.joint_weighted_sum_from_feature_columns(\n features, [language], num_outputs=1)\n\n def testJointPredictionsRealFails(self):\n age = tf.contrib.layers.real_valued_column(\"age\")\n with tf.Graph().as_default():\n features = {\n \"age\": tf.constant([[1], [2]]),\n }\n with self.assertRaises(NotImplementedError):\n tf.contrib.layers.joint_weighted_sum_from_feature_columns(\n features, [age], num_outputs=1)\n\n def testPredictionsWithWeightedSparseColumn(self):\n language = 
tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"finnish\", \"hindi\"])\n weighted_language = tf.contrib.layers.weighted_sparse_column(\n sparse_id_column=language,\n weight_column_name=\"age\")\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"hindi\", \"english\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n \"age\": tf.SparseTensor(values=[10.0, 20.0],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1])\n }\n output, column_to_variable, bias = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [weighted_language], num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n self.assertAllClose(output.eval(), [[0.], [0.]])\n\n sess.run(bias.assign([0.1]))\n self.assertAllClose(output.eval(), [[0.1], [0.1]])\n\n # score: bias + age*language_weight[index]\n sess.run(column_to_variable[weighted_language][0].assign(\n [[0.1], [0.2], [0.3]]))\n self.assertAllClose(output.eval(), [[3.1], [2.1]])\n\n def testPredictionsWithMultivalentColumnButNoCross(self):\n language = tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"turkish\", \"hindi\"])\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"hindi\", \"english\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n output, column_to_variable, bias = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [language],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n # score: 0.1 + language_weight['hindi'] + language_weight['english']\n sess.run(bias.assign([0.1]))\n sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))\n self.assertAllClose(output.eval(), [[0.4]])\n\n def testSparseFeatureColumnWithHashedBucketSize(self):\n movies = tf.contrib.layers.sparse_column_with_hash_bucket(\n column_name=\"movies\", hash_bucket_size=15)\n with tf.Graph().as_default():\n features = {\n \"movies\": tf.SparseTensor(\n values=[\"matrix\", \"head-on\", \"winter sleep\"],\n indices=[[0, 0], [0, 1], [1, 0]],\n shape=[2, 2])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [movies],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[movies][0]\n self.assertEqual(weights.get_shape(), (15, 1))\n sess.run(weights.assign(weights + 0.4))\n # score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8\n # score for second example = 0.4 (winter sleep)\n self.assertAllClose(output.eval(), [[0.8], [0.4]])\n\n def testCrossUsageInPredictions(self):\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_language = tf.contrib.layers.crossed_column(\n [language, country], hash_bucket_size=10)\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 1])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country_language],\n num_outputs=1))\n with 
self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[country_language][0]\n sess.run(weights.assign(weights + 0.4))\n self.assertAllClose(output.eval(), [[0.4], [0.4]])\n\n def testCrossColumnByItself(self):\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n language_language = tf.contrib.layers.crossed_column(\n [language, language], hash_bucket_size=10)\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2]),\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [language_language],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[language_language][0]\n sess.run(weights.assign(weights + 0.4))\n # There are two features inside language. If we cross it by itself we'll\n # have four crossed features.\n self.assertAllClose(output.eval(), [[1.6]])\n\n def testMultivalentCrossUsageInPredictions(self):\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_language = tf.contrib.layers.crossed_column(\n [language, country], hash_bucket_size=10)\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country_language],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[country_language][0]\n sess.run(weights.assign(weights + 0.4))\n # There are four crosses each with 0.4 weight.\n # score = 0.4 + 0.4 + 0.4 + 0.4\n self.assertAllClose(output.eval(), [[1.6]])\n\n def testMultivalentCrossUsageInPredictionsWithPartition(self):\n # bucket size has to be big enough to allwo sharding.\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=64 << 19)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=64 << 18)\n country_language = tf.contrib.layers.crossed_column(\n [language, country], hash_bucket_size=64 << 18)\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n with tf.variable_scope(\n \"weighted_sum_from_feature_columns\",\n features.values(),\n partitioner=tf.min_max_variable_partitioner(\n max_partitions=10, min_slice_size=((64 << 20) - 1))) as scope:\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country, language, country_language],\n num_outputs=1,\n scope=scope))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n self.assertEqual(2, len(column_to_variable[country]))\n self.assertEqual(3, len(column_to_variable[language]))\n 
self.assertEqual(2, len(column_to_variable[country_language]))\n\n weights = column_to_variable[country_language]\n for partition_variable in weights:\n sess.run(partition_variable.assign(partition_variable + 0.4))\n # There are four crosses each with 0.4 weight.\n # score = 0.4 + 0.4 + 0.4 + 0.4\n self.assertAllClose(output.eval(), [[1.6]])\n\n def testRealValuedColumnHavingMultiDimensions(self):\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n age = tf.contrib.layers.real_valued_column(\"age\")\n # The following RealValuedColumn has 3 dimensions.\n incomes = tf.contrib.layers.real_valued_column(\"incomes\", 3)\n\n with tf.Graph().as_default():\n features = {\"age\": tf.constant([[1], [1]]),\n \"incomes\": tf.constant([[100., 200., 300.], [10., 20., 30.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 2])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country, age, incomes],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n incomes_weights = column_to_variable[incomes][0]\n sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))\n self.assertAllClose(output.eval(), [[140.], [14.]])\n\n def testMulticlassWithRealValuedColumnHavingMultiDimensions(self):\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n age = tf.contrib.layers.real_valued_column(\"age\")\n # The following RealValuedColumn has 3 dimensions.\n incomes = tf.contrib.layers.real_valued_column(\"incomes\", 3)\n with tf.Graph().as_default():\n features = {\"age\": tf.constant([[1], [1]]),\n \"incomes\": tf.constant([[100., 200., 300.], [10., 20., 30.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[2, 2])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country, age, incomes],\n num_outputs=5))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n incomes_weights = column_to_variable[incomes][0]\n sess.run(incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],\n [0.02, 0.2, 2., 20., 200.],\n [0.03, 0.3, 3., 30., 300.]]))\n self.assertAllClose(output.eval(), [[14., 140., 1400., 14000., 140000.],\n [1.4, 14., 140., 1400., 14000.]])\n\n def testBucketizedColumn(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n with tf.Graph().as_default():\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20.], [110], [-3]])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [bucket],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n sess.run(column_to_variable[bucket][0].assign([[0.1], [0.2], [0.3], [0.4\n ]]))\n self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])\n\n def testBucketizedColumnHavingMultiDimensions(self):\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n with tf.Graph().as_default():\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20., 210], [110, 50], [-3, -30]]),\n 
\"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[3, 2])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [bucket, country],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n # dimension = 2, bucket_size = 4, num_classes = 1\n sess.run(column_to_variable[bucket][0].assign(\n [[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))\n self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])\n\n def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", 2),\n boundaries=[0., 10., 100.])\n with tf.Graph().as_default():\n # buckets 2, 3, 0\n features = {\"price\": tf.constant([[20., 210], [110, 50], [-3, -30]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [1, 0]],\n shape=[3, 2])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [bucket, country],\n num_outputs=5))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n # dimension = 2, bucket_size = 4, num_classes = 5\n sess.run(column_to_variable[bucket][0].assign(\n [[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],\n [0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],\n [5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],\n [7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))\n self.assertAllClose(\n output.eval(),\n [[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],\n [0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],\n [0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])\n\n def testCrossWithBucketizedColumn(self):\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_price = tf.contrib.layers.crossed_column(\n [country, price_bucket], hash_bucket_size=10)\n with tf.Graph().as_default():\n features = {\n \"price\": tf.constant([[20.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [country_price],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[country_price][0]\n sess.run(weights.assign(weights + 0.4))\n # There are two crosses each with 0.4 weight.\n # score = 0.4 + 0.4\n self.assertAllClose(output.eval(), [[0.8]])\n\n def testCrossWithCrossedColumn(self):\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_language = tf.contrib.layers.crossed_column(\n [language, country], hash_bucket_size=10)\n country_language_price = tf.contrib.layers.crossed_column(\n set([country_language, price_bucket]),\n hash_bucket_size=15)\n with tf.Graph().as_default():\n features = {\n 
\"price\": tf.constant([[20.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2]),\n \"language\": tf.SparseTensor(values=[\"english\", \"spanish\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country_language_price],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[country_language_price][0]\n sess.run(weights.assign(weights + 0.4))\n # There are two crosses each with 0.4 weight.\n # score = 0.4 + 0.4 + 0.4 + 0.4\n self.assertAllClose(output.eval(), [[1.6]])\n\n def testIntegerizedColumn(self):\n product = tf.contrib.layers.sparse_column_with_integerized_feature(\n \"product\", bucket_size=5)\n with tf.Graph().as_default():\n features = {\"product\": tf.SparseTensor(values=[0, 4, 2],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [product],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n product_weights = column_to_variable[product][0]\n sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))\n self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])\n\n def testIntegerizedColumnWithInvalidId(self):\n product = tf.contrib.layers.sparse_column_with_integerized_feature(\n \"product\", bucket_size=5)\n with tf.Graph().as_default():\n features = {\"product\": tf.SparseTensor(values=[5, 4, 7],\n indices=[[0, 0], [1, 0], [2, 0]],\n shape=[3, 1])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [product],\n num_outputs=1))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n product_weights = column_to_variable[product][0]\n sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))\n self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])\n\n def testMulticlassWithOnlyBias(self):\n with tf.Graph().as_default():\n features = {\"age\": tf.constant([[10.], [20.], [30.], [40.]])}\n output, _, bias = tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [tf.contrib.layers.real_valued_column(\"age\")],\n num_outputs=3)\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n sess.run(bias.assign([0.1, 0.2, 0.3]))\n self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],\n [0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])\n\n def testMulticlassWithRealValuedColumn(self):\n with tf.Graph().as_default():\n column = tf.contrib.layers.real_valued_column(\"age\")\n features = {\"age\": tf.constant([[10.], [20.], [30.], [40.]])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [column],\n num_outputs=3))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n weights = column_to_variable[column][0]\n self.assertEqual(weights.get_shape(), (1, 3))\n sess.run(weights.assign([[0.01, 0.03, 0.05]]))\n self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],\n [0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])\n\n def testMulticlassWithSparseColumn(self):\n with tf.Graph().as_default():\n column = tf.contrib.layers.sparse_column_with_keys(\n 
column_name=\"language\",\n keys=[\"english\", \"arabic\", \"hindi\", \"russian\", \"swahili\"])\n features = {\n \"language\": tf.SparseTensor(\n values=[\"hindi\", \"english\", \"arabic\", \"russian\"],\n indices=[[0, 0], [1, 0], [2, 0], [3, 0]],\n shape=[4, 1])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [column],\n num_outputs=3))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n weights = column_to_variable[column][0]\n self.assertEqual(weights.get_shape(), (5, 3))\n sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],\n [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,\n 1.1]]))\n self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9], [0.1, 0.4, 0.7],\n [0.2, 0.5, 0.8], [0.4, 0.7, 1.0]])\n\n def testMulticlassWithBucketizedColumn(self):\n column = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 100., 500., 1000.])\n with tf.Graph().as_default():\n # buckets 0, 2, 1, 2\n features = {\"price\": tf.constant([[-3], [110], [20.], [210]])}\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [column],\n num_outputs=3))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[column][0]\n self.assertEqual(weights.get_shape(), (5, 3))\n sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],\n [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,\n 1.1]]))\n self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7], [0.3, 0.6, 0.9],\n [0.2, 0.5, 0.8], [0.3, 0.6, 0.9]])\n\n def testMulticlassWithCrossedColumn(self):\n language = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"language\", hash_bucket_size=3)\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=2)\n column = tf.contrib.layers.crossed_column(\n {language, country}, hash_bucket_size=5)\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(\n values=[\"english\", \"spanish\", \"russian\", \"swahili\"],\n indices=[[0, 0], [1, 0], [2, 0], [3, 0]],\n shape=[4, 1]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\", \"RU\", \"KE\"],\n indices=[[0, 0], [1, 0], [2, 0], [3, 0]],\n shape=[4, 1])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [column],\n num_outputs=3))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = column_to_variable[column][0]\n self.assertEqual(weights.get_shape(), (5, 3))\n sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],\n [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,\n 1.1]]))\n self.assertAllClose(tf.shape(output).eval(), [4, 3])\n\n def testMulticlassWithMultivalentColumn(self):\n column = tf.contrib.layers.sparse_column_with_keys(\n column_name=\"language\",\n keys=[\"english\", \"turkish\", \"hindi\", \"russian\", \"swahili\"])\n with tf.Graph().as_default():\n features = {\n \"language\": tf.SparseTensor(\n values=[\"hindi\", \"english\", \"turkish\", \"turkish\", \"english\"],\n indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],\n shape=[4, 2])\n }\n output, column_to_variable, _ = (\n tf.contrib.layers.weighted_sum_from_feature_columns(features,\n [column],\n num_outputs=3))\n with self.test_session() as sess:\n tf.initialize_all_variables().run()\n tf.initialize_all_tables().run()\n\n weights = 
column_to_variable[column][0]\n self.assertEqual(weights.get_shape(), (5, 3))\n sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],\n [0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,\n 1.1]]))\n self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6], [0.2, 0.5, 0.8],\n [0.2, 0.5, 0.8], [0.1, 0.4, 0.7]])\n\n def testVariablesAddedToCollection(self):\n price_bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\"),\n boundaries=[0., 10., 100.])\n country = tf.contrib.layers.sparse_column_with_hash_bucket(\n \"country\", hash_bucket_size=5)\n country_price = tf.contrib.layers.crossed_column(\n [country, price_bucket], hash_bucket_size=10)\n with tf.Graph().as_default():\n features = {\n \"price\": tf.constant([[20.]]),\n \"country\": tf.SparseTensor(values=[\"US\", \"SV\"],\n indices=[[0, 0], [0, 1]],\n shape=[1, 2])\n }\n tf.contrib.layers.weighted_sum_from_feature_columns(\n features, [country_price, price_bucket],\n num_outputs=1,\n weight_collections=[\"my_collection\"])\n weights = tf.get_collection(\"my_collection\")\n # 3 = bias + price_bucket + country_price\n self.assertEqual(3, len(weights))\n\n\nclass ParseExampleTest(tf.test.TestCase):\n\n def testParseExample(self):\n bucket = tf.contrib.layers.bucketized_column(\n tf.contrib.layers.real_valued_column(\"price\", dimension=3),\n boundaries=[0., 10., 100.])\n wire_cast = tf.contrib.layers.sparse_column_with_keys(\n \"wire_cast\", [\"marlo\", \"omar\", \"stringer\"])\n # buckets 2, 3, 0\n data = tf.train.Example(features=tf.train.Features(feature={\n \"price\": tf.train.Feature(float_list=tf.train.FloatList(value=[20., 110,\n -3])),\n \"wire_cast\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[\n b\"stringer\", b\"marlo\"\n ])),\n }))\n output = tf.contrib.layers.parse_feature_columns_from_examples(\n serialized=[data.SerializeToString()],\n feature_columns=[bucket, wire_cast])\n self.assertIn(bucket, output)\n self.assertIn(wire_cast, output)\n with self.test_session():\n tf.initialize_all_tables().run()\n self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])\n self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])\n self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])\n\n\nclass InferRealValuedColumnTest(tf.test.TestCase):\n\n def testTensorInt32(self):\n self.assertEqual(\n tf.contrib.layers.infer_real_valued_columns(\n tf.zeros(shape=[33, 4], dtype=tf.int32)),\n [tf.contrib.layers.real_valued_column(\"\", dimension=4, dtype=tf.int32)])\n\n def testTensorInt64(self):\n self.assertEqual(\n tf.contrib.layers.infer_real_valued_columns(\n tf.zeros(shape=[33, 4], dtype=tf.int64)),\n [tf.contrib.layers.real_valued_column(\"\", dimension=4, dtype=tf.int64)])\n\n def testTensorFloat32(self):\n self.assertEqual(\n tf.contrib.layers.infer_real_valued_columns(\n tf.zeros(shape=[33, 4], dtype=tf.float32)),\n [tf.contrib.layers.real_valued_column(\n \"\", dimension=4, dtype=tf.float32)])\n\n def testTensorFloat64(self):\n self.assertEqual(\n tf.contrib.layers.infer_real_valued_columns(\n tf.zeros(shape=[33, 4], dtype=tf.float64)),\n [tf.contrib.layers.real_valued_column(\n \"\", dimension=4, dtype=tf.float64)])\n\n def testDictionary(self):\n self.assertItemsEqual(\n tf.contrib.layers.infer_real_valued_columns({\n \"a\": tf.zeros(shape=[33, 4], dtype=tf.int32),\n \"b\": tf.zeros(shape=[3, 2], dtype=tf.float32)\n }),\n [tf.contrib.layers.real_valued_column(\n \"a\", dimension=4, dtype=tf.int32),\n tf.contrib.layers.real_valued_column(\n \"b\", dimension=2, 
dtype=tf.float32)])\n\n def testNotGoodDtype(self):\n with self.assertRaises(ValueError):\n tf.contrib.layers.infer_real_valued_columns(\n tf.constant([[\"a\"]], dtype=tf.string))\n\n def testSparseTensor(self):\n with self.assertRaises(ValueError):\n tf.contrib.layers.infer_real_valued_columns(\n tf.SparseTensor(indices=[[0, 0]], values=[\"a\"], shape=[1, 1]))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.python.summary.impl.gcs.IsDirectory",
"tensorflow.python.summary.impl.event_file_loader.EventFileLoader",
"tensorflow.python.platform.gfile.Walk",
"tensorflow.python.summary.impl.gcs_file_loader.GCSFileLoader",
"tensorflow.python.summary.impl.gcs.Exists",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.python.summary.impl.gcs.IsGCSPath",
"tensorflow.python.platform.gfile.IsDirectory",
"tensorflow.python.summary.impl.gcs.ListDirectory",
"tensorflow.python.platform.gfile.ListDirectory",
"tensorflow.python.platform.gfile.Open",
"tensorflow.python.summary.impl.gcs.ListRecursively"
],
[
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.check_ops.assert_positive",
"tensorflow.python.ops.check_ops.assert_non_negative",
"tensorflow.contrib.distributions.python.ops.distribution_util.log_combinations",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.contrib.distributions.python.ops.distribution_util.assert_integer_form",
"tensorflow.contrib.distributions.python.ops.distribution_util.append_class_fun_doc",
"tensorflow.python.ops.check_ops.assert_rank_at_least",
"tensorflow.python.ops.special_math_ops.lbeta",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.matrix_set_diag"
],
[
"tensorflow.initialize_all_variables",
"numpy.sum",
"numpy.ones",
"tensorflow.merge_all_summaries",
"numpy.random.seed",
"tensorflow.train.summary_iterator",
"tensorflow.Variable",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.train.SummaryWriter",
"tensorflow.contrib.metrics.streaming_accuracy",
"numpy.random.rand",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.scalar_summary",
"tensorflow.train.string_input_producer",
"numpy.argmax",
"tensorflow.mul",
"tensorflow.train.Saver",
"tensorflow.initialize_local_variables",
"tensorflow.argmax",
"tensorflow.gfile.Exists",
"numpy.random.randint",
"tensorflow.gfile.DeleteRecursively"
],
[
"tensorflow.initialize_all_variables",
"tensorflow.train.FloatList",
"tensorflow.contrib.layers.real_valued_column",
"tensorflow.train.BytesList",
"tensorflow.contrib.layers.sparse_column_with_integerized_feature",
"tensorflow.truncated_normal_initializer",
"tensorflow.Graph",
"tensorflow.min_max_variable_partitioner",
"tensorflow.contrib.layers.weighted_sparse_column",
"tensorflow.contrib.layers.joint_weighted_sum_from_feature_columns",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.contrib.layers.embedding_column",
"tensorflow.contrib.layers.input_from_feature_columns",
"numpy.tile",
"tensorflow.constant_initializer",
"tensorflow.shape",
"tensorflow.contrib.layers.sparse_column_with_keys",
"tensorflow.contrib.layers.bucketized_column",
"tensorflow.contrib.layers.sparse_column_with_hash_bucket",
"tensorflow.get_collection",
"tensorflow.contrib.layers.weighted_sum_from_feature_columns",
"tensorflow.SparseTensor",
"tensorflow.contrib.layers.one_hot_column",
"tensorflow.gradients",
"tensorflow.contrib.layers.crossed_column",
"tensorflow.contrib.layers.python.layers.feature_column_ops._Transformer",
"tensorflow.contrib.layers.hashed_embedding_column",
"tensorflow.zeros",
"tensorflow.initialize_all_tables",
"tensorflow.trainable_variables"
]
] |
ahoneybun/tensorflow | [
"51100a8de57ef53e36a8a9f5a9829cbd33fbed04"
] | [
"tensorflow/python/keras/engine/training.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training-related part of the Keras engine.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport weakref\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import distributed_training_utils\nfrom tensorflow.python.keras.engine import training_arrays\nfrom tensorflow.python.keras.engine import training_distributed\nfrom tensorflow.python.keras.engine import training_eager\nfrom tensorflow.python.keras.engine import training_generator\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.engine.network import Network\nfrom tensorflow.python.keras.utils.generic_utils import slice_arrays\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import optimizer as tf_optimizer_module\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.models.Model', 'keras.Model')\nclass Model(Network):\n \"\"\"`Model` groups layers into an object with training and inference features.\n\n There are two ways to instantiate a `Model`:\n\n 1 - With the \"functional API\", where you start from `Input`,\n you chain layer calls to specify the model's forward pass,\n and finally you create your model from inputs and outputs:\n\n ```python\n import tensorflow as tf\n\n inputs = tf.keras.Input(shape=(3,))\n x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)\n outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n ```\n\n 2 - By subclassing the `Model` class: in that case, you should define your\n layers in `__init__` and you should implement the model's forward pass\n in `call`.\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n\n def call(self, inputs):\n x = self.dense1(inputs)\n return self.dense2(x)\n\n model = MyModel()\n ```\n\n 
If you subclass `Model`, you can optionally have\n a `training` argument (boolean) in `call`, which you can use to specify\n a different behavior in training and inference:\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n self.dropout = tf.keras.layers.Dropout(0.5)\n\n def call(self, inputs, training=False):\n x = self.dense1(inputs)\n if training:\n x = self.dropout(x, training=training)\n return self.dense2(x)\n\n model = MyModel()\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__(*args, **kwargs)\n # Create a cache for iterator get_next op.\n self._iterator_get_next = weakref.WeakKeyDictionary()\n # Create a cache for dataset - uninitialized iterators\n self._dataset_iterator_cache = weakref.WeakKeyDictionary()\n # initializing _distribution_strategy here since it is possible to call\n # predict on a model without compiling it.\n self._distribution_strategy = None\n\n def _set_sample_weight_attributes(self, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Sets sample weight related attributes on the model.\"\"\"\n sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(\n self.output_names, sample_weight_mode, skip_target_weighing_indices)\n self.sample_weights = sample_weights\n self.sample_weight_modes = sample_weight_modes\n self._feed_sample_weight_modes = [\n sample_weight_modes[i]\n for i in range(len(self.outputs))\n if i not in skip_target_weighing_indices\n ]\n self._feed_sample_weights = [\n sample_weights[i]\n for i in range(len(sample_weights))\n if i not in skip_target_weighing_indices\n ]\n\n def _get_metric_name(self, metric, output_index, weighted=False):\n \"\"\"Returns the metric name corresponding to the given metric input.\n\n Arguments:\n metric: Metric function name or reference.\n output_index: Index of the current output.\n weighted: Boolean indicating if the given metric is weighted.\n\n Returns:\n A metric name.\n \"\"\"\n metric_name_prefix = 'weighted_' if weighted else ''\n if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):\n if metric in ('accuracy', 'acc'):\n suffix = 'acc'\n elif metric in ('crossentropy', 'ce'):\n suffix = 'ce'\n else:\n metric_fn = metrics_module.get(metric)\n # Get metric name as string\n if hasattr(metric_fn, 'name'):\n suffix = metric_fn.name\n else:\n suffix = metric_fn.__name__\n metric_name = metric_name_prefix + suffix\n\n if len(self.output_names) > 1:\n metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = '%s_%d' % (base_metric_name, j)\n j += 1\n\n return metric_name\n\n def _handle_per_output_metrics(self,\n metrics,\n y_true,\n y_pred,\n output_index,\n output_shape,\n loss_fn,\n mask,\n weights=None):\n \"\"\"Calls metric functions and sets metric attributes for a single output.\n\n Arguments:\n metrics: List of metrics.\n y_true: Target output.\n y_pred: Predicted output.\n output_index: Index of the current output.\n output_shape: Shape of the current output.\n loss_fn: Loss function corresponding to the current output.\n mask: Computed mask value for the current output.\n weights: Weights to be applied on the current output.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n metric_results = []\n for metric in 
metrics:\n metric_fn = training_utils.get_metric_function(\n metric, output_shape=output_shape, loss_fn=loss_fn)\n metric_name = self._get_metric_name(\n metric, output_index, weighted=weights is not None)\n\n with K.name_scope(metric_name):\n # If both outputs and targets are available, call the metric function.\n if y_true is not None and y_pred is not None:\n if isinstance(metric_fn, metrics_module.Metric):\n # Call the stateful metric function.\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n # Update weights with mask.\n if weights is None:\n weights = mask\n else:\n # Update shape of weights if possible before adding mask.\n # Update dimensions of weights to match with mask if possible.\n mask, _, weights = metrics_module.squeeze_or_expand_dimensions(\n mask, None, weights)\n try:\n # Broadcast weights if possible.\n weights = weights_broadcast_ops.broadcast_weights(\n weights, mask)\n except ValueError:\n pass\n # TODO(psv): Handle case when mask and weight shapes are not\n # compatible.\n weights *= mask\n\n metric_result = metric_fn(y_true, y_pred, weights)\n else:\n # Call the stateless metric function.\n weighted_metric_fn = training_utils.weighted_masked_objective(\n metric_fn)\n metric_result = weighted_metric_fn(\n y_true, y_pred, weights=weights, mask=mask)\n\n if not context.executing_eagerly():\n # Keep track of metric result tensor.\n self.metrics_tensors.append(metric_result)\n metric_results.append(metric_result)\n\n # Keep track of metric name.\n self.metrics_names.append(metric_name)\n\n # Keep track of stateful metric attributes (name and metric function).\n if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:\n self.stateful_metric_names.append(metric_name)\n self.stateful_metric_functions.append(metric_fn)\n if not context.executing_eagerly():\n # Keep track of updates created by stateful metrics.\n self.metrics_updates += metric_fn.updates\n return metric_results\n\n def _handle_metrics(self,\n outputs,\n skip_target_indices=None,\n targets=None,\n sample_weights=None,\n masks=None):\n \"\"\"Handles calling metric functions and setting model metric attributes.\n\n Arguments:\n outputs: List of outputs (predictions).\n skip_target_indices: Optional. 
List of target ids to skip.\n targets: List of targets.\n sample_weights: Optional list of sample weight arrays.\n masks: List of computed output mask values.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n skip_target_indices = skip_target_indices or []\n metric_results = []\n with K.name_scope('metrics'):\n for i in range(len(outputs)):\n if i in skip_target_indices:\n continue\n output = outputs[i] if outputs else None\n target = targets[i] if targets else None\n output_shape = None if output is None else output.get_shape().as_list()\n output_mask = masks[i] if masks else None\n metric_results.extend(\n self._handle_per_output_metrics(\n self.nested_metrics[i], target, output, i, output_shape,\n self.loss_functions[i], output_mask))\n metric_results.extend(\n self._handle_per_output_metrics(\n self.nested_weighted_metrics[i],\n target,\n output,\n i,\n output_shape,\n self.loss_functions[i],\n output_mask,\n weights=sample_weights[i]))\n return metric_results\n\n @checkpointable.no_automatic_dependency_tracking\n def compile(self,\n optimizer,\n loss=None,\n metrics=None,\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None,\n distribute=None,\n **kwargs):\n \"\"\"Configures the model for training.\n\n Arguments:\n optimizer: String (name of optimizer) or optimizer instance.\n See [optimizers](/api_docs/python/tf/keras/optimizers).\n loss: String (name of objective function) or objective function.\n See [losses](/api_docs/python/tf/losses).\n If the model has multiple outputs, you can use a different loss\n on each output by passing a dictionary or a list of losses.\n The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n metrics: List of metrics to be evaluated by the model\n during training and testing.\n Typically you will use `metrics=['accuracy']`.\n To specify different metrics for different outputs of a\n multi-output model, you could also pass a dictionary,\n such as `metrics={'output_a': 'accuracy'}`.\n loss_weights: Optional list or dictionary specifying scalar\n coefficients (Python floats) to weight the loss contributions\n of different model outputs.\n The loss value that will be minimized by the model\n will then be the *weighted sum* of all individual losses,\n weighted by the `loss_weights` coefficients.\n If a list, it is expected to have a 1:1 mapping\n to the model's outputs. If a tensor, it is expected to map\n output names (strings) to scalar coefficients.\n sample_weight_mode: If you need to do timestep-wise\n sample weighting (2D weights), set this to `\"temporal\"`.\n `None` defaults to sample-wise weights (1D).\n If the model has multiple outputs, you can use a different\n `sample_weight_mode` on each output by passing a\n dictionary or a list of modes.\n weighted_metrics: List of metrics to be evaluated and weighted\n by sample_weight or class_weight during training and testing.\n target_tensors: By default, Keras will create placeholders for the\n model's target, which will be fed with the target data during\n training. If instead you would like to use your own\n target tensors (in turn, Keras will not expect external\n Numpy data for these targets at training time), you\n can specify them via the `target_tensors` argument. 
It can be\n a single tensor (for a single-output model), a list of tensors,\n or a dict mapping output names to target tensors.\n distribute: The DistributionStrategy instance that we want to use to\n distribute the training of the model.\n **kwargs: These arguments are passed to `tf.Session.run`.\n\n Raises:\n ValueError: In case of invalid arguments for\n `optimizer`, `loss`, `metrics` or `sample_weight_mode`.\n \"\"\"\n # Validate that arguments passed by the user to `compile` are supported by\n # DistributionStrategy.\n if distribute and not isinstance(\n optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):\n raise NotImplementedError('Only TF native optimizers are supported with '\n 'DistributionStrategy.')\n if distribute and context.executing_eagerly():\n raise NotImplementedError('DistributionStrategy is not supported in '\n 'Eager mode.')\n if distribute and sample_weight_mode:\n raise NotImplementedError('sample_weight_mode is not supported with '\n 'DistributionStrategy.')\n if distribute and weighted_metrics:\n raise NotImplementedError('weighted_metrics is not supported with '\n 'DistributionStrategy.')\n if distribute and target_tensors:\n raise ValueError('target_tensors is not supported with '\n 'DistributionStrategy.')\n\n loss = loss or {}\n if context.executing_eagerly() and not isinstance(\n optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):\n raise ValueError('Only TF native optimizers are supported in Eager mode.')\n\n self.optimizer = optimizers.get(optimizer)\n # We've disabled automatic dependency tracking for this method, but do want\n # to add a checkpoint dependency on the optimizer if it's checkpointable.\n if isinstance(self.optimizer, checkpointable.CheckpointableBase):\n self._track_checkpointable(\n self.optimizer, name='optimizer', overwrite=True)\n self.loss = loss\n self.metrics = metrics or []\n self.loss_weights = loss_weights\n self.sample_weight_mode = sample_weight_mode\n self.weighted_metrics = weighted_metrics\n if context.executing_eagerly() and target_tensors is not None:\n raise ValueError('target_tensors is not supported in Eager mode.')\n self.target_tensors = target_tensors\n\n # Set DistributionStrategy specific parameters.\n self._distribution_strategy = distribute\n if self._distribution_strategy is not None:\n self._grouped_model = self._compile_distributed_model(\n self._distribution_strategy)\n with self._distribution_strategy.scope():\n first_replicated_model = self._distribution_strategy.unwrap(\n self._grouped_model)[0]\n # If the specified metrics in `compile` are stateful, raise an error\n # since we currently don't support stateful metrics.\n if first_replicated_model.stateful_metric_names:\n raise NotImplementedError('Stateful metrics are not supported with '\n 'DistributionStrategy.')\n\n # We initialize the callback model with the first replicated model.\n self._replicated_model = DistributedCallbackModel(first_replicated_model)\n self._replicated_model.set_original_model(self)\n if not self.built:\n # Model is not compilable because it does not know its number of inputs\n # and outputs, nor their shapes and names. We will compile after the first\n # time the model gets called on training data.\n return\n self._is_compiled = True\n\n # Prepare loss functions.\n if isinstance(loss, dict):\n for name in loss:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in loss '\n 'dictionary: \"' + name + '\". 
'\n 'Only expected the following keys: ' + str(self.output_names))\n loss_functions = []\n for name in self.output_names:\n if name not in loss:\n logging.warning(\n 'Output \"' + name + '\" missing from loss dictionary. We assume '\n 'this was done on purpose. The fit and evaluate APIs will not be '\n 'expecting any data to be passed to \"' + name + '\".')\n loss_functions.append(losses.get(loss.get(name)))\n elif isinstance(loss, list):\n if len(loss) != len(self.outputs):\n raise ValueError('When passing a list as loss, '\n 'it should have one entry per model outputs. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss=' + str(loss))\n loss_functions = [losses.get(l) for l in loss]\n else:\n loss_function = losses.get(loss)\n loss_functions = [loss_function for _ in range(len(self.outputs))]\n self.loss_functions = loss_functions\n\n weighted_losses = [training_utils.weighted_masked_objective(fn)\n for fn in loss_functions]\n skip_target_indices = []\n skip_target_weighing_indices = []\n self._feed_outputs = []\n self._feed_output_names = []\n self._feed_output_shapes = []\n self._feed_loss_fns = []\n for i in range(len(weighted_losses)):\n if weighted_losses[i] is None:\n skip_target_indices.append(i)\n skip_target_weighing_indices.append(i)\n\n # Prepare output masks.\n if not context.executing_eagerly():\n masks = [getattr(x, '_keras_mask', None) for x in self.outputs]\n if not isinstance(masks, list):\n masks = [masks]\n\n # Prepare loss weights.\n if loss_weights is None:\n loss_weights_list = [1. for _ in range(len(self.outputs))]\n elif isinstance(loss_weights, dict):\n for name in loss_weights:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in loss_weights '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' + str(self.output_names))\n loss_weights_list = []\n for name in self.output_names:\n loss_weights_list.append(loss_weights.get(name, 1.))\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(self.outputs):\n raise ValueError(\n 'When passing a list as loss_weights, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss_weights=' + str(loss_weights))\n loss_weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list of dicts.')\n self.loss_weights_list = loss_weights_list\n\n # Initialize model metric attributes.\n self.metrics_names = ['loss']\n self.metrics_tensors = []\n self.metrics_updates = []\n self.stateful_metric_names = []\n self.stateful_metric_functions = []\n\n # Nested metrics is a list of list of metrics.\n # One list per output of the model.\n self.nested_metrics = training_utils.collect_metrics(\n metrics, self.output_names)\n self.nested_weighted_metrics = training_utils.collect_metrics(\n weighted_metrics, self.output_names)\n\n # Initialization for Eager mode execution.\n if context.executing_eagerly():\n # Prepare sample weights.\n self._set_sample_weight_attributes(sample_weight_mode,\n skip_target_weighing_indices)\n\n if target_tensors is not None:\n raise ValueError('target_tensors are not currently supported in Eager '\n 'mode.')\n self.total_loss = None\n for i in range(len(self.outputs)):\n if len(self.outputs) > 1:\n self.metrics_names.append(self.output_names[i] + '_loss')\n\n # Set metric attributes on model.\n self._handle_metrics(\n self.outputs,\n skip_target_indices=skip_target_indices,\n sample_weights=self.sample_weights)\n\n self.targets = []\n for i in range(len(self.outputs)):\n self._feed_output_names.append(self.output_names[i])\n self._collected_trainable_weights = self.trainable_weights\n return\n\n # Prepare targets of model.\n self.targets = []\n self._feed_targets = []\n if target_tensors not in (None, []):\n if isinstance(target_tensors, list):\n if len(target_tensors) != len(self.outputs):\n raise ValueError(\n 'When passing a list as `target_tensors`, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed target_tensors=' + str(target_tensors))\n elif isinstance(target_tensors, dict):\n for name in target_tensors:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in `target_tensors` '\n 'dictionary: \"' + name + '\". 
'\n 'Only expected the following keys: ' + str(self.output_names))\n tmp_target_tensors = []\n for name in self.output_names:\n tmp_target_tensors.append(target_tensors.get(name, None))\n target_tensors = tmp_target_tensors\n else:\n raise TypeError('Expected `target_tensors` to be '\n 'a list or dict, but got:', target_tensors)\n\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n self.targets.append(None)\n else:\n shape = K.int_shape(self.outputs[i])\n name = self.output_names[i]\n if target_tensors not in (None, []):\n target = target_tensors[i]\n else:\n target = None\n if target is None or K.is_placeholder(target):\n if target is None:\n target = K.placeholder(\n ndim=len(shape),\n name=name + '_target',\n sparse=K.is_sparse(self.outputs[i]),\n dtype=K.dtype(self.outputs[i]))\n self._feed_targets.append(target)\n self._feed_outputs.append(self.outputs[i])\n self._feed_output_names.append(name)\n self._feed_output_shapes.append(shape)\n self._feed_loss_fns.append(self.loss_functions[i])\n else:\n skip_target_weighing_indices.append(i)\n self.targets.append(target)\n\n # Prepare sample weights.\n self._set_sample_weight_attributes(sample_weight_mode,\n skip_target_weighing_indices)\n\n # Compute total loss.\n total_loss = None\n with K.name_scope('loss'):\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n continue\n y_true = self.targets[i]\n y_pred = self.outputs[i]\n weighted_loss = weighted_losses[i]\n sample_weight = self.sample_weights[i]\n mask = masks[i]\n loss_weight = loss_weights_list[i]\n with K.name_scope(self.output_names[i] + '_loss'):\n output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)\n if len(self.outputs) > 1:\n self.metrics_tensors.append(output_loss)\n self.metrics_names.append(self.output_names[i] + '_loss')\n if total_loss is None:\n total_loss = loss_weight * output_loss\n else:\n total_loss += loss_weight * output_loss\n if total_loss is None:\n if not self.losses:\n raise ValueError('The model cannot be compiled '\n 'because it has no loss to optimize.')\n else:\n total_loss = 0.\n\n # Add regularization penalties\n # and other layer-specific losses.\n for loss_tensor in self.losses:\n total_loss += loss_tensor\n\n # Invoke metric functions for all the outputs.\n self._handle_metrics(\n self.outputs,\n masks=masks,\n targets=self.targets,\n skip_target_indices=skip_target_indices,\n sample_weights=self.sample_weights)\n\n # Prepare gradient updates and state updates.\n self.total_loss = total_loss\n\n # Functions for train, test and predict will\n # be compiled lazily when required.\n # This saves time when the user is not using all functions.\n self._function_kwargs = kwargs\n\n self.train_function = None\n self.test_function = None\n self.predict_function = None\n\n # Collected trainable weights, sorted in topological order.\n trainable_weights = self.trainable_weights\n self._collected_trainable_weights = trainable_weights\n\n def _compile_distributed_model(self, distribution_strategy):\n # TODO(anjalisridhar): Can we move the clone_and_build_model to outside the\n # model?\n def _clone_model_per_tower(model):\n new_model = training_distributed.clone_and_build_model(model)\n return new_model\n\n with distribution_strategy.scope():\n # Create a copy of this model on each of the devices.\n grouped_models = distribution_strategy.call_for_each_tower(\n _clone_model_per_tower, self)\n return grouped_models\n\n def _check_trainable_weights_consistency(self):\n \"\"\"Check trainable weights count consistency.\n\n 
This will raise a warning if `trainable_weights` and\n `_collected_trainable_weights` are inconsistent (i.e. have different\n number of parameters).\n Inconsistency will typically arise when one modifies `model.trainable`\n without calling `model.compile` again.\n \"\"\"\n if not hasattr(self, '_collected_trainable_weights'):\n return\n\n if len(self.trainable_weights) != len(self._collected_trainable_weights):\n logging.warning(\n UserWarning(\n 'Discrepancy between trainable weights and collected trainable'\n ' weights, did you set `model.trainable` without calling'\n ' `model.compile` after ?'))\n\n def _make_train_function(self):\n if not hasattr(self, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n self._check_trainable_weights_consistency()\n if self.train_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n\n with K.name_scope('training'):\n with K.name_scope(self.optimizer.__class__.__name__):\n # Training updates\n updates = self.optimizer.get_updates(\n params=self._collected_trainable_weights, loss=self.total_loss)\n # Unconditional updates\n updates += self.get_updates_for(None)\n # Conditional updates relevant to this model\n updates += self.get_updates_for(self.inputs)\n # Stateful metrics updates\n updates += self.metrics_updates\n # Gets loss and metrics. Updates weights at each call.\n self.train_function = K.function(\n inputs, [self.total_loss] + self.metrics_tensors,\n updates=updates,\n name='train_function',\n **self._function_kwargs)\n\n def _make_test_function(self):\n if not hasattr(self, 'test_function'):\n raise RuntimeError('You must compile your model before using it.')\n if self.test_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n # Return loss and metrics, no gradient updates.\n # Does update the network states.\n self.test_function = K.function(\n inputs, [self.total_loss] + self.metrics_tensors,\n updates=self.state_updates + self.metrics_updates,\n name='test_function',\n **self._function_kwargs)\n\n def _make_predict_function(self):\n if not hasattr(self, 'predict_function'):\n self.predict_function = None\n if self.predict_function is None:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs = self._feed_inputs + [K.learning_phase()]\n else:\n inputs = self._feed_inputs\n # Gets network outputs. 
Does not update weights.\n # Does update the network states.\n kwargs = getattr(self, '_function_kwargs', {})\n self.predict_function = K.function(\n inputs,\n self.outputs,\n updates=self.state_updates,\n name='predict_function',\n **kwargs)\n\n def _get_iterator_get_next_tensors(self, iterator):\n get_next_op = self._iterator_get_next.get(iterator, None)\n if get_next_op is None:\n get_next_op = iterator.get_next()\n self._iterator_get_next[iterator] = get_next_op\n return get_next_op\n\n def _distribution_standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n check_steps=False,\n steps_name='steps',\n steps=None,\n validation_split=0):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n This is called when using DistributionStrategy to train, evaluate or serve\n the model.\n\n Args:\n x: Input data. A `tf.data` dataset.\n y: Since `x` is a dataset, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n check_steps: boolean, True if we want to check for validity of `steps` and\n False, otherwise.\n steps_name: The public API's parameter name for `steps`.\n steps: Integer or `None`. Total number of steps (batches of samples) to\n execute.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n\n Returns:\n A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.\n If the model's input and targets are symbolic, these lists are empty\n (since the model takes no user-provided data, instead the data comes\n from the symbolic inputs/targets).\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if sample_weight is not None and sample_weight.all():\n raise NotImplementedError('sample_weight is currently not supported when '\n 'using DistributionStrategy.')\n if class_weight:\n raise NotImplementedError('class_weight is currently not supported when '\n 'using DistributionStrategy.')\n\n # TODO(anjalisridhar): Can we use the iterator and getnext op cache?\n # We require users to pass Datasets since we distribute the dataset across\n # multiple devices.\n if not isinstance(x, dataset_ops.Dataset):\n raise ValueError('When using DistributionStrategy you must specify a '\n 'Dataset object instead of a %s.' % type(x))\n # TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a\n # function which returns a Dataset. Currently distribute_dataset() only\n # accepts a function that returns a Dataset. 
Once we add support for being\n # able to clone a Dataset on multiple workers we can remove this lambda.\n result = self._distribution_strategy.distribute_dataset(lambda: x)\n iterator = result.make_initializable_iterator()\n K.get_session().run(iterator.initializer)\n # Validates `steps` argument based on x's type.\n if check_steps:\n if steps is None:\n raise ValueError('When using a Dataset instance as input to a model, '\n 'you should specify the `{steps_name}` argument.'\n .format(steps_name=steps_name))\n\n training_utils.validate_iterator_input(x, y, sample_weight,\n validation_split)\n # x an y may be PerDevice objects with an input and output tensor\n # corresponding to each device. For example, x could be\n # PerDevice:{device: get_next tensor,...}.\n next_element = iterator.get_next()\n\n if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:\n raise ValueError('Please provide data as a list or tuple of 2 elements '\n ' - input and target pair. Received %s' % next_element)\n x, y = next_element\n # Validate that all the elements in x and y are of the same type and shape.\n # We can then pass the first element of x and y to `_standardize_weights`\n # below and be confident of the output. We need to reopen the scope since\n # we unwrap values when we validate x and y.\n with self._distribution_strategy.scope():\n x_values, y_values = distributed_training_utils.\\\n validate_distributed_dataset_inputs(self._distribution_strategy, x, y)\n\n _, _, sample_weights = self._standardize_weights(x_values,\n y_values,\n sample_weight,\n class_weight,\n batch_size)\n return x, y, sample_weights\n\n def _standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n check_steps=False,\n steps_name='steps',\n steps=None,\n validation_split=0):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n Also standardizes the data to lists of arrays, in order.\n\n Also builds and compiles the model on the fly if it is a subclassed model\n that has never been called before (and thus has no inputs/outputs).\n\n This is a purely internal method, subject to refactoring at any time.\n\n Args:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n check_steps: boolean, True if we want to check for validity of `steps` and\n False, otherwise. 
For example, when we are standardizing one batch of\n data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`\n value is not required and we should not check for its validity in these\n cases.\n steps_name: The public API's parameter name for `steps`.\n steps: Integer or `None`. Total number of steps (batches of samples) to\n execute.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n\n Returns:\n A tuple of 3 lists: input arrays, target arrays, sample-weight arrays.\n If the model's input and targets are symbolic, these lists are empty\n (since the model takes no user-provided data, instead the data comes\n from the symbolic inputs/targets).\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if self._distribution_strategy:\n return self._distribution_standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=check_steps,\n steps_name=steps_name,\n steps=steps,\n validation_split=validation_split)\n\n if isinstance(x, dataset_ops.Dataset):\n if context.executing_eagerly():\n x = x.make_one_shot_iterator()\n else:\n if x in self._dataset_iterator_cache:\n x = self._dataset_iterator_cache[x]\n else:\n iterator = x.make_initializable_iterator()\n self._dataset_iterator_cache[x] = iterator\n x = iterator\n K.get_session().run(x.initializer)\n\n # Validates `steps` argument based on x's type.\n if check_steps:\n training_utils.check_steps_argument(x, steps, steps_name)\n\n is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)\n is_x_iterator = isinstance(x, iterator_ops.Iterator)\n\n # Validate user inputs when data is given as a dataset or dataset iterator.\n if is_x_iterator or is_x_eager_iterator:\n training_utils.validate_iterator_input(x, y, sample_weight,\n validation_split)\n\n # For eager iterators, when we have to process multiple batches of samples,\n # we will standardize the data when we actually loop over iterator and get\n # the batches. For now, we just return the iterator as is.\n if is_x_eager_iterator and steps is not None:\n return x, y, sample_weight\n\n # If input data is a dataset iterator in graph mode or if it is an eager\n # iterator and only one batch of samples is required, we fetch the data\n # tensors from the iterator and then standardize them.\n if is_x_iterator or is_x_eager_iterator:\n try:\n if is_x_iterator:\n next_element = self._get_iterator_get_next_tensors(x)\n else:\n next_element = x.get_next()\n except errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'Make sure that your dataset can generate '\n 'required number of samples.')\n\n if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:\n raise ValueError('Please provide data as a list or tuple of 2 elements '\n ' - input and target pair. 
Received %s' % next_element)\n x, y = next_element\n x, y, sample_weights = self._standardize_weights(x, y, sample_weight,\n class_weight, batch_size)\n return x, y, sample_weights\n\n def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,\n batch_size=None,):\n # First, we build/compile the model on the fly if necessary.\n all_inputs = []\n is_build_called = False\n is_compile_called = False\n if not self.inputs:\n # We need to use `x` to set the model inputs.\n # We type-check that `x` and `y` are either single arrays\n # or lists of arrays.\n if isinstance(x, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in x):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs += list(x)\n elif isinstance(x, dict):\n raise ValueError('Please do not pass a dictionary as model inputs.')\n else:\n if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs.append(x)\n\n # Build the model using the retrieved inputs (value or symbolic).\n # If values, then in symbolic-mode placeholders will be created\n # to match the value shapes.\n if not self.inputs:\n is_build_called = True\n self._set_inputs(x)\n\n if y is not None:\n if not self.optimizer:\n raise RuntimeError('You must compile a model before '\n 'training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n if not self._is_compiled:\n # On-the-fly compilation of the model.\n # We need to use `y` to set the model targets.\n if isinstance(y, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in y):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n all_inputs += list(y)\n elif isinstance(y, dict):\n raise ValueError('Please do not pass a dictionary as model targets.')\n else:\n if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n all_inputs.append(y)\n\n # Typecheck that all inputs are *either* value *or* symbolic.\n # TODO(fchollet): this check could be removed in Eager mode?\n if any(tensor_util.is_tensor(v) for v in all_inputs):\n if not all(tensor_util.is_tensor(v) for v in all_inputs):\n raise ValueError('Do not pass inputs that mix Numpy arrays and '\n 'TensorFlow tensors. '\n 'You passed: x=' + str(x) + '; y=' + str(y))\n\n if context.executing_eagerly():\n target_tensors = None\n else:\n # Handle target tensors if any passed.\n if not isinstance(y, (list, tuple)):\n y = [y]\n target_tensors = [v for v in y if tensor_util.is_tensor(v)]\n is_compile_called = True\n self.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics,\n loss_weights=self.loss_weights,\n target_tensors=target_tensors)\n\n # In graph mode, if we had just set inputs and targets as symbolic tensors\n # by invoking build and compile on the model respectively, we do not have to\n # feed anything to the model. 
Model already has input and target data as\n # part of the graph.\n # Note: in this case, `any` and `all` are equivalent since we disallow\n # mixed symbolic/value inputs.\n if (not context.executing_eagerly() and is_build_called and\n is_compile_called and\n any(tensor_util.is_tensor(v) for v in all_inputs)):\n return [], [], []\n\n # What follows is input validation and standardization to list format,\n # in the case where all inputs are value arrays.\n\n if context.executing_eagerly():\n # In eager mode, do not do shape validation\n # since the network has no input nodes (placeholders) to be fed.\n feed_input_names = self.input_names\n feed_input_shapes = None\n elif not self._is_graph_network:\n # Case: symbolic-mode subclassed network. Do not do shape validation.\n feed_input_names = self._feed_input_names\n feed_input_shapes = None\n else:\n # Case: symbolic-mode graph network.\n # In this case, we run extensive shape validation checks.\n feed_input_names = self._feed_input_names\n feed_input_shapes = self._feed_input_shapes\n\n # Standardize the inputs.\n x = training_utils.standardize_input_data(\n x,\n feed_input_names,\n feed_input_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='input')\n\n if y is not None:\n if not self._is_graph_network:\n feed_output_names = self._feed_output_names\n feed_output_shapes = None\n # Sample weighting not supported in this case.\n # TODO(fchollet): consider supporting it.\n feed_sample_weight_modes = [None for _ in self.outputs]\n else:\n feed_output_names = self._feed_output_names\n feed_sample_weight_modes = self._feed_sample_weight_modes\n feed_output_shapes = []\n for output_shape, loss_fn in zip(self._feed_output_shapes,\n self._feed_loss_fns):\n if loss_fn is losses.sparse_categorical_crossentropy:\n if K.image_data_format() == 'channels_first':\n feed_output_shapes.append(\n (output_shape[0], 1) + output_shape[2:])\n else:\n feed_output_shapes.append(output_shape[:-1] + (1,))\n elif (not hasattr(loss_fn, '__name__') or\n getattr(losses, loss_fn.__name__, None) is None):\n # If `loss_fn` is not a function (e.g. 
callable class)\n # or if it not in the `losses` module, then\n # it is a user-defined loss and we make no assumptions\n # about it.\n feed_output_shapes.append(None)\n else:\n feed_output_shapes.append(output_shape)\n\n # Standardize the outputs.\n y = training_utils.standardize_input_data(\n y,\n feed_output_names,\n feed_output_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='target')\n\n # Generate sample-wise weight values given the `sample_weight` and\n # `class_weight` arguments.\n sample_weights = training_utils.standardize_sample_weights(\n sample_weight, feed_output_names)\n class_weights = training_utils.standardize_class_weights(\n class_weight, feed_output_names)\n sample_weights = [\n training_utils.standardize_weights(ref, sw, cw, mode)\n for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,\n feed_sample_weight_modes)\n ]\n # Check that all arrays have the same length.\n if not self._distribution_strategy:\n training_utils.check_array_lengths(x, y, sample_weights)\n if self._is_graph_network and not context.executing_eagerly():\n # Additional checks to avoid users mistakenly using improper loss fns.\n training_utils.check_loss_and_target_compatibility(\n y, self._feed_loss_fns, feed_output_shapes)\n else:\n y = []\n sample_weights = []\n\n if self.stateful and batch_size:\n # Check that for stateful networks, number of samples is a multiple\n # of the static batch size.\n if x[0].shape[0] % batch_size != 0:\n raise ValueError('In a stateful network, '\n 'you should only pass inputs with '\n 'a number of samples that can be '\n 'divided by the batch size. Found: ' +\n str(x[0].shape[0]) + ' samples')\n return x, y, sample_weights\n\n @checkpointable.no_automatic_dependency_tracking\n def _set_inputs(self, inputs, training=None):\n \"\"\"Set model's input and output specs based on the input data received.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n Args:\n inputs: Single array, or list of arrays. The arrays could be placeholders,\n Numpy arrays, or data tensors.\n - if placeholders: the model is built on top of these placeholders,\n and we expect Numpy data to be fed for them when calling `fit`/etc.\n - if Numpy data: we create placeholders matching the shape of the Numpy\n arrays. We expect Numpy data to be fed for these placeholders\n when calling `fit`/etc.\n - if data tensors: the model is built on top of these tensors.\n We do not expect any Numpy data to be provided when calling `fit`/etc.\n training: Boolean or None. Only relevant in symbolic mode. Specifies\n whether to build the model's graph in inference mode (False), training\n mode (True), or using the Keras learning phase (None).\n \"\"\"\n call_convention = getattr(\n self,\n '_call_convention',\n base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT)\n if call_convention not in (\n base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT,\n base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT):\n raise NotImplementedError(\n 'Subclassed Models without \"inputs\" (or single positional arguments) '\n 'in their call() signatures do not yet support shape inference. 
File '\n 'a feature request if this limitation bothers you.')\n if self.__class__.__name__ == 'Sequential':\n if tensor_util.is_tensor(inputs):\n input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])\n self.build(input_shape=input_shape)\n else:\n input_shape = (None,) + inputs.shape[1:]\n self.build(input_shape=input_shape)\n if context.executing_eagerly():\n self._eager_set_inputs(inputs)\n else:\n self._symbolic_set_inputs(inputs, training=training)\n\n @checkpointable.no_automatic_dependency_tracking\n def _eager_set_inputs(self, inputs):\n \"\"\"Set model's input and output specs based on the input data received.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n We assume the number and ndim of outputs\n does not change over different calls.\n\n Args:\n inputs: Argument `x` (input data) passed by the user upon first model use.\n\n Raises:\n ValueError: If the model's inputs are already set.\n \"\"\"\n assert context.executing_eagerly()\n if self.inputs:\n raise ValueError('Model inputs are already set.')\n # On-the-fly setting of model inputs/outputs as DeferredTensors,\n # to keep track of number of inputs and outputs and their ndim.\n if isinstance(inputs, (list, tuple)):\n if tensor_util.is_tensor(inputs[0]):\n dummy_output_values = self.call(\n training_utils.cast_if_floating_dtype(inputs))\n else:\n dummy_output_values = self.call(\n [ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs])\n dummy_input_values = list(inputs)\n else:\n if tensor_util.is_tensor(inputs):\n dummy_output_values = self.call(\n training_utils.cast_if_floating_dtype(inputs))\n else:\n dummy_output_values = self.call(\n ops.convert_to_tensor(inputs, dtype=K.floatx()))\n dummy_input_values = [inputs]\n if isinstance(dummy_output_values, (list, tuple)):\n dummy_output_values = list(dummy_output_values)\n else:\n dummy_output_values = [dummy_output_values]\n self.outputs = [\n base_layer.DeferredTensor(shape=(None for _ in v.shape),\n dtype=v.dtype) for v in dummy_output_values]\n self.inputs = [\n base_layer.DeferredTensor(shape=(None for _ in v.shape),\n dtype=v.dtype) for v in dummy_input_values]\n self.input_names = [\n 'input_%d' % (i + 1) for i in range(len(dummy_input_values))]\n self.output_names = [\n 'output_%d' % (i + 1) for i in range(len(dummy_output_values))]\n self.built = True\n\n @checkpointable.no_automatic_dependency_tracking\n def _symbolic_set_inputs(self, inputs, outputs=None, training=None):\n \"\"\"Set model's inputs and output specs based.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n Args:\n inputs: Argument `x` (input data) passed by the user upon first model use.\n outputs: None, a data tensor, or a list of data tensors. If None, the\n outputs will be determined by invoking self.call(), otherwise the\n provided value will be used.\n training: Boolean or None. Only relevant in symbolic mode. 
Specifies\n whether to build the model's graph in inference mode (False), training\n mode (True), or using the Keras learning phase (None).\n\n Raises:\n ValueError: If the model's inputs are already set.\n \"\"\"\n assert not context.executing_eagerly()\n if self.inputs:\n raise ValueError('Model inputs are already set.')\n\n # On-the-fly setting of symbolic model inputs (either by using the tensor\n # provided, or by creating a placeholder if Numpy data was provided).\n self.inputs = []\n self.input_names = []\n self._feed_inputs = []\n self._feed_input_names = []\n self._feed_input_shapes = []\n if isinstance(inputs, (list, tuple)):\n inputs = list(inputs)\n else:\n inputs = [inputs]\n\n for i, v in enumerate(inputs):\n name = 'input_%d' % (i + 1)\n self.input_names.append(name)\n if isinstance(v, list):\n v = np.asarray(v)\n if v.ndim == 1:\n v = np.expand_dims(v, 1)\n if isinstance(v, (np.ndarray)):\n # We fix the placeholder shape except the batch size.\n # This is suboptimal, but it is the best we can do with the info\n # we have. The user should call `model._set_inputs(placeholders)`\n # to specify custom placeholders if the need arises.\n shape = (None,) + v.shape[1:]\n placeholder = K.placeholder(shape=shape, name=name)\n self.inputs.append(placeholder)\n self._feed_inputs.append(placeholder)\n self._feed_input_names.append(name)\n self._feed_input_shapes.append(shape)\n else:\n # Assumed tensor - TODO(fchollet) additional type check?\n self.inputs.append(v)\n if K.is_placeholder(v):\n self._feed_inputs.append(v)\n self._feed_input_names.append(name)\n self._feed_input_shapes.append(K.int_shape(v))\n\n if outputs is None:\n # Obtain symbolic outputs by calling the model.\n if len(self.inputs) == 1:\n if self._expects_training_arg:\n outputs = self.call(self.inputs[0], training=training)\n else:\n outputs = self.call(self.inputs[0])\n else:\n if self._expects_training_arg:\n outputs = self.call(self.inputs, training=training)\n else:\n outputs = self.call(self.inputs)\n if isinstance(outputs, (list, tuple)):\n outputs = list(outputs)\n else:\n outputs = [outputs]\n self.outputs = outputs\n self.output_names = [\n 'output_%d' % (i + 1) for i in range(len(self.outputs))]\n self.built = True\n\n def fit(self,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n **kwargs):\n \"\"\"Trains the model for a fixed number of epochs (iterations on a dataset).\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). 
If `x` is a dataset or dataset\n iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of symbolic tensors, datasets, or dataset iterators\n (since they generate batches).\n epochs: Integer. Number of epochs to train the model.\n An epoch is an iteration over the entire `x` and `y`\n data provided.\n Note that in conjunction with `initial_epoch`,\n `epochs` is to be understood as \"final epoch\".\n The model is not trained for a number of iterations\n given by `epochs`, but merely until the epoch\n of index `epochs` is reached.\n verbose: Integer. 0, 1, or 2. Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset or a dataset iterator.\n validation_data: Data on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n `validation_data` will override `validation_split`.\n `validation_data` could be:\n - tuple `(x_val, y_val)` of Numpy arrays or tensors\n - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays\n - dataset or a dataset iterator\n shuffle: Boolean (whether to shuffle the training data\n before each epoch) or str (for 'batch').\n 'batch' is a special option for dealing with the\n limitations of HDF5 data; it shuffles in batch-sized chunks.\n Has no effect when `steps_per_epoch` is not `None`.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) value, used for weighting the loss function\n (during training only).\n This can be useful to tell the model to\n \"pay more attention\" to samples from\n an under-represented class.\n sample_weight: Optional Numpy array of weights for\n the training samples, used for weighting the loss function\n (during training only). You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. This argument is not\n supported when `x` is a dataset or a dataset iterator.\n initial_epoch: Integer.\n Epoch at which to start training\n (useful for resuming a previous training run).\n steps_per_epoch: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. 
When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined.\n validation_steps: Only relevant if `steps_per_epoch`\n is specified. Total number of steps (batches of samples)\n to validate before stopping.\n **kwargs: Used for backwards compatibility.\n\n Returns:\n A `History` object. Its `History.history` attribute is\n a record of training loss values and metrics values\n at successive epochs, as well as validation loss values\n and validation metrics values (if applicable).\n\n Raises:\n RuntimeError: If the model was never compiled.\n ValueError: In case of mismatch between the provided input data\n and what the model expects.\n \"\"\"\n # TODO(fchollet): this method may be creating reference cycles, which would\n # lead to accumulating garbage in memory when called in a loop. Investigate.\n\n # Backwards compatibility\n if batch_size is None and steps_per_epoch is None:\n batch_size = 32\n # Legacy support\n if 'nb_epoch' in kwargs:\n logging.warning(\n 'The `nb_epoch` argument in `fit` '\n 'has been renamed `epochs`.')\n epochs = kwargs.pop('nb_epoch')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n # Validate and standardize user data.\n if self._distribution_strategy:\n distributed_training_utils.validate_callbacks(callbacks)\n\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps_per_epoch',\n steps=steps_per_epoch,\n validation_split=validation_split)\n\n # Prepare validation data.\n if validation_data:\n if (isinstance(validation_data, iterator_ops.Iterator) or\n isinstance(validation_data, iterator_ops.EagerIterator) or\n isinstance(validation_data, dataset_ops.Dataset)):\n val_x = validation_data\n val_y = None\n val_sample_weight = None\n elif len(validation_data) == 2:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n else:\n raise ValueError(\n 'When passing a `validation_data` argument, '\n 'it must contain either 2 items (x_val, y_val), '\n 'or 3 items (x_val, y_val, val_sample_weights), '\n 'or alternatively it could be a dataset or a '\n 'dataset or a dataset iterator. '\n 'However we received `validation_data=%s`' % validation_data)\n\n # Validate and standardize validation data.\n val_x, val_y, val_sample_weights = self._standardize_user_data(\n val_x,\n val_y,\n sample_weight=val_sample_weight,\n batch_size=batch_size,\n steps=validation_steps)\n\n elif validation_split and 0. < validation_split < 1.:\n if training_utils.has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))\n y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))\n sample_weights, val_sample_weights = (slice_arrays(\n sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))\n elif validation_steps:\n val_x = []\n val_y = []\n val_sample_weights = []\n else:\n val_x = None\n val_y = None\n val_sample_weights = None\n\n if context.executing_eagerly():\n return training_eager.fit_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n class_weight=class_weight,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n elif self._distribution_strategy:\n return training_distributed.fit_loop(\n self, x, y,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n else:\n return training_arrays.fit_loop(\n self, x, y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n\n def evaluate(self,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None):\n \"\"\"Returns the loss value & metrics values for the model in test mode.\n\n Computation is done in batches.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely).\n If `x` is a dataset or a dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator/dataset).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` is your data is in the\n form of symbolic tensors, datasets, or dataset iterators\n (since they generate batches).\n verbose: 0 or 1. Verbosity mode.\n 0 = silent, 1 = progress bar.\n sample_weight: Optional Numpy array of weights for\n the test samples, used for weighting the loss function.\n You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. 
This argument is not\n supported when `x` is a dataset or a dataset iterator.\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring the evaluation round finished.\n Ignored with the default value of `None`.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments.\n \"\"\"\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps',\n steps=steps)\n\n if context.executing_eagerly():\n return training_eager.test_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps)\n elif self._distribution_strategy:\n return training_distributed.test_loop(\n self,\n inputs=x,\n targets=y,\n verbose=verbose,\n steps=steps)\n else:\n return training_arrays.test_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps)\n\n def predict(self, x, batch_size=None, verbose=0, steps=None):\n \"\"\"Generates output predictions for the input samples.\n\n Computation is done in batches.\n\n Arguments:\n x: Input samples. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` is your data is in the\n form of symbolic tensors, dataset, or dataset iterators\n (since they generate batches).\n verbose: Verbosity mode, 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring the prediction round finished.\n Ignored with the default value of `None`.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between the provided\n input data and the model's expectations,\n or in case a stateful model receives a number of samples\n that is not a multiple of the batch size.\n \"\"\"\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n\n # Validate and standardize user data.\n x, _, _ = self._standardize_user_data(\n x, check_steps=True, steps_name='steps', steps=steps)\n\n if context.executing_eagerly():\n return training_eager.predict_loop(\n self, x, batch_size=batch_size, verbose=verbose, steps=steps)\n elif self._distribution_strategy:\n return training_distributed.predict_loop(\n self, x, verbose=verbose, steps=steps)\n else:\n return training_arrays.predict_loop(\n self, x, batch_size=batch_size, verbose=verbose, steps=steps)\n\n def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):\n \"\"\"Runs a single gradient update on a single batch of data.\n\n Arguments:\n x: Input data. 
It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n class_weight: Optional dictionary mapping\n class indices (integers) to\n a weight (float) to apply to the model's loss for the samples\n from this class during training.\n This can be useful to tell the model to \"pay more attention\" to\n samples from an under-represented class.\n\n Returns:\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`train_on_batch` is not supported for models '\n 'compiled with DistributionStrategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight, class_weight=class_weight)\n\n if context.executing_eagerly():\n outputs = training_eager.train_on_batch(\n self, x, y, sample_weights=sample_weights)\n else:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = x + y + sample_weights + [1]\n else:\n ins = x + y + sample_weights\n\n self._make_train_function()\n outputs = self.train_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def test_on_batch(self, x, y=None, sample_weight=None):\n \"\"\"Test the model on a single batch of samples.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). 
If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`test_on_batch` is not supported for models '\n 'compiled with DistributionStrategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight)\n\n if context.executing_eagerly():\n outputs = training_eager.test_on_batch(\n self, x, y, sample_weights=sample_weights)\n else:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = x + y + sample_weights + [0]\n else:\n ins = x + y + sample_weights\n self._make_test_function()\n outputs = self.test_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def predict_on_batch(self, x):\n \"\"\"Returns predictions for a single batch of samples.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`predict_on_batch` is not supported for '\n 'models compiled with DistributionStrategy.')\n # Validate and standardize user data.\n inputs, _, _ = self._standardize_user_data(x)\n if context.executing_eagerly():\n if (isinstance(x, iterator_ops.EagerIterator) or\n (isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):\n inputs = training_utils.cast_if_floating_dtype(inputs)\n else:\n inputs = [\n ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs\n ]\n return self(inputs) # pylint: disable=not-callable\n\n if not context.executing_eagerly():\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = inputs + [0]\n else:\n ins = inputs\n\n self._make_predict_function()\n outputs = self.predict_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def fit_generator(self,\n generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_data=None,\n validation_steps=None,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0):\n \"\"\"Fits the model on data yielded batch-by-batch by a Python generator.\n\n The generator is run in parallel to the model, for efficiency.\n For instance, this allows you to do real-time data augmentation\n on images on CPU in 
parallel to training your model on GPU.\n\n The use of `keras.utils.Sequence` guarantees the ordering\n and guarantees the single use of every input per epoch when\n using `use_multiprocessing=True`.\n\n Arguments:\n generator: A generator or an instance of `Sequence`\n (`keras.utils.Sequence`)\n object in order to avoid duplicate data\n when using multiprocessing.\n The output of the generator must be either\n - a tuple `(inputs, targets)`\n - a tuple `(inputs, targets, sample_weights)`.\n This tuple (a single output of the generator) makes a single batch.\n Therefore, all arrays in this tuple must have the same length (equal\n to the size of this batch). Different batches may have different\n sizes.\n For example, the last batch of the epoch is commonly smaller than\n the\n others, if the size of the dataset is not divisible by the batch\n size.\n The generator is expected to loop over its data\n indefinitely. An epoch finishes when `steps_per_epoch`\n batches have been seen by the model.\n steps_per_epoch: Total number of steps (batches of samples)\n to yield from `generator` before declaring one epoch\n finished and starting the next epoch. It should typically\n be equal to the number of samples of your dataset\n divided by the batch size.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n epochs: Integer, total number of iterations on the data.\n verbose: Verbosity mode, 0, 1, or 2.\n callbacks: List of callbacks to be called during training.\n validation_data: This can be either\n - a generator for the validation data\n - a tuple (inputs, targets)\n - a tuple (inputs, targets, sample_weights).\n validation_steps: Only relevant if `validation_data`\n is a generator. Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(validation_data)` as a number of steps.\n class_weight: Dictionary mapping class indices to a weight\n for the class.\n max_queue_size: Integer. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n shuffle: Boolean. Whether to shuffle the order of the batches at\n the beginning of each epoch. 
Only used with instances\n of `Sequence` (`keras.utils.Sequence`).\n Has no effect when `steps_per_epoch` is not `None`.\n initial_epoch: Epoch at which to start training\n (useful for resuming a previous training run)\n\n Returns:\n A `History` object.\n\n Example:\n\n ```python\n def generate_arrays_from_file(path):\n while 1:\n f = open(path)\n for line in f:\n # create numpy arrays of input data\n # and labels, from each line in the file\n x1, x2, y = process_line(line)\n yield ({'input_1': x1, 'input_2': x2}, {'output': y})\n f.close()\n\n model.fit_generator(generate_arrays_from_file('/my_file.txt'),\n steps_per_epoch=10000, epochs=10)\n ```\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`fit_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`fit_generator` is not yet enabled for unbuilt Model subclasses')\n\n return training_generator.fit_generator(\n self,\n generator,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n\n def evaluate_generator(self,\n generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Evaluates the model on a data generator.\n\n The generator should return the same kind of data\n as accepted by `test_on_batch`.\n\n Arguments:\n generator: Generator yielding tuples (inputs, targets)\n or (inputs, targets, sample_weights)\n or an instance of Sequence (keras.utils.Sequence)\n object in order to avoid duplicate data\n when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n max_queue_size: maximum size for the generator queue\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: Verbosity mode, 0 or 1.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). 
The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments.\n\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`evaluate_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`evaluate_generator` is not yet enabled for '\n 'unbuilt Model subclasses')\n\n return training_generator.evaluate_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n def predict_generator(self,\n generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Generates predictions for the input samples from a data generator.\n\n The generator should return the same kind of data as accepted by\n `predict_on_batch`.\n\n Arguments:\n generator: Generator yielding batches of input samples\n or an instance of Sequence (keras.utils.Sequence)\n object in order to avoid duplicate data\n when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n max_queue_size: Maximum size for the generator queue.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`predict_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`predict_generator` is not yet enabled for unbuilt Model subclasses')\n\n return training_generator.predict_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n def _get_callback_model(self):\n \"\"\"Returns the Callback Model for this Model.\"\"\"\n\n if hasattr(self, '_replicated_model') and self._replicated_model:\n # When using training_distributed, we set the callback model\n # to an instance of the `DistributedModel` that we create in\n # the `compile` call. The `DistributedModel` is initialized\n # with the first replicated model. 
We need to set the callback\n # model to a DistributedModel to allow us to override saving\n # and loading weights when we checkpoint the model during training.\n return self._replicated_model\n if hasattr(self, 'callback_model') and self.callback_model:\n return self.callback_model\n return self\n\n\nclass DistributedCallbackModel(Model):\n \"\"\"Model that is used for callbacks with DistributionStrategy.\"\"\"\n\n def __init__(self, model):\n super(DistributedCallbackModel, self).__init__()\n # TODO(anjalisridhar): Right now the only attributes set are the layer and\n # weights. We may need to set additional attributes as needed since we have\n # not called compile on this model.\n\n def set_original_model(self, orig_model):\n self._original_model = orig_model\n\n def save_weights(self, filepath, overwrite=True, save_format=None):\n self._replicated_model.save_weights(filepath, overwrite=overwrite,\n save_format=save_format)\n\n def save(self, filepath, overwrite=True, include_optimizer=True):\n # save weights from the distributed model to the original model\n distributed_model_weights = self.get_weights()\n self._original_model.set_weights(distributed_model_weights)\n # TODO(anjalisridhar): Do we need to save the original model here?\n # Saving the first replicated model works as well.\n self._original_model.save(filepath, overwrite=True, include_optimizer=False)\n\n def load_weights(self, filepath, by_name=False):\n self._original_model.load_weights(filepath, by_name=False)\n # Copy the weights from the original model to each of the replicated models.\n orig_model_weights = self._original_model.get_weights()\n distributed_training_utils.set_weights(\n self._original_model._distribution_strategy, self, # pylint: disable=protected-access\n orig_model_weights)\n\n def __getattr__(self, item):\n # Whitelisted atttributes of the model that can be accessed by the user\n # during a callback.\n if item not in ['_setattr_tracking']:\n logging.warning('You are accessing attribute ' + item + 'of the'\n 'DistributedCallbackModel that may not have been set'\n 'correctly.')\n"
] | [
[
"tensorflow.python.keras.engine.training_arrays.test_loop",
"tensorflow.python.keras.losses.get",
"tensorflow.python.keras.engine.training_utils.get_metric_function",
"tensorflow.python.keras.engine.training_utils.collect_metrics",
"tensorflow.python.keras.engine.training_utils.standardize_class_weights",
"tensorflow.python.keras.engine.training_eager.train_on_batch",
"tensorflow.python.keras.engine.training_eager.fit_loop",
"numpy.asarray",
"tensorflow.python.keras.engine.training_utils.check_loss_and_target_compatibility",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.keras.engine.training_arrays.predict_loop",
"tensorflow.python.keras.engine.training_utils.has_symbolic_tensors",
"tensorflow.python.keras.engine.training_utils.cast_if_floating_dtype",
"tensorflow.python.keras.engine.training_distributed.fit_loop",
"tensorflow.python.keras.engine.training_eager.test_loop",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.backend.function",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.keras.engine.base_layer.DeferredTensor",
"tensorflow.python.keras.backend.placeholder",
"tensorflow.python.keras.engine.training_utils.weighted_masked_objective",
"tensorflow.python.keras.engine.distributed_training_utils.set_weights",
"tensorflow.python.keras.optimizers.get",
"tensorflow.python.keras.engine.distributed_training_utils.validate_distributed_dataset_inputs",
"tensorflow.python.keras.engine.training_arrays.fit_loop",
"numpy.expand_dims",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.keras.engine.training_utils.prepare_sample_weights",
"tensorflow.python.keras.engine.training_utils.standardize_sample_weights",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras.engine.training_distributed.predict_loop",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.engine.training_eager.test_on_batch",
"tensorflow.python.keras.engine.training_utils.validate_iterator_input",
"tensorflow.python.keras.backend.dtype",
"tensorflow.python.keras.backend.is_placeholder",
"tensorflow.python.keras.utils.generic_utils.slice_arrays",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.engine.training_eager.predict_loop",
"tensorflow.python.keras.engine.training_generator.evaluate_generator",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.engine.training_generator.fit_generator",
"tensorflow.python.keras.metrics.get",
"tensorflow.python.keras.engine.training_generator.predict_generator",
"tensorflow.python.keras.engine.training_utils.standardize_weights",
"tensorflow.python.keras.engine.training_distributed.test_loop",
"tensorflow.python.keras.engine.distributed_training_utils.validate_callbacks",
"tensorflow.python.keras.metrics.squeeze_or_expand_dimensions",
"tensorflow.python.keras.engine.training_utils.check_steps_argument",
"tensorflow.python.ops.weights_broadcast_ops.broadcast_weights",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.engine.training_distributed.clone_and_build_model",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.keras.engine.training_utils.standardize_input_data",
"tensorflow.python.keras.engine.training_utils.check_array_lengths",
"tensorflow.python.framework.tensor_util.is_tensor"
]
] |
jkrogager/PyNOT | [
"2514a443079e50c12a13ebbd89a48f91a8d20626"
] | [
"pynot/phot.py"
] | [
"\"\"\"\nFunctions for Imaging Pipeline\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nfrom astropy.io import fits\nfrom astropy.modeling import models, fitting\nfrom astropy.table import Table\nfrom scipy.optimize import curve_fit\nimport os\n\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom astroquery.sdss import SDSS\n\nimport astroalign as aa\nimport sep\n\nfrom pynot import alfosc\nfrom pynot.functions import get_version_number, mad\nfrom pynot.data.organizer import get_filter\n\n__version__ = get_version_number()\n\n\ndef source_detection(fname, zeropoint=0., threshold=5.0, aperture=10.0, kwargs_bg={}, kwargs_ext={}):\n \"\"\"\n Run source detection in the input image using the python package SEP, based on the SExtractor algorithm.\n\n Parameters\n ----------\n fname : str\n Filename of the FITS image to be analyzed. The image must have at least two extensions:\n the first should be the image in counts, and one should be named ERR holding the associated error image\n\n zeropoint : float [default=0.]\n Magnitude zero-point for the given photometric filter used for the observations.\n By defualt instrument magnitudes will be returned if no zero-point is given.\n\n threshold : float [default=5.0]\n Detection threshold in 'sigmas'.\n\n aperture : float [default=10.]\n Circular aperture radius in pixels.\n\n kwargs_bg : dict\n Parameters to pass to background subtraction (sep.Background()).\n See defition in `default_options_img.yml`\n\n kwargs_ext : dict\n Parameters to pass to source extraction (sep.extract()).\n See defition in `default_options_img.yml`\n\n Returns\n -------\n table_fname : str\n The autogenerated filename of the source catalog. The format is: file-base of the input filename + '_phot.fits'.\n Ex.: fname='alfosc_rband.fits' -> table_fname='alfosc_rband_phot.fits'\n\n segmap_fname : str\n The autogenerated filename of the segmentation map. This image holds the regions associated to each source\n in the source catalog. The format is: file-base of the input filename + '_sep.fits'\n\n output_msg : str\n Log of messages from the function call.\n \"\"\"\n msg = list()\n # get GAIN from header\n data = fits.getdata(fname)\n error_image = fits.getdata(fname, 'ERR')\n hdr = fits.getheader(fname)\n msg.append(\" - Loaded input image: %s\" % fname)\n\n if 'EXPTIME' in hdr:\n exptime = hdr['EXPTIME']\n msg.append(\" - Loaded exposure time from image header: %.1f\" % exptime)\n else:\n exptime = 1.\n msg.append(\"[WARNING] - No exposure time found in image header! 
Assuming image in counts.\")\n\n data = data * 1.\n error_image = error_image * 1.\n if 'threshold' in kwargs_ext:\n threshold = kwargs_ext.pop('threshold')\n if 'aperture' in kwargs_ext:\n aperture = kwargs_ext.pop('aperture')\n\n bkg = sep.Background(data, **kwargs_bg)\n data_sub = data - bkg\n msg.append(\" - Subtracted sky background\")\n msg.append(\" - Background RMS: %.2e\" % bkg.globalrms)\n data_sub = data_sub.byteswap().newbyteorder()\n error_image = error_image.byteswap().newbyteorder()\n if data_sub.dtype.byteorder != '<':\n data_sub = data_sub.byteswap().newbyteorder()\n error_image = error_image.byteswap().newbyteorder()\n extract_output = sep.extract(data_sub, threshold, err=bkg.globalrms, **kwargs_ext)\n if len(extract_output) == 2:\n objects, segmap = extract_output\n else:\n objects = extract_output\n segmap = None\n N_obj = len(objects)\n msg.append(\" - Detected %i objects\" % N_obj)\n\n # Calculate fixed aperture magnitudes:\n aper_results = sep.sum_circle(data_sub, objects['x'], objects['y'], aperture, err=error_image)\n aper_flux, aper_fluxerr, aper_flag = aper_results\n msg.append(\" - Calculating fluxes within circular aperture of: %i pixels\" % aperture)\n\n # Calculate Kron radius:\n x = objects['x']\n y = objects['y']\n a = objects['a']\n b = objects['b']\n theta = objects['theta']\n kronrad, krflag = sep.kron_radius(data_sub, x, y, a, b, theta, 6.0)\n kronrad[kronrad < 1.] = 1.\n # Sum fluxes in ellipse apertures:\n flux, fluxerr, flag = sep.sum_ellipse(data_sub, x, y, a, b, theta, 2.5*kronrad, subpix=1)\n msg.append(\" - Calculating Kron radii and fluxes within elliptical apertures\")\n # combine flags:\n flag |= krflag\n\n # If the Kron radius is less than r_min (aperture), use aperture fluxes:\n r_min = aperture\n use_circle = kronrad * np.sqrt(b * a) < r_min\n flux[use_circle] = aper_flux[use_circle]\n fluxerr[use_circle] = aper_fluxerr[use_circle]\n flag[use_circle] = aper_flag[use_circle]\n msg.append(\" - Targets with Kron radii below R_min (%.2f) are ignored\" % r_min)\n msg.append(\" - Circular aperture fluxes used instead where R_kron < R_min\")\n if np.sum(use_circle) == 1:\n msg.append(\" - %i source identified with R_kron < R_min\" % np.sum(use_circle))\n else:\n msg.append(\" - %i sources identified with R_kron < R_min\" % np.sum(use_circle))\n\n # Save output table:\n base, ext = os.path.splitext(fname)\n table_fname = base + '_phot.fits'\n object_table = Table(objects)\n object_table['flux_auto'] = flux\n object_table['flux_err_auto'] = fluxerr\n object_table['flux_aper'] = aper_flux\n object_table['flux_err_aper'] = aper_fluxerr\n object_table['R_kron'] = kronrad\n flux[flux <= 0] = 1.\n object_table['mag_auto'] = zeropoint - 2.5*np.log10(flux)\n object_table.write(table_fname, format='fits', overwrite=True)\n msg.append(\" [OUTPUT] - Saved extraction table: %s\" % table_fname)\n\n # Save segmentation map:\n if segmap is not None:\n segmap_fname = base + '_seg.fits'\n seg_hdr = fits.Header()\n seg_hdr['AUTHOR'] = 'PyNOT version %s' % __version__\n seg_hdr['IMAGE'] = fname\n seg_hdr['FILTER'] = get_filter(hdr)\n seg_hdr.add_comment(\"Segmentation map from SEP (SExtractor)\")\n fits.writeto(segmap_fname, segmap, header=seg_hdr, overwrite=True)\n msg.append(\" [OUTPUT] - Saved source segmentation map: %s\" % segmap_fname)\n else:\n segmap_fname = ''\n\n # Plot source identifications:\n fig_fname = base + '_sources.pdf'\n plot_objects(fig_fname, data_sub, objects, threshold=threshold)\n msg.append(\" [OUTPUT] - Saved source identification overview: 
%s\" % fig_fname)\n msg.append(\"\")\n output_msg = \"\\n\".join(msg)\n\n return table_fname, segmap_fname, output_msg\n\n\ndef plot_objects(fig_fname, data, objects, threshold=5.):\n \"\"\"\n Create a plot of the image and the detected sources from SEP.\n\n Parameters\n ----------\n fig_fname : str\n Filename of the resulting figure\n\n data : np.array, shape (N, M)\n Numpy array of the image data, must be a 2D array.\n\n objects : astropy.table.Table or List[dict]\n List of dictionaries or astropy table holding the object information:\n x, y : x, y positions\n a, b : aperture minor and major axes in pixels\n theta : aperture orientation in radians\n\n threshold : float [default=5.]\n Constract threshold for the image. The color-scale is normalized based on the image\n statistics (median and MAD). The min and max values are -1*MAD and +`threshold`*MAD\n around the median value of the image counts, where MAD is the median absolute deviation.\n\n Returns\n -------\n None\n \"\"\"\n # plot background-subtracted image\n fig, ax = plt.subplots()\n m, s = np.median(data), 1.5*mad(data)\n ax.imshow(data, interpolation='nearest', cmap='gray_r',\n vmin=m-1*s, vmax=m+threshold*s, origin='lower')\n\n # plot an ellipse for each object\n for item in objects:\n e = Ellipse(xy=(item['x'], item['y']),\n width=10*item['a'],\n height=10*item['b'],\n angle=item['theta'] * 180. / np.pi)\n e.set_facecolor('none')\n e.set_edgecolor('red')\n e.set_linewidth(0.8)\n ax.add_artist(e)\n fig.tight_layout()\n fig.savefig(fig_fname)\n\n\ndef load_fits_image(fname):\n \"\"\"Load a FITS image with an associated error extension and an optional data quality MASK.\"\"\"\n with fits.open(fname) as hdu_list:\n image = hdu_list[0].data\n hdr = hdu_list[0].header\n if 'ERR' in hdu_list:\n error = hdu_list['ERR'].data\n else:\n raise TypeError(\"No error image detected\")\n\n if 'MASK' in hdu_list:\n mask = hdu_list['MASK'].data\n else:\n mask = np.zeros_like(image, dtype=bool)\n return image, error, mask, hdr\n\n\ndef measure_seeing(img, centers, size=20, max_obj=10):\n \"\"\"\n Measure the average seeing in an image by fitting a 2D Gaussian to pre-defined point sources.\n\n Parameters\n ----------\n img : np.array, shape(N, M)\n Numpy array of the image to analyze.\n\n centers : list[number, number]\n List of positions of point sources (x, y) in pixels\n\n size : int [default=20]\n Image cutout size. 
The Gaussian PSF is fitted in a box of size 2*size by 2*size pixels.\n\n max_obj : int [default=10]\n Maximum number of sources to include in the fitting.\n\n Returns\n -------\n fwhm : float\n The average seeing FWHM in pixels.\n\n ratio : float\n The average axis ratio (ellipticity) of the Gaussian PSF.\n\n msg : str\n Output message of the function call.\n If no warnings occurred, this is an emptry string.\n \"\"\"\n X = np.arange(img.shape[1])\n Y = np.arange(img.shape[0])\n sigmas = list()\n ratios = list()\n good_x = (centers[:, 0] > size) & (centers[:, 0] < X.max()-size)\n good_y = (centers[:, 1] > size) & (centers[:, 1] < Y.max()-size)\n if np.sum(good_x & good_y) < 2:\n msg = \"[WARNING] - Not enough sources to measure seeing.\"\n return (-1, -1, msg)\n max_obj = min(max_obj, np.sum(good_x & good_y))\n idx = np.random.choice(np.arange(len(centers))[good_x & good_y], max_obj, replace=False)\n for x_cen, y_cen in centers[idx]:\n x1, x2 = int(x_cen)-size, int(x_cen)+size\n y1, y2 = int(y_cen)-size, int(y_cen)+size\n cutout = img[y1:y2, x1:x2]\n x, y = np.meshgrid(X[x1:x2], Y[y1:y2])\n A = img[int(y_cen), int(x_cen)]\n p_init = models.Gaussian2D(amplitude=A, x_mean=x_cen, y_mean=y_cen, x_stddev=5, y_stddev=5, theta=0)\n try:\n fitter = fitting.LevMarLSQFitter()\n except TypeError:\n continue\n p_opt = fitter(p_init, x, y, cutout-np.median(cutout))\n sigma_x = p_opt.x_stddev\n sigma_y = p_opt.y_stddev\n sig = np.sqrt(sigma_x**2 + sigma_y**2)\n ba = min(sigma_x, sigma_y) / max(sigma_x, sigma_y)\n sigmas.append(sig)\n ratios.append(ba)\n\n if len(sigmas) < 2:\n msg = \"[WARNING] - Not enough sources to measure seeing.\"\n return (-1, -1, msg)\n\n fwhm = np.median(sigmas) * 2.35\n ratio = np.median(ratios)\n msg = \"\"\n return (fwhm, ratio, msg)\n\n\ndef save_file_log(log_name, image_log, target_hdr):\n with open(log_name, 'w') as out:\n out.write(\"# PyNOT Combination Log of Target: %s\\n\" % target_hdr['OBJECT'])\n out.write(\"# Filter: %s\\n\" % get_filter(target_hdr))\n out.write(\"# Col 1: Filename\\n\")\n out.write(\"# Col 2: FWHM / pixels (seeing)\\n\")\n out.write(\"# Col 3: PSF axis ratio (minor/major)\\n\")\n out.write(\"# Col 4: Exp. Time / seconds\\n\")\n out.write(\"# \" + 40*\"-\" + \"\\n\")\n for line in image_log:\n out.write(\" %s %.1f %5.2f %6.1f\\n\" % tuple(line))\n\n\ndef image_combine(corrected_images, output='', log_name='', fringe_image='', method='weighted', max_control_points=50, detection_sigma=5, min_area=9):\n \"\"\"\n Register and combine a list of FITS images using affine transformation.\n\n Parameters\n ----------\n corrected_images : List[str]\n List of input filenames of `corrected` images, i.e., bias, flat corrected\n and trimmed for filter/aperture vignetting.\n\n output : str [default='']\n Output filename of the combined image. If not given, it is generated from the OBJECT keyword of the FITS header.\n\n log_name : str [default='']\n Filename of the combination log. 
This table holds the average seeing FWHM, PSF ellipticity, and exposure time\n for each image in the input list.\n\n fringe_image : str [default='']\n Filename of the fringe image (FITS format) from `pynot.create_fringe_image`.\n If given, this image will be subtracted from each input image before combination.\n\n method : str [default='weighted']\n Method for image combination: mean, median or weighted.\n By default an inverse-variance weighting is used.\n\n max_control_points : int [default=50]\n Maximum number of control point-sources to find the transformation.\n A lower number will converge faster but may result in a less robust image registration.\n\n detection_sigma : float [default=5.]\n Detection threshold for control points in units of standard deviations of the sky background.\n\n min_area : int [default=9]\n Minimum number of connected pixels to be considered a source\n\n Returns\n -------\n output_msg : str\n Log of messages from the function call.\n \"\"\"\n msg = list()\n if fringe_image != '':\n norm_sky = fits.getdata(fringe_image)\n msg.append(\" - Loaded normalized fringe image: %s\" % fringe_image)\n else:\n norm_sky = 1.\n target_fname = corrected_images[0]\n target, target_err, target_mask, target_hdr = load_fits_image(target_fname)\n target = target - norm_sky*np.median(target)\n exptime = target_hdr['EXPTIME']\n target /= exptime\n target_err /= exptime\n target_hdr['BUNIT'] = 'count / s'\n msg.append(\" - Aligning all images to reference: %s\" % target_fname)\n\n msg.append(\" - Registering input images:\")\n shifted_images = [target]\n shifted_vars = [target_err**2]\n target = target.byteswap().newbyteorder()\n if target.dtype.byteorder != '<':\n target = target.byteswap().newbyteorder()\n final_exptime = exptime\n image_log = list()\n if len(corrected_images) > 1:\n for fname in corrected_images[1:]:\n msg.append(\" - Input image: %s\" % fname)\n source, source_err, source_mask, hdr_i = load_fits_image(fname)\n source = source - norm_sky*np.median(source)\n source /= hdr_i['EXPTIME']\n source_err /= hdr_i['EXPTIME']\n final_exptime += hdr_i['EXPTIME']\n try:\n transf, (coords) = aa.find_transform(source, target,\n max_control_points=max_control_points,\n detection_sigma=detection_sigma,\n min_area=min_area)\n except:\n msg.append(\" [ERROR] - Failed to find image transformation!\")\n msg.append(\" - Skipping image\")\n continue\n\n source = source.byteswap().newbyteorder()\n source_err = source_err.byteswap().newbyteorder()\n source_mask = source_mask.byteswap().newbyteorder()\n if source.dtype.byteorder != '<':\n source = source.byteswap().newbyteorder()\n if source_err.dtype.byteorder != '<':\n source_err = source_err.byteswap().newbyteorder()\n if source_mask.dtype.byteorder != '<':\n source_mask = source_mask.byteswap().newbyteorder()\n\n registered_image, _ = aa.apply_transform(transf, source, target, fill_value=0)\n registered_error, _ = aa.apply_transform(transf, source_err, target, fill_value=0)\n registered_mask, _ = aa.apply_transform(transf, source_mask, target, fill_value=0)\n target_mask += 1 * (registered_mask > 0)\n registered_error[registered_error == 0] = np.mean(registered_error)*10\n shifted_images.append(registered_image)\n shifted_vars.append(registered_error**2)\n source_list, target_list = coords\n if len(image_log) == 0:\n fwhm, ratio, seeing_msg = measure_seeing(target, target_list)\n image_log.append([os.path.basename(target_fname), fwhm, ratio, exptime])\n if seeing_msg:\n msg.append(seeing_msg)\n fwhm, ratio, seeing_msg = 
measure_seeing(source, source_list)\n if seeing_msg:\n msg.append(seeing_msg)\n image_log.append([os.path.basename(fname), fwhm, ratio, hdr_i['EXPTIME']])\n\n if log_name == '':\n filter_name = alfosc.filter_translate[get_filter(target_hdr)]\n log_name = 'filelist_%s_%s.txt' % (target_hdr['OBJECT'], filter_name)\n save_file_log(log_name, image_log, target_hdr)\n msg.append(\" [OUTPUT] - Saved file log and image stats: %s\" % log_name)\n\n if method == 'median':\n final_image = np.nanmedian(shifted_images, axis=0)\n final_error = np.sqrt(np.nanmean(shifted_vars, axis=0))\n target_hdr['COMBINE'] = \"Median\"\n elif method == 'mean':\n final_image = np.nanmean(shifted_images, axis=0)\n final_error = np.sqrt(np.nanmean(shifted_vars, axis=0))\n target_hdr['COMBINE'] = \"Mean\"\n else:\n w = 1./np.array(shifted_vars)\n shifted_images = np.array(shifted_images)\n final_image = np.nansum(w*shifted_images, axis=0) / np.sum(w, axis=0)\n final_error = np.sqrt(1. / np.nansum(w, axis=0))\n target_hdr['COMBINE'] = \"Inverse Variance Weighted\"\n final_mask = 1 * (target_mask > 0)\n else:\n final_image = target\n final_error = target_err\n final_mask = target_mask\n target_hdr['COMBINE'] = \"None\"\n\n target_hdr['NCOMBINE'] = len(shifted_images)\n target_hdr['EXPTIME'] = final_exptime / len(shifted_images)\n # Fix NaN values from negative pixel values:\n err_NaN = np.isnan(final_error)\n final_error[err_NaN] = np.nanmean(final_error)*100\n msg.append(\" - Correcting NaNs in noise image: %i pixel(s)\" % np.sum(err_NaN))\n target_hdr['DATAMIN'] = np.nanmin(final_image)\n target_hdr['DATAMAX'] = np.nanmax(final_image)\n target_hdr['EXTNAME'] = 'DATA'\n target_hdr['AUTHOR'] = 'PyNOT version %s' % __version__\n\n mask_hdr = fits.Header()\n mask_hdr.add_comment(\"0 = Good Pixels\")\n mask_hdr.add_comment(\"1 = Cosmic Ray Hits\")\n\n if output == '':\n output = \"combined_%s.fits\" % target_hdr['OBJECT']\n\n sci_ext = fits.PrimaryHDU(final_image, header=target_hdr)\n err_ext = fits.ImageHDU(final_error, header=target_hdr, name='ERR')\n mask_ext = fits.ImageHDU(final_mask, header=mask_hdr, name='MASK')\n output_HDU = fits.HDUList([sci_ext, err_ext, mask_ext])\n output_HDU.writeto(output, overwrite=True)\n msg.append(\" - Successfully combined the images\")\n msg.append(\" [OUTPUT] - Saving output: %s\" % output)\n msg.append(\"\")\n output_msg = \"\\n\".join(msg)\n return output_msg\n\n\ndef plot_image2D(fname, image, vmin=-2, vmax=2):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n med = np.median(image)\n s = mad(image)\n im = ax.imshow(image, origin='lower', vmin=med+vmin*s, vmax=med+vmax*s)\n fig.colorbar(im)\n fig.tight_layout()\n fig.savefig(fname)\n\n\ndef create_fringe_image(input_filenames, output='', fig_fname='', threshold=3.0):\n \"\"\"\n Create a normalized average fringe image for a list of images taken with the same filter.\n\n Parameters\n ----------\n input_filenames : str\n List of FITS filenames of images taken in the same photometric band.\n\n output : str [default='']\n Output filename of the fringe image.\n\n fig_fname : str [default='']\n Output filename of the diagnostic figure showing the normalized fringe image.\n\n threshold : float [default=3.]\n Threshold for source rejection in the image stacking in units of the standard deviation\n of the sky background (estimated via median absolute deviation).\n\n Returns\n -------\n output_msg : str\n Log of messages from the function call.\n \"\"\"\n msg = list()\n hdr = fits.getheader(input_filenames[0])\n img_list = [fits.getdata(fname) 
for fname in input_filenames]\n exptimes = [fits.getheader(fname)['EXPTIME'] for fname in input_filenames]\n msg.append(\" - Loaded input images\")\n mask = [np.fabs(im-np.median(im)) < threshold*mad(im) for im in img_list]\n msg.append(\" - Created image mask using threshold: %.2f\" % threshold)\n\n N = np.sum(mask, 0)\n skysum = np.sum([im*m/t for im, m, t in zip(img_list, mask, exptimes)], axis=0)\n skysum[N == 0] = np.median(skysum)\n N[N == 0] = 1\n sky = skysum / N\n norm_sky = sky / np.median(sky)\n msg.append(\" - Created normalized fringe image\")\n\n if fig_fname:\n plot_image2D(fig_fname, norm_sky, vmin=-2, vmax=2)\n msg.append(\" [OUTPUT] - Saving figure: %s\" % fig_fname)\n\n if output == '':\n output = \"fringe_%s.fits\" % hdr['OBJECT']\n hdr['OBJECT'] = 'Fringe Image'\n hdr['EXTNAME'] = 'MODEL'\n hdr.add_comment('Average Fringe image, median normalized')\n fits.writeto(output, norm_sky, header=hdr, overwrite=True)\n msg.append(\" [OUTPUT] - Saving output: %s\" % output)\n msg.append(\"\")\n output_msg = \"\\n\".join(msg)\n return output_msg\n\n\n\ndef match_phot_catalogs(sep, phot, match_radius=1.):\n \"\"\"\n Match a source catalog from SEP to a photometric catalog `phot`.\n Both catalogs must include columns 'ra' and 'dec'.\n\n Parameters\n ----------\n match_radius : float [default=1.0]\n Matching radius in arcseconds\n\n Returns\n -------\n matched_sep : astropy.table.Table\n An astropy table of sources in the SEP source catalog that have matches\n in the reference `phot` catalog.\n\n matched_phot : astropy.table.Table\n An astropy table of sources in the reference `phot` catalog that have matches\n in the SEP source catalog.\n \"\"\"\n matched_sep = list()\n matched_phot = list()\n refs = np.array([phot['ra'], phot['dec']]).T\n for row in sep:\n xy = np.array([row['ra'], row['dec']])\n dist = np.sqrt(np.sum((refs - xy)**2, axis=1))\n index = np.argmin(dist)\n if np.min(dist) < match_radius/3600.:\n matched_phot.append(np.array(phot[index]))\n matched_sep.append(np.array(row))\n matched_sep = np.array(matched_sep)\n matched_phot = np.array(matched_phot)\n return Table(matched_sep), Table(matched_phot)\n\n\ndef get_sdss_catalog(ra, dec, radius=4.):\n \"\"\"Download the SDSS photometry using astroquery for a circular region of radius in deg.\"\"\"\n catalog_fname = 'sdss_phot_%.2f%+.2f.csv' % (ra, dec)\n fields = ['ra', 'dec', 'psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z',\n 'psfMagErr_u', 'psfMagErr_g', 'psfMagErr_r', 'psfMagErr_i', 'psfMagErr_z']\n field_center = SkyCoord(ra, dec, frame='icrs', unit='deg')\n sdss_result = SDSS.query_region(field_center, radius*u.arcmin, photoobj_fields=fields)\n if sdss_result is not None:\n sdss_result.write(catalog_fname, format='ascii.csv', overwrite=True)\n return sdss_result\n\n\n\next_coeffs = {'u': 0.517,\n 'g': 0.165,\n 'r': 0.0754,\n 'i': 0.0257,\n 'z': 0.0114}\n\ndef flux_calibration_sdss(img_fname, sep_fname, fig_fname='', q_lim=0.8, kappa=3, match_radius=1.):\n \"\"\"\n Self-calibration of magnitude zero point using SDSS photometry as reference\n\n Parameters\n ----------\n img_fname : string\n Filename of WCS calibrated image (_wcs.fits)\n\n sep_fname : string\n Filename of the source extraction table (_phot.fits)\n\n fig_fname : string\n Filename of the diagnostic figure. Autogenerated by default.\n\n q_lim : float [default=0.8]\n Reject elliptical sources with axis ratio < `q_lim`.\n Axis ratio is defined as minor/major.\n\n kappa : float [default=3]\n Threshold for projected distance filtering. 
Sources are rejected if the distance differs\n more then `kappa` times the median absolute deviation from the median of all distances.\n\n match_radius : float [default=1]\n Matching radius between SDSS sources and image sources\n\n Returns\n -------\n output_msg : string\n Log of messages from the function call.\n \"\"\"\n # -- Get SDSS catalog\n msg = list()\n\n hdr = fits.getheader(img_fname)\n msg.append(\" - Loaded image: %s\" % img_fname)\n radius = np.sqrt(hdr['CD1_1']**2 + hdr['CD1_2']**2)*60 * hdr['NAXIS1'] / np.sqrt(2)\n msg.append(\" - Downloading SDSS photometric catalog...\")\n try:\n sdss_cat = get_sdss_catalog(hdr['CRVAL1'], hdr['CRVAL2'], radius)\n except:\n msg.append(\" [ERROR] - Could not connect to SDSS server. Check your internet connection.\")\n msg.append(\"\")\n return \"\\n\".join(msg)\n\n def line(x, zp):\n return zp + x\n\n if sdss_cat is None:\n msg.append(\" [ERROR] - No data found in SDSS. No zero point calculated\")\n msg.append(\"\")\n return \"\\n\".join(msg)\n\n airmass = hdr['AIRMASS']\n filter = alfosc.filter_translate[alfosc.get_filter(hdr)]\n if 'SDSS' in filter:\n band = filter.split('_')[0]\n else:\n msg.append(\" [ERROR] - The image was not taken with an SDSS filter. No zero point calculated\")\n msg.append(\"\")\n return \"\\n\".join(msg)\n\n\n # For r-band: (measured from La Palma extinction curve)\n mag_key = 'psfMag_%s' % band\n mag_err_key = 'psfMagErr_%s' % band\n good = (sdss_cat[mag_key] > 0) & (sdss_cat[mag_key] < 30)\n sdss_cat = sdss_cat[good]\n\n # Load SEP filename:\n try:\n sep_cat = Table.read(sep_fname)\n sep_hdr = fits.getheader(sep_fname)\n msg.append(\" - Loaded SEP source table: %s\" % sep_fname)\n except (FileNotFoundError, OSError):\n msg.append(\" [ERROR] - Could not load SEP source table: %s\" % sep_fname)\n msg.append(\"\")\n return \"\\n\".join(msg)\n\n if 'MAG_ZP' in sep_hdr:\n msg.append(\"[WARNING] - The source table has already been flux calibrated by PyNOT\")\n msg.append(\" - Terminating task...\")\n msg.append(\"\")\n return \"\\n\".join(msg)\n\n axis_ratio = sep_cat['b']/sep_cat['a']\n # Select only 'round' sources:\n sep_points = sep_cat[axis_ratio > q_lim]\n\n # Match catalogs:\n match_sep, match_sdss = match_phot_catalogs(sep_points, sdss_cat)\n msg.append(\" - Cross matched source catalog\")\n\n mag = match_sdss[mag_key]\n mag_err = match_sdss[mag_err_key]\n m_inst = match_sep['mag_auto']\n k = ext_coeffs[band]\n\n # Get first estimate using the median:\n zp0, _ = curve_fit(line, m_inst+k*airmass, mag, p0=[27], sigma=mag_err)\n\n # Filter outliers:\n cut = np.abs(zp0 + m_inst + k*airmass - mag) < kappa*mad(zp0 + m_inst + k*airmass - mag)\n cut &= (mag < 20.1) & (mag > 15)\n\n # Get weighted average zero point:\n w = 1./mag_err[cut]**2\n zp = np.sum((mag[cut] - m_inst[cut] - k*airmass) * w) / np.sum(w)\n msg.append(\" - Calculating zero point in SDSS %s band using %i sources\" % (band, len(w)))\n\n # Zero point dispersion:\n zp_err = np.std(mag[cut] - zp - m_inst[cut] - k*airmass)\n msg.append(\" - Zero Point = %.3f ± %.3f mag\" % (zp, zp_err))\n\n sep_cat['mag_auto'] += zp\n sep_cat.write(sep_fname, overwrite=True)\n with fits.open(sep_fname, 'update') as sep_file:\n sep_file[0].header.add_comment(\"Self-calibration of mag. 
zero point using SDSS\")\n sep_file[0].header['MAG_ZP'] = (np.round(zp, 3), \"Magnitude zero point (AB mag)\")\n sep_file[0].header['ZP_ERR'] = (np.round(zp_err, 3), \"Uncertainty on magnitude zero point (AB mag)\")\n msg.append(\" [OUTPUT] - Updating magnitudes in source table: %s\" % sep_fname)\n\n # -- Plot the zero point for visual aid:\n base, _ = os.path.splitext(os.path.basename(img_fname))\n dirname = os.path.dirname(img_fname)\n if fig_fname == '':\n fig_fname = 'zero_point_' + base + '.pdf'\n fig_fname = os.path.join(dirname, fig_fname)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.errorbar(m_inst, mag, 3*mag_err, ls='', marker='.', color='k', alpha=0.8)\n ax.plot(m_inst[cut], mag[cut], ls='', marker='o', color='b', alpha=0.7)\n ax.plot(np.sort(m_inst), zp + np.sort(m_inst) + k*airmass, ls='--', color='crimson',\n label='ZP = %.2f ± %.2f' % (zp, zp_err))\n ax.set_ylim(np.min(mag)-0.2, np.max(mag)+0.5)\n ax.set_xlabel(\"Instrument Magnitude\")\n ax.set_ylabel(\"Reference SDSS Magnitude (r-band)\")\n ax.legend()\n ax.tick_params(which='both', top=False, right=False)\n fig.tight_layout()\n fig.savefig(fig_fname)\n msg.append(\" [OUTPUT] - Saving diagnostic figure: %s\" % fig_fname)\n\n # -- Update header in FITS image:\n with fits.open(img_fname) as hdu_list:\n hdu_list['DATA'].header.add_comment(\"Self-calibration of mag. zero point using SDSS\")\n hdu_list['DATA'].header['MAG_ZP'] = (np.round(zp, 3), \"Magnitude zero point (AB mag)\")\n hdu_list['DATA'].header['ZP_ERR'] = (np.round(zp_err, 3), \"Uncertainty on magnitude zero point (AB mag)\")\n hdu_list.writeto(img_fname, overwrite=True)\n\n msg.append(\" [OUTPUT] - Updating header of input image: %s\" % img_fname)\n msg.append(\" - MAG_ZP = %10.3f / %s\" % (zp, \"Magnitude zero point (AB mag)\"))\n msg.append(\" - ZP_ERR = %10.3f / %s\" % (zp_err, \"Uncertainty on magnitude zero point (AB mag)\"))\n msg.append(\"\")\n return \"\\n\".join(msg)\n"
] | [
[
"numpy.sum",
"scipy.optimize.curve_fit",
"numpy.nanmedian",
"numpy.nansum",
"numpy.meshgrid",
"numpy.nanmean",
"matplotlib.pyplot.figure",
"numpy.argmin",
"numpy.abs",
"numpy.log10",
"numpy.isnan",
"numpy.round",
"numpy.mean",
"numpy.median",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.sort",
"numpy.std",
"numpy.array",
"numpy.zeros_like",
"numpy.nanmax",
"matplotlib.patches.Ellipse",
"numpy.nanmin",
"numpy.sqrt"
]
] |
RobinVogel/metric-learn | [
"a30471424d35b0ef47582751fa6acea7b3a3bce5"
] | [
"test/metric_learn_test.py"
] | [
"import unittest\nimport re\nimport pytest\nimport numpy as np\nimport scipy\nfrom scipy.optimize import check_grad, approx_fprime\nfrom six.moves import xrange\nfrom sklearn.metrics import pairwise_distances, euclidean_distances\nfrom sklearn.datasets import (load_iris, make_classification, make_regression,\n make_spd_matrix)\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_allclose)\nfrom sklearn.utils.testing import assert_warns_message\nfrom sklearn.exceptions import ConvergenceWarning, ChangedBehaviorWarning\nfrom sklearn.utils.validation import check_X_y\ntry:\n from inverse_covariance import quic\n assert(quic)\nexcept ImportError:\n HAS_SKGGM = False\nelse:\n HAS_SKGGM = True\nfrom metric_learn import (LMNN, NCA, LFDA, Covariance, MLKR, MMC,\n LSML_Supervised, ITML_Supervised, SDML_Supervised,\n RCA_Supervised, MMC_Supervised, SDML, RCA, ITML,\n LSML)\n# Import this specially for testing.\nfrom metric_learn.constraints import wrap_pairs\nfrom metric_learn.lmnn import _sum_outer_products\n\n\ndef class_separation(X, labels):\n unique_labels, label_inds = np.unique(labels, return_inverse=True)\n ratio = 0\n for li in xrange(len(unique_labels)):\n Xc = X[label_inds == li]\n Xnc = X[label_inds != li]\n ratio += pairwise_distances(Xc).mean() / pairwise_distances(Xc, Xnc).mean()\n return ratio / len(unique_labels)\n\n\nclass MetricTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n # runs once per test class\n iris_data = load_iris()\n self.iris_points = iris_data['data']\n self.iris_labels = iris_data['target']\n np.random.seed(1234)\n\n\nclass TestCovariance(MetricTestCase):\n def test_iris(self):\n cov = Covariance()\n cov.fit(self.iris_points)\n\n csep = class_separation(cov.transform(self.iris_points), self.iris_labels)\n # deterministic result\n self.assertAlmostEqual(csep, 0.72981476)\n\n def test_singular_returns_pseudo_inverse(self):\n \"\"\"Checks that if the input covariance matrix is singular, we return\n the pseudo inverse\"\"\"\n X, y = load_iris(return_X_y=True)\n # We add a virtual column that is a linear combination of the other\n # columns so that the covariance matrix will be singular\n X = np.concatenate([X, X[:, :2].dot([[2], [3]])], axis=1)\n cov_matrix = np.cov(X, rowvar=False)\n covariance = Covariance()\n covariance.fit(X)\n pseudo_inverse = covariance.get_mahalanobis_matrix()\n # here is the definition of a pseudo inverse according to wikipedia:\n assert_allclose(cov_matrix.dot(pseudo_inverse).dot(cov_matrix),\n cov_matrix)\n assert_allclose(pseudo_inverse.dot(cov_matrix).dot(pseudo_inverse),\n pseudo_inverse)\n\n\nclass TestLSML(MetricTestCase):\n def test_iris(self):\n lsml = LSML_Supervised(num_constraints=200)\n lsml.fit(self.iris_points, self.iris_labels)\n\n csep = class_separation(lsml.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.8) # it's pretty terrible\n\n def test_deprecation_num_labeled(self):\n # test that a deprecation message is thrown if num_labeled is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lsml_supervised = LSML_Supervised(num_labeled=np.inf)\n msg = ('\"num_labeled\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0')\n assert_warns_message(DeprecationWarning, msg, lsml_supervised.fit, X, y)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default 
parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lsml_supervised = LSML_Supervised()\n msg = (\"Warning, no prior was set (`prior=None`). As of version 0.5.0, \"\n \"the default prior will now be set to \"\n \"'identity', instead of 'covariance'. If you still want to use \"\n \"the inverse of the covariance matrix as a prior, \"\n \"set prior='covariance'. This warning will disappear in \"\n \"v0.6.0, and `prior` parameter's default value will be set to \"\n \"'identity'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n lsml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.], [-5., 3.], [5., 0.]],\n [[0., 50.], [0., -60], [-10., 0.], [10., 0.]]])\n lsml = LSML()\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n lsml.fit(pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_deprecation_random_state(self):\n # test that a deprecation message is thrown if random_state is set at\n # fit time\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lsml_supervised = LSML_Supervised()\n msg = ('\"random_state\" parameter in the `fit` function is '\n 'deprecated. Set `random_state` at initialization '\n 'instead (when instantiating a new `LSML_Supervised` '\n 'object).')\n with pytest.warns(DeprecationWarning) as raised_warning:\n lsml_supervised.fit(X, y, random_state=np.random)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning_random_state(self):\n # test that a ChangedBehavior warning is thrown if the random_state is\n # not set in fit.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lsml_supervised = LSML_Supervised()\n msg = ('As of v0.5.0, `LSML_Supervised` now uses the '\n '`random_state` given at initialization to sample '\n 'constraints, not the default `np.random` from the `fit` '\n 'method, since this argument is now deprecated. '\n 'This warning will disappear in v0.6.0.')\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n lsml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\nclass TestITML(MetricTestCase):\n def test_iris(self):\n itml = ITML_Supervised(num_constraints=200)\n itml.fit(self.iris_points, self.iris_labels)\n\n csep = class_separation(itml.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.2)\n\n def test_deprecation_num_labeled(self):\n # test that a deprecation message is thrown if num_labeled is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised(num_labeled=np.inf)\n msg = ('\"num_labeled\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0')\n assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)\n\n def test_deprecation_bounds(self):\n # test that a deprecation message is thrown if bounds is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised(bounds=None)\n msg = ('\"bounds\" parameter from initialization is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. 
Use the \"bounds\" parameter of this '\n 'fit method instead.')\n assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)\n\n def test_deprecation_A0(self):\n # test that a deprecation message is thrown if A0 is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised(A0=np.ones_like(X))\n msg = ('\"A0\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n 'removed in 0.6.0. Use \"prior\" instead.')\n with pytest.warns(DeprecationWarning) as raised_warning:\n itml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n itml = ITML(A0=np.ones_like(X))\n with pytest.warns(DeprecationWarning) as raised_warning:\n itml.fit(pairs, y_pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_deprecation_random_state(self):\n # test that a deprecation message is thrown if random_state is set at\n # fit time\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised()\n msg = ('\"random_state\" parameter in the `fit` function is '\n 'deprecated. Set `random_state` at initialization '\n 'instead (when instantiating a new `ITML_Supervised` '\n 'object).')\n with pytest.warns(DeprecationWarning) as raised_warning:\n itml_supervised.fit(X, y, random_state=np.random)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning_random_state(self):\n # test that a ChangedBehavior warning is thrown if the random_state is\n # not set in fit.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised()\n msg = ('As of v0.5.0, `ITML_Supervised` now uses the '\n '`random_state` given at initialization to sample '\n 'constraints, not the default `np.random` from the `fit` '\n 'method, since this argument is now deprecated. 
'\n 'This warning will disappear in v0.6.0.')\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n itml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected]('bounds', [None, (20., 100.), [20., 100.],\n np.array([20., 100.]),\n np.array([[20., 100.]]),\n np.array([[20], [100]])])\ndef test_bounds_parameters_valid(bounds):\n \"\"\"Asserts that we can provide any array-like of two elements as bounds,\n and that the attribute bound_ is a numpy array\"\"\"\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n itml = ITML()\n itml.fit(pairs, y_pairs, bounds=bounds)\n\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised()\n itml_supervised.fit(X, y, bounds=bounds)\n\n\[email protected]('bounds', ['weird', ['weird1', 'weird2'],\n np.array([1, 2, 3])])\ndef test_bounds_parameters_invalid(bounds):\n \"\"\"Assert that if a non array-like is put for bounds, or an array-like\n of length different than 2, an error is returned\"\"\"\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n itml = ITML()\n with pytest.raises(Exception):\n itml.fit(pairs, y_pairs, bounds=bounds)\n\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n itml_supervised = ITML_Supervised()\n with pytest.raises(Exception):\n itml_supervised.fit(X, y, bounds=bounds)\n\n\nclass TestLMNN(MetricTestCase):\n def test_iris(self):\n lmnn = LMNN(k=5, learn_rate=1e-6, verbose=False)\n lmnn.fit(self.iris_points, self.iris_labels)\n\n csep = class_separation(lmnn.transform(self.iris_points),\n self.iris_labels)\n self.assertLess(csep, 0.25)\n\n def test_loss_grad_lbfgs(self):\n \"\"\"Test gradient of loss function\n Assert that the gradient is almost equal to its finite differences\n approximation.\n \"\"\"\n rng = np.random.RandomState(42)\n X, y = make_classification(random_state=rng)\n L = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])\n lmnn = LMNN()\n\n k = lmnn.k\n reg = lmnn.regularization\n\n X, y = lmnn._prepare_inputs(X, y, dtype=float,\n ensure_min_samples=2)\n num_pts, n_components = X.shape\n unique_labels, label_inds = np.unique(y, return_inverse=True)\n lmnn.labels_ = np.arange(len(unique_labels))\n lmnn.components_ = np.eye(n_components)\n\n target_neighbors = lmnn._select_targets(X, label_inds)\n\n # sum outer products\n dfG = _sum_outer_products(X, target_neighbors.flatten(),\n np.repeat(np.arange(X.shape[0]), k))\n\n # initialize L\n def loss_grad(flat_L):\n return lmnn._loss_grad(X, flat_L.reshape(-1, X.shape[1]), dfG,\n k, reg, target_neighbors, label_inds)\n\n def fun(x):\n return loss_grad(x)[1]\n\n def grad(x):\n return loss_grad(x)[0].ravel()\n\n # compute relative error\n epsilon = np.sqrt(np.finfo(float).eps)\n rel_diff = (check_grad(fun, grad, L.ravel()) /\n np.linalg.norm(approx_fprime(L.ravel(), fun, epsilon)))\n np.testing.assert_almost_equal(rel_diff, 0., decimal=5)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lmnn = LMNN(k=2)\n msg = (\"Warning, no init was set (`init=None`). As of version 0.5.0, \"\n \"the default init will now be set to 'auto', instead of the \"\n \"previous identity matrix. 
If you still want to use the identity \"\n \"matrix as before, set init='identity'. This warning \"\n \"will disappear in v0.6.0, and `init` parameter's default value \"\n \"will be set to 'auto'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n lmnn.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_deprecation_use_pca(self):\n # test that a DeprecationWarning is thrown about use_pca, if the\n # default parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lmnn = LMNN(k=2, use_pca=True)\n msg = ('\"use_pca\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0.')\n assert_warns_message(DeprecationWarning, msg, lmnn.fit, X, y)\n\n\ndef test_loss_func(capsys):\n \"\"\"Test the loss function (and its gradient) on a simple example,\n by comparing the results with the actual implementation of metric-learn,\n with a very simple (but nonperformant) implementation\"\"\"\n\n # toy dataset to use\n X, y = make_classification(n_samples=10, n_classes=2,\n n_features=6,\n n_redundant=0, shuffle=True,\n scale=[1, 1, 20, 20, 20, 20], random_state=42)\n\n def hinge(a):\n if a > 0:\n return a, 1\n else:\n return 0, 0\n\n def loss_fn(L, X, y, target_neighbors, reg):\n L = L.reshape(-1, X.shape[1])\n Lx = np.dot(X, L.T)\n loss = 0\n total_active = 0\n grad = np.zeros_like(L)\n for i in range(X.shape[0]):\n for j in target_neighbors[i]:\n loss += (1 - reg) * np.sum((Lx[i] - Lx[j]) ** 2)\n grad += (1 - reg) * np.outer(Lx[i] - Lx[j], X[i] - X[j])\n for l in range(X.shape[0]):\n if y[i] != y[l]:\n hin, active = hinge(1 + np.sum((Lx[i] - Lx[j])**2) -\n np.sum((Lx[i] - Lx[l])**2))\n total_active += active\n if active:\n loss += reg * hin\n grad += (reg * (np.outer(Lx[i] - Lx[j], X[i] - X[j]) -\n np.outer(Lx[i] - Lx[l], X[i] - X[l])))\n grad = 2 * grad\n return grad, loss, total_active\n\n # we check that the gradient we have computed in the non-performant implem\n # is indeed the true gradient on a toy example:\n\n def _select_targets(X, y, k):\n target_neighbors = np.empty((X.shape[0], k), dtype=int)\n for label in np.unique(y):\n inds, = np.nonzero(y == label)\n dd = euclidean_distances(X[inds], squared=True)\n np.fill_diagonal(dd, np.inf)\n nn = np.argsort(dd)[..., :k]\n target_neighbors[inds] = inds[nn]\n return target_neighbors\n\n target_neighbors = _select_targets(X, y, 2)\n regularization = 0.5\n n_features = X.shape[1]\n x0 = np.random.randn(1, n_features)\n\n def loss(x0):\n return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors,\n regularization)[1]\n\n def grad(x0):\n return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors,\n regularization)[0].ravel()\n\n scipy.optimize.check_grad(loss, grad, x0.ravel())\n\n class LMNN_with_callback(LMNN):\n \"\"\" We will use a callback to get the gradient (see later)\n \"\"\"\n\n def __init__(self, callback, *args, **kwargs):\n self.callback = callback\n super(LMNN_with_callback, self).__init__(*args, **kwargs)\n\n def _loss_grad(self, *args, **kwargs):\n grad, objective, total_active = (\n super(LMNN_with_callback, self)._loss_grad(*args, **kwargs))\n self.callback.append(grad)\n return grad, objective, total_active\n\n class LMNN_nonperformant(LMNN_with_callback):\n\n def fit(self, X, y):\n self.y = y\n return super(LMNN_nonperformant, self).fit(X, y)\n\n def _loss_grad(self, X, L, dfG, k, reg, target_neighbors, label_inds):\n grad, loss, total_active = 
loss_fn(L.ravel(), X, self.y,\n target_neighbors, self.regularization)\n self.callback.append(grad)\n return grad, loss, total_active\n\n mem1, mem2 = [], []\n lmnn_perf = LMNN_with_callback(verbose=True, random_state=42,\n init='identity', max_iter=30, callback=mem1)\n lmnn_nonperf = LMNN_nonperformant(verbose=True, random_state=42,\n init='identity', max_iter=30,\n callback=mem2)\n objectives, obj_diffs, learn_rate, total_active = (dict(), dict(), dict(),\n dict())\n for algo, name in zip([lmnn_perf, lmnn_nonperf], ['perf', 'nonperf']):\n algo.fit(X, y)\n out, _ = capsys.readouterr()\n lines = re.split(\"\\n+\", out)\n # we get every variable that is printed from the algorithm in verbose\n num = r'(-?\\d+.?\\d*(e[+|-]\\d+)?)'\n strings = [re.search(r\"\\d+ (?:{}) (?:{}) (?:(\\d+)) (?:{})\"\n .format(num, num, num), s) for s in lines]\n objectives[name] = [float(match.group(1)) for match in strings if match is\n not None]\n obj_diffs[name] = [float(match.group(3)) for match in strings if match is\n not None]\n total_active[name] = [float(match.group(5)) for match in strings if\n match is not\n None]\n learn_rate[name] = [float(match.group(6)) for match in strings if match is\n not None]\n assert len(strings) >= 10 # we ensure that we actually did more than 10\n # iterations\n assert total_active[name][0] >= 2 # we ensure that we have some active\n # constraints (that's the case we want to test)\n # we remove the last element because it can be equal to the penultimate\n # if the last gradient update is null\n for i in range(len(mem1)):\n np.testing.assert_allclose(lmnn_perf.callback[i],\n lmnn_nonperf.callback[i],\n err_msg='Gradient different at position '\n '{}'.format(i))\n np.testing.assert_allclose(objectives['perf'], objectives['nonperf'])\n np.testing.assert_allclose(obj_diffs['perf'], obj_diffs['nonperf'])\n np.testing.assert_allclose(total_active['perf'], total_active['nonperf'])\n np.testing.assert_allclose(learn_rate['perf'], learn_rate['nonperf'])\n\n\[email protected]('X, y, loss', [(np.array([[0], [1], [2], [3]]),\n [1, 1, 0, 0], 3.0),\n (np.array([[0], [1], [2], [3]]),\n [1, 0, 0, 1], 26.)])\ndef test_toy_ex_lmnn(X, y, loss):\n \"\"\"Test that the loss give the right result on a toy example\"\"\"\n L = np.array([[1]])\n lmnn = LMNN(k=1, regularization=0.5)\n\n k = lmnn.k\n reg = lmnn.regularization\n\n X, y = lmnn._prepare_inputs(X, y, dtype=float,\n ensure_min_samples=2)\n num_pts, n_components = X.shape\n unique_labels, label_inds = np.unique(y, return_inverse=True)\n lmnn.labels_ = np.arange(len(unique_labels))\n lmnn.components_ = np.eye(n_components)\n\n target_neighbors = lmnn._select_targets(X, label_inds)\n\n # sum outer products\n dfG = _sum_outer_products(X, target_neighbors.flatten(),\n np.repeat(np.arange(X.shape[0]), k))\n\n # storage\n a1 = [None] * k\n a2 = [None] * k\n for nn_idx in xrange(k):\n a1[nn_idx] = np.array([])\n a2[nn_idx] = np.array([])\n\n # assert that the loss equals the one computed by hand\n assert lmnn._loss_grad(X, L.reshape(-1, X.shape[1]), dfG, k,\n reg, target_neighbors, label_inds)[1] == loss\n\n\ndef test_convergence_simple_example(capsys):\n # LMNN should converge on this simple example, which it did not with\n # this issue: https://github.com/scikit-learn-contrib/metric-learn/issues/88\n X, y = make_classification(random_state=0)\n lmnn = LMNN(verbose=True)\n lmnn.fit(X, y)\n out, _ = capsys.readouterr()\n assert \"LMNN converged with objective\" in out\n\n\ndef test_no_twice_same_objective(capsys):\n # test that the objective 
function never has twice the same value\n # see https://github.com/scikit-learn-contrib/metric-learn/issues/88\n X, y = make_classification(random_state=0)\n lmnn = LMNN(verbose=True)\n lmnn.fit(X, y)\n out, _ = capsys.readouterr()\n lines = re.split(\"\\n+\", out)\n # we get only objectives from each line:\n # the regexp matches a float that follows an integer (the iteration\n # number), and which is followed by a (signed) float (delta obj). It\n # matches for instance:\n # 3 **1113.7665747189938** -3.182774197440267 46431.0200999999999998e-06\n objectives = [re.search(r\"\\d* (?:(\\d*.\\d*))[ | -]\\d*.\\d*\", s)\n for s in lines]\n objectives = [match.group(1) for match in objectives if match is not None]\n # we remove the last element because it can be equal to the penultimate\n # if the last gradient update is null\n assert len(objectives[:-1]) == len(set(objectives[:-1]))\n\n\nclass TestSDML(MetricTestCase):\n\n @pytest.mark.skipif(HAS_SKGGM,\n reason=\"The warning can be thrown only if skggm is \"\n \"not installed.\")\n def test_sdml_supervised_raises_warning_msg_not_installed_skggm(self):\n \"\"\"Tests that the right warning message is raised if someone tries to\n use SDML_Supervised but has not installed skggm, and that the algorithm\n fails to converge\"\"\"\n # TODO: remove if we don't need skggm anymore\n # load_iris: dataset where we know scikit-learn's graphical lasso fails\n # with a Floating Point error\n X, y = load_iris(return_X_y=True)\n sdml_supervised = SDML_Supervised(balance_param=0.5, use_cov=True,\n sparsity_param=0.01)\n msg = (\"There was a problem in SDML when using scikit-learn's graphical \"\n \"lasso solver. skggm's graphical lasso can sometimes converge on \"\n \"non SPD cases where scikit-learn's graphical lasso fails to \"\n \"converge. Try to install skggm and rerun the algorithm (see \"\n \"the README.md for the right version of skggm). The following \"\n \"error message was thrown:\")\n with pytest.raises(RuntimeError) as raised_error:\n sdml_supervised.fit(X, y)\n assert str(raised_error.value).startswith(msg)\n\n @pytest.mark.skipif(HAS_SKGGM,\n reason=\"The warning can be thrown only if skggm is \"\n \"not installed.\")\n def test_sdml_raises_warning_msg_not_installed_skggm(self):\n \"\"\"Tests that the right warning message is raised if someone tries to\n use SDML but has not installed skggm, and that the algorithm fails to\n converge\"\"\"\n # TODO: remove if we don't need skggm anymore\n # case on which we know that scikit-learn's graphical lasso fails\n # because it will return a non SPD matrix\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(prior='identity', balance_param=100, verbose=True)\n\n msg = (\"There was a problem in SDML when using scikit-learn's graphical \"\n \"lasso solver. skggm's graphical lasso can sometimes converge on \"\n \"non SPD cases where scikit-learn's graphical lasso fails to \"\n \"converge. 
Try to install skggm and rerun the algorithm (see \"\n \"the README.md for the right version of skggm).\")\n with pytest.raises(RuntimeError) as raised_error:\n sdml.fit(pairs, y_pairs)\n assert msg == str(raised_error.value)\n\n @pytest.mark.skipif(not HAS_SKGGM,\n reason=\"The warning can be thrown only if skggm is \"\n \"installed.\")\n def test_sdml_raises_warning_msg_installed_skggm(self):\n \"\"\"Tests that the right warning message is raised if someone tries to\n use SDML and has installed skggm, and that the algorithm fails to\n converge\"\"\"\n # TODO: remove if we don't need skggm anymore\n # case on which we know that skggm's graphical lasso fails\n # because it will return non finite values\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(prior='identity', balance_param=100, verbose=True)\n\n msg = (\"There was a problem in SDML when using skggm's graphical \"\n \"lasso solver.\")\n with pytest.raises(RuntimeError) as raised_error:\n sdml.fit(pairs, y_pairs)\n assert msg == str(raised_error.value)\n\n @pytest.mark.skipif(not HAS_SKGGM,\n reason=\"The warning can be thrown only if skggm is \"\n \"installed.\")\n def test_sdml_supervised_raises_warning_msg_installed_skggm(self):\n \"\"\"Tests that the right warning message is raised if someone tries to\n use SDML_Supervised but has not installed skggm, and that the algorithm\n fails to converge\"\"\"\n # TODO: remove if we don't need skggm anymore\n # case on which we know that skggm's graphical lasso fails\n # because it will return non finite values\n rng = np.random.RandomState(42)\n # This example will create a diagonal em_cov with a negative coeff (\n # pathological case)\n X = np.array([[-10., 0.], [10., 0.], [5., 0.], [3., 0.]])\n y = [0, 0, 1, 1]\n sdml_supervised = SDML_Supervised(balance_param=0.5, prior='identity',\n sparsity_param=0.01, random_state=rng)\n msg = (\"There was a problem in SDML when using skggm's graphical \"\n \"lasso solver.\")\n with pytest.raises(RuntimeError) as raised_error:\n sdml_supervised.fit(X, y)\n assert msg == str(raised_error.value)\n\n @pytest.mark.skipif(not HAS_SKGGM,\n reason=\"It's only in the case where skggm is installed\"\n \"that no warning should be thrown.\")\n def test_raises_no_warning_installed_skggm(self):\n # otherwise we should be able to instantiate and fit SDML and it\n # should raise no error and no ConvergenceWarning\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])\n y_pairs = [1, -1]\n X, y = make_classification(random_state=42)\n with pytest.warns(None) as records:\n sdml = SDML(prior='covariance')\n sdml.fit(pairs, y_pairs)\n for record in records:\n assert record.category is not ConvergenceWarning\n with pytest.warns(None) as records:\n sdml_supervised = SDML_Supervised(prior='identity', balance_param=1e-5)\n sdml_supervised.fit(X, y)\n for record in records:\n assert record.category is not ConvergenceWarning\n\n def test_iris(self):\n # Note: this is a flaky test, which fails for certain seeds.\n # TODO: un-flake it!\n rs = np.random.RandomState(5555)\n\n sdml = SDML_Supervised(num_constraints=1500, prior='identity',\n balance_param=5e-5)\n sdml.fit(self.iris_points, self.iris_labels, random_state=rs)\n csep = class_separation(sdml.transform(self.iris_points),\n self.iris_labels)\n self.assertLess(csep, 0.22)\n\n def test_deprecation_num_labeled(self):\n # test that a deprecation message is thrown if num_labeled is set at\n # initialization\n # TODO: remove in v.0.6\n X, y = 
make_classification(random_state=42)\n sdml_supervised = SDML_Supervised(num_labeled=np.inf, prior='identity',\n balance_param=5e-5)\n msg = ('\"num_labeled\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0')\n assert_warns_message(DeprecationWarning, msg, sdml_supervised.fit, X, y)\n\n def test_sdml_raises_warning_non_psd(self):\n \"\"\"Tests that SDML raises a warning on a toy example where we know the\n pseudo-covariance matrix is not PSD\"\"\"\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y = [1, -1]\n sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5)\n msg = (\"Warning, the input matrix of graphical lasso is not \"\n \"positive semi-definite (PSD). The algorithm may diverge, \"\n \"and lead to degenerate solutions. \"\n \"To prevent that, try to decrease the balance parameter \"\n \"`balance_param` and/or to set prior='identity'.\")\n with pytest.warns(ConvergenceWarning) as raised_warning:\n try:\n sdml.fit(pairs, y)\n except Exception:\n pass\n # we assert that this warning is in one of the warning raised by the\n # estimator\n assert msg in list(map(lambda w: str(w.message), raised_warning))\n\n def test_sdml_converges_if_psd(self):\n \"\"\"Tests that sdml converges on a simple problem where we know the\n pseudo-covariance matrix is PSD\"\"\"\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])\n y = [1, -1]\n sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5)\n sdml.fit(pairs, y)\n assert np.isfinite(sdml.get_mahalanobis_matrix()).all()\n\n @pytest.mark.skipif(not HAS_SKGGM,\n reason=\"sklearn's graphical_lasso can sometimes not \"\n \"work on some non SPD problems. We test that \"\n \"is works only if skggm is installed.\")\n def test_sdml_works_on_non_spd_pb_with_skggm(self):\n \"\"\"Test that SDML works on a certain non SPD problem on which we know\n it should work, but scikit-learn's graphical_lasso does not work\"\"\"\n X, y = load_iris(return_X_y=True)\n sdml = SDML_Supervised(balance_param=0.5, sparsity_param=0.01,\n prior='covariance',\n random_state=np.random.RandomState(42))\n sdml.fit(X, y)\n\n def test_deprecation_use_cov(self):\n # test that a deprecation message is thrown if use_cov is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n sdml_supervised = SDML_Supervised(use_cov=np.ones_like(X),\n balance_param=1e-5)\n msg = ('\"use_cov\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n 'removed in 0.6.0. Use \"prior\" instead.')\n with pytest.warns(DeprecationWarning) as raised_warning:\n sdml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(use_cov=np.ones_like(X), balance_param=1e-5)\n with pytest.warns(DeprecationWarning) as raised_warning:\n sdml.fit(pairs, y_pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default parameters are used (except for the balance_param that we need\n # to set for the algorithm to not diverge)\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n sdml_supervised = SDML_Supervised(balance_param=1e-5)\n msg = (\"Warning, no prior was set (`prior=None`). 
As of version 0.5.0, \"\n \"the default prior will now be set to \"\n \"'identity', instead of 'covariance'. If you still want to use \"\n \"the inverse of the covariance matrix as a prior, \"\n \"set prior='covariance'. This warning will disappear in \"\n \"v0.6.0, and `prior` parameter's default value will be set to \"\n \"'identity'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n sdml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(balance_param=1e-5)\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n sdml.fit(pairs, y_pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_deprecation_random_state(self):\n # test that a deprecation message is thrown if random_state is set at\n # fit time\n # TODO: remove in v.0.6\n X, y = load_iris(return_X_y=True)\n sdml_supervised = SDML_Supervised(balance_param=5e-5)\n msg = ('\"random_state\" parameter in the `fit` function is '\n 'deprecated. Set `random_state` at initialization '\n 'instead (when instantiating a new `SDML_Supervised` '\n 'object).')\n with pytest.warns(DeprecationWarning) as raised_warning:\n sdml_supervised.fit(X, y, random_state=np.random)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning_random_state(self):\n # test that a ChangedBehavior warning is thrown if the random_state is\n # not set in fit.\n # TODO: remove in v.0.6\n X, y = load_iris(return_X_y=True)\n sdml_supervised = SDML_Supervised(balance_param=5e-5)\n msg = ('As of v0.5.0, `SDML_Supervised` now uses the '\n '`random_state` given at initialization to sample '\n 'constraints, not the default `np.random` from the `fit` '\n 'method, since this argument is now deprecated. 
'\n 'This warning will disappear in v0.6.0.')\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n sdml_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected](not HAS_SKGGM,\n reason='The message should be printed only if skggm is '\n 'installed.')\ndef test_verbose_has_installed_skggm_sdml(capsys):\n # Test that if users have installed skggm, a message is printed telling them\n # skggm's solver is used (when they use SDML)\n # TODO: remove if we don't need skggm anymore\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(verbose=True, prior='covariance')\n sdml.fit(pairs, y_pairs)\n out, _ = capsys.readouterr()\n assert \"SDML will use skggm's graphical lasso solver.\" in out\n\n\[email protected](not HAS_SKGGM,\n reason='The message should be printed only if skggm is '\n 'installed.')\ndef test_verbose_has_installed_skggm_sdml_supervised(capsys):\n # Test that if users have installed skggm, a message is printed telling them\n # skggm's solver is used (when they use SDML_Supervised)\n # TODO: remove if we don't need skggm anymore\n X, y = load_iris(return_X_y=True)\n sdml = SDML_Supervised(verbose=True, prior='identity', balance_param=1e-5)\n sdml.fit(X, y)\n out, _ = capsys.readouterr()\n assert \"SDML will use skggm's graphical lasso solver.\" in out\n\n\[email protected](HAS_SKGGM,\n reason='The message should be printed only if skggm is '\n 'not installed.')\ndef test_verbose_has_not_installed_skggm_sdml(capsys):\n # Test that if users have installed skggm, a message is printed telling them\n # skggm's solver is used (when they use SDML)\n # TODO: remove if we don't need skggm anymore\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]])\n y_pairs = [1, -1]\n sdml = SDML(verbose=True, prior='covariance')\n sdml.fit(pairs, y_pairs)\n out, _ = capsys.readouterr()\n assert \"SDML will use scikit-learn's graphical lasso solver.\" in out\n\n\[email protected](HAS_SKGGM,\n reason='The message should be printed only if skggm is '\n 'not installed.')\ndef test_verbose_has_not_installed_skggm_sdml_supervised(capsys):\n # Test that if users have installed skggm, a message is printed telling them\n # skggm's solver is used (when they use SDML_Supervised)\n # TODO: remove if we don't need skggm anymore\n X, y = make_classification(random_state=42)\n sdml = SDML_Supervised(verbose=True, balance_param=1e-5, prior='identity')\n sdml.fit(X, y)\n out, _ = capsys.readouterr()\n assert \"SDML will use scikit-learn's graphical lasso solver.\" in out\n\n\nclass TestNCA(MetricTestCase):\n def test_iris(self):\n n = self.iris_points.shape[0]\n\n # Without dimension reduction\n nca = NCA(max_iter=(100000 // n))\n nca.fit(self.iris_points, self.iris_labels)\n csep = class_separation(nca.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.15)\n\n # With dimension reduction\n nca = NCA(max_iter=(100000 // n), n_components=2)\n nca.fit(self.iris_points, self.iris_labels)\n csep = class_separation(nca.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.20)\n\n def test_finite_differences(self):\n \"\"\"Test gradient of loss function\n\n Assert that the gradient is almost equal to its finite differences\n approximation.\n \"\"\"\n # Initialize the transformation `M`, as well as `X` and `y` and `NCA`\n X, y = make_classification()\n M = np.random.randn(np.random.randint(1, X.shape[1] + 1), X.shape[1])\n mask = y[:, np.newaxis] == 
y[np.newaxis, :]\n nca = NCA()\n nca.n_iter_ = 0\n\n def fun(M):\n return nca._loss_grad_lbfgs(M, X, mask)[0]\n\n def grad(M):\n return nca._loss_grad_lbfgs(M, X, mask)[1].ravel()\n\n # compute relative error\n epsilon = np.sqrt(np.finfo(float).eps)\n rel_diff = (check_grad(fun, grad, M.ravel()) /\n np.linalg.norm(approx_fprime(M.ravel(), fun, epsilon)))\n np.testing.assert_almost_equal(rel_diff, 0., decimal=6)\n\n def test_simple_example(self):\n \"\"\"Test on a simple example.\n\n Puts four points in the input space where the opposite labels points are\n next to each other. After transform the same labels points should be next\n to each other.\n\n \"\"\"\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n nca = NCA(n_components=2,)\n nca.fit(X, y)\n Xansformed = nca.transform(X)\n np.testing.assert_equal(pairwise_distances(Xansformed).argsort()[:, 1],\n np.array([2, 3, 0, 1]))\n\n def test_singleton_class(self):\n X = self.iris_points\n y = self.iris_labels\n\n # one singleton class: test fitting works\n singleton_class = 1\n ind_singleton, = np.where(y == singleton_class)\n y[ind_singleton] = 2\n y[ind_singleton[0]] = singleton_class\n\n nca = NCA(max_iter=30)\n nca.fit(X, y)\n\n # One non-singleton class: test fitting works\n ind_1, = np.where(y == 1)\n ind_2, = np.where(y == 2)\n y[ind_1] = 0\n y[ind_1[0]] = 1\n y[ind_2] = 0\n y[ind_2[0]] = 2\n\n nca = NCA(max_iter=30)\n nca.fit(X, y)\n\n # Only singleton classes: test fitting does nothing (the gradient\n # must be null in this case, so the final matrix must stay like\n # the initialization)\n ind_0, = np.where(y == 0)\n ind_1, = np.where(y == 1)\n ind_2, = np.where(y == 2)\n X = X[[ind_0[0], ind_1[0], ind_2[0]]]\n y = y[[ind_0[0], ind_1[0], ind_2[0]]]\n\n A = make_spd_matrix(X.shape[1], X.shape[1])\n nca = NCA(init=A, max_iter=30, n_components=X.shape[1])\n nca.fit(X, y)\n assert_array_equal(nca.components_, A)\n\n def test_one_class(self):\n # if there is only one class the gradient is null, so the final matrix\n # must stay like the initialization\n X = self.iris_points[self.iris_labels == 0]\n y = self.iris_labels[self.iris_labels == 0]\n\n A = make_spd_matrix(X.shape[1], X.shape[1])\n nca = NCA(init=A, max_iter=30, n_components=X.shape[1])\n nca.fit(X, y)\n assert_array_equal(nca.components_, A)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n nca = NCA()\n msg = (\"Warning, no init was set (`init=None`). As of version 0.5.0, \"\n \"the default init will now be set to 'auto', instead of the \"\n \"previous scaling matrix. If you still want to use the same \"\n \"scaling matrix as before, set \"\n \"init=np.eye(X.shape[1])/(np.maximum(X.max(axis=0)-X.min(axis=0)\"\n \", EPS))). 
This warning will disappear in v0.6.0, and `init` \"\n \"parameter's default value will be set to 'auto'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n nca.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected]('num_dims', [None, 2])\ndef test_deprecation_num_dims_nca(num_dims):\n # test that a deprecation message is thrown if num_dims is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n nca = NCA(num_dims=num_dims)\n msg = ('\"num_dims\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. Use \"n_components\" instead')\n with pytest.warns(DeprecationWarning) as raised_warning:\n nca.fit(X, y)\n assert (str(raised_warning[0].message) == msg)\n\n\nclass TestLFDA(MetricTestCase):\n def test_iris(self):\n lfda = LFDA(k=2, n_components=2)\n lfda.fit(self.iris_points, self.iris_labels)\n csep = class_separation(lfda.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.15)\n\n # Sanity checks for learned matrices.\n self.assertEqual(lfda.get_mahalanobis_matrix().shape, (4, 4))\n self.assertEqual(lfda.components_.shape, (2, 4))\n\n\[email protected]('num_dims', [None, 2])\ndef test_deprecation_num_dims_lfda(num_dims):\n # test that a deprecation message is thrown if num_dims is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n lfda = LFDA(num_dims=num_dims)\n msg = ('\"num_dims\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. Use \"n_components\" instead')\n with pytest.warns(DeprecationWarning) as raised_warning:\n lfda.fit(X, y)\n assert (str(raised_warning[0].message) == msg)\n\n\nclass TestRCA(MetricTestCase):\n def test_iris(self):\n rca = RCA_Supervised(n_components=2, num_chunks=30, chunk_size=2)\n rca.fit(self.iris_points, self.iris_labels)\n csep = class_separation(rca.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.29)\n\n def test_deprecation_pca_comps(self):\n # test that a deprecation message is thrown if pca_comps is set at\n # initialization\n # TODO: remove in v.0.6\n X, y = make_classification(random_state=42, n_samples=100)\n rca_supervised = RCA_Supervised(pca_comps=X.shape[1], num_chunks=20)\n msg = ('\"pca_comps\" parameter is not used. '\n 'It has been deprecated in version 0.5.0 and will be'\n 'removed in 0.6.0. RCA will not do PCA preprocessing anymore. If '\n 'you still want to do it, you could use '\n '`sklearn.decomposition.PCA` and an `sklearn.pipeline.Pipeline`.')\n with pytest.warns(ChangedBehaviorWarning) as expected_msg:\n rca_supervised.fit(X, y)\n assert any(str(w.message) == msg for w in expected_msg)\n\n rca = RCA(pca_comps=X.shape[1])\n with pytest.warns(ChangedBehaviorWarning) as expected_msg:\n rca.fit(X, y)\n assert any(str(w.message) == msg for w in expected_msg)\n\n def test_changedbehaviorwarning_preprocessing(self):\n # test that a ChangedBehaviorWarning is thrown when using RCA\n # TODO: remove in v.0.6\n\n msg = (\"RCA will no longer center the data before training. If you want \"\n \"to do some preprocessing, you should do it manually (you can also \"\n \"use an `sklearn.pipeline.Pipeline` for instance). 
This warning \"\n \"will disappear in version 0.6.0.\")\n\n X, y = make_classification(random_state=42, n_samples=100)\n rca_supervised = RCA_Supervised(num_chunks=20)\n with pytest.warns(ChangedBehaviorWarning) as expected_msg:\n rca_supervised.fit(X, y)\n assert any(str(w.message) == msg for w in expected_msg)\n\n rca = RCA()\n with pytest.warns(ChangedBehaviorWarning) as expected_msg:\n rca.fit(X, y)\n assert any(str(w.message) == msg for w in expected_msg)\n\n def test_rank_deficient_returns_warning(self):\n \"\"\"Checks that if the covariance matrix is not invertible, we raise a\n warning message advising to use PCA\"\"\"\n X, y = load_iris(return_X_y=True)\n # we make the fourth column a linear combination of the two first,\n # so that the covariance matrix will not be invertible:\n X[:, 3] = X[:, 0] + 3 * X[:, 1]\n rca = RCA()\n msg = ('The inner covariance matrix is not invertible, '\n 'so the transformation matrix may contain Nan values. '\n 'You should reduce the dimensionality of your input,'\n 'for instance using `sklearn.decomposition.PCA` as a '\n 'preprocessing step.')\n with pytest.warns(None) as raised_warnings:\n rca.fit(X, y)\n assert any(str(w.message) == msg for w in raised_warnings)\n\n def test_deprecation_random_state(self):\n # test that a deprecation message is thrown if random_state is set at\n # fit time\n # TODO: remove in v.0.6\n X, y = make_classification(random_state=42, n_samples=100)\n rca_supervised = RCA_Supervised(num_chunks=20)\n msg = ('\"random_state\" parameter in the `fit` function is '\n 'deprecated. Set `random_state` at initialization '\n 'instead (when instantiating a new `RCA_Supervised` '\n 'object).')\n with pytest.warns(DeprecationWarning) as raised_warning:\n rca_supervised.fit(X, y, random_state=np.random)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning_random_state(self):\n # test that a ChangedBehavior warning is thrown if the random_state is\n # not set in fit.\n # TODO: remove in v.0.6\n X, y = make_classification(random_state=42, n_samples=100)\n rca_supervised = RCA_Supervised(num_chunks=20)\n msg = ('As of v0.5.0, `RCA_Supervised` now uses the '\n '`random_state` given at initialization to sample '\n 'constraints, not the default `np.random` from the `fit` '\n 'method, since this argument is now deprecated. '\n 'This warning will disappear in v0.6.0.')\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n rca_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected]('num_dims', [None, 2])\ndef test_deprecation_num_dims_rca(num_dims):\n # test that a deprecation message is thrown if num_dims is set at\n # initialization\n # TODO: remove in v.0.6\n X, y = load_iris(return_X_y=True)\n rca = RCA(num_dims=num_dims)\n msg = ('\"num_dims\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. Use \"n_components\" instead')\n with pytest.warns(DeprecationWarning) as raised_warning:\n rca.fit(X, y)\n assert any(str(w.message) == msg for w in raised_warning)\n\n # we take a small number of chunks so that RCA works on iris\n rca_supervised = RCA_Supervised(num_dims=num_dims, num_chunks=10)\n msg = ('\"num_dims\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. 
Use \"n_components\" instead')\n with pytest.warns(DeprecationWarning) as raised_warning:\n rca_supervised.fit(X, y)\n assert any(str(w.message) == msg for w in raised_warning)\n\n\nclass TestMLKR(MetricTestCase):\n def test_iris(self):\n mlkr = MLKR()\n mlkr.fit(self.iris_points, self.iris_labels)\n csep = class_separation(mlkr.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.25)\n\n def test_finite_differences(self):\n \"\"\"Test gradient of loss function\n\n Assert that the gradient is almost equal to its finite differences\n approximation.\n \"\"\"\n # Initialize the transformation `M`, as well as `X`, and `y` and `MLKR`\n X, y = make_regression(n_features=4, random_state=1, n_samples=20)\n X, y = check_X_y(X, y)\n M = np.random.randn(2, X.shape[1])\n mlkr = MLKR()\n mlkr.n_iter_ = 0\n\n def fun(M):\n return mlkr._loss(M, X, y)[0]\n\n def grad_fn(M):\n return mlkr._loss(M, X, y)[1].ravel()\n\n # compute relative error\n rel_diff = check_grad(fun, grad_fn, M.ravel()) / np.linalg.norm(grad_fn(M))\n np.testing.assert_almost_equal(rel_diff, 0.)\n\n def test_deprecation_A0(self):\n # test that a deprecation message is thrown if A0 is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mlkr = MLKR(A0=np.ones_like(X))\n msg = ('\"A0\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n 'removed in 0.6.0. Use \"init\" instead.')\n with pytest.warns(DeprecationWarning) as raised_warning:\n mlkr.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([0.1, 0.2, 0.3, 0.4])\n mlkr = MLKR()\n msg = (\"Warning, no init was set (`init=None`). As of version 0.5.0, \"\n \"the default init will now be set to 'auto', instead of 'pca'. \"\n \"If you still want to use PCA as an init, set init='pca'. \"\n \"This warning will disappear in v0.6.0, and `init` parameter's\"\n \" default value will be set to 'auto'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n mlkr.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected]('num_dims', [None, 2])\ndef test_deprecation_num_dims_mlkr(num_dims):\n # test that a deprecation message is thrown if num_dims is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mlkr = MLKR(num_dims=num_dims)\n msg = ('\"num_dims\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0. 
Use \"n_components\" instead')\n with pytest.warns(DeprecationWarning) as raised_warning:\n mlkr.fit(X, y)\n assert (str(raised_warning[0].message) == msg)\n\n\nclass TestMMC(MetricTestCase):\n def test_iris(self):\n\n # Generate full set of constraints for comparison with reference\n # implementation\n mask = self.iris_labels[None] == self.iris_labels[:, None]\n a, b = np.nonzero(np.triu(mask, k=1))\n c, d = np.nonzero(np.triu(~mask, k=1))\n\n # Full metric\n n_features = self.iris_points.shape[1]\n mmc = MMC(convergence_threshold=0.01, init=np.eye(n_features) / 10)\n mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d]))\n expected = [[+0.000514, +0.000868, -0.001195, -0.001703],\n [+0.000868, +0.001468, -0.002021, -0.002879],\n [-0.001195, -0.002021, +0.002782, +0.003964],\n [-0.001703, -0.002879, +0.003964, +0.005648]]\n assert_array_almost_equal(expected, mmc.get_mahalanobis_matrix(),\n decimal=6)\n\n # Diagonal metric\n mmc = MMC(diagonal=True)\n mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d]))\n expected = [0, 0, 1.210220, 1.228596]\n assert_array_almost_equal(np.diag(expected), mmc.get_mahalanobis_matrix(),\n decimal=6)\n\n # Supervised Full\n mmc = MMC_Supervised()\n mmc.fit(self.iris_points, self.iris_labels)\n csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.15)\n\n # Supervised Diagonal\n mmc = MMC_Supervised(diagonal=True)\n mmc.fit(self.iris_points, self.iris_labels)\n csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)\n self.assertLess(csep, 0.2)\n\n def test_deprecation_num_labeled(self):\n # test that a deprecation message is thrown if num_labeled is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mmc_supervised = MMC_Supervised(num_labeled=np.inf)\n msg = ('\"num_labeled\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n ' removed in 0.6.0')\n assert_warns_message(DeprecationWarning, msg, mmc_supervised.fit, X, y)\n\n def test_deprecation_A0(self):\n # test that a deprecation message is thrown if A0 is set at\n # initialization\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mmc_supervised = MMC_Supervised(A0=np.ones_like(X))\n msg = ('\"A0\" parameter is not used.'\n ' It has been deprecated in version 0.5.0 and will be'\n 'removed in 0.6.0. Use \"init\" instead.')\n with pytest.warns(DeprecationWarning) as raised_warning:\n mmc_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n mmc = MMC(A0=np.ones_like(X))\n with pytest.warns(DeprecationWarning) as raised_warning:\n mmc.fit(pairs, y_pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning(self):\n # test that a ChangedBehavior warning is thrown about the init, if the\n # default parameters are used.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mmc_supervised = MMC_Supervised()\n msg = (\"Warning, no init was set (`init=None`). As of version 0.5.0, \"\n \"the default init will now be set to 'identity', instead of the \"\n \"identity divided by a scaling factor of 10. 
\"\n \"If you still want to use the same init as in previous \"\n \"versions, set init=np.eye(d)/10, where d is the dimension \"\n \"of your input space (d=pairs.shape[1]). \"\n \"This warning will disappear in v0.6.0, and `init` parameter's\"\n \" default value will be set to 'auto'.\")\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n mmc_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]])\n y_pairs = [1, -1]\n mmc = MMC()\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n mmc.fit(pairs, y_pairs)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_deprecation_random_state(self):\n # test that a deprecation message is thrown if random_state is set at\n # fit time\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mmc_supervised = MMC_Supervised()\n msg = ('\"random_state\" parameter in the `fit` function is '\n 'deprecated. Set `random_state` at initialization '\n 'instead (when instantiating a new `MMC_Supervised` '\n 'object).')\n with pytest.warns(DeprecationWarning) as raised_warning:\n mmc_supervised.fit(X, y, random_state=np.random)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n def test_changed_behaviour_warning_random_state(self):\n # test that a ChangedBehavior warning is thrown if the random_state is\n # not set in fit.\n # TODO: remove in v.0.6\n X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])\n y = np.array([1, 0, 1, 0])\n mmc_supervised = MMC_Supervised()\n msg = ('As of v0.5.0, `MMC_Supervised` now uses the '\n '`random_state` given at initialization to sample '\n 'constraints, not the default `np.random` from the `fit` '\n 'method, since this argument is now deprecated. 
'\n 'This warning will disappear in v0.6.0.')\n with pytest.warns(ChangedBehaviorWarning) as raised_warning:\n mmc_supervised.fit(X, y)\n assert any(msg == str(wrn.message) for wrn in raised_warning)\n\n\[email protected](('algo_class', 'dataset'),\n [(NCA, make_classification()),\n (MLKR, make_regression())])\ndef test_verbose(algo_class, dataset, capsys):\n # assert there is proper output when verbose = True\n X, y = dataset\n model = algo_class(verbose=True)\n model.fit(X, y)\n out, _ = capsys.readouterr()\n\n # check output\n lines = re.split('\\n+', out)\n header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value',\n 'Time(s)')\n assert lines[0] == '[{}]'.format(algo_class.__name__)\n assert lines[1] == '[{}] {}'.format(algo_class.__name__, header)\n assert lines[2] == '[{}] {}'.format(algo_class.__name__, '-' * len(header))\n for line in lines[3:-2]:\n # The following regex will match for instance:\n # '[NCA] 0 6.988936e+01 0.01'\n assert re.match(r\"\\[\" + algo_class.__name__ + r\"\\]\\ *\\d+\\ *\\d\\.\\d{6}e[+|-]\"\n r\"\\d+\\ *\\d+\\.\\d{2}\", line)\n assert re.match(r\"\\[\" + algo_class.__name__ + r\"\\] Training took\\ *\"\n r\"\\d+\\.\\d{2}s\\.\", lines[-2])\n assert lines[-1] == ''\n\n\[email protected](('algo_class', 'dataset'),\n [(NCA, make_classification()),\n (MLKR, make_regression(n_features=10))])\ndef test_no_verbose(dataset, algo_class, capsys):\n # assert by default there is no output (verbose=False)\n X, y = dataset\n model = algo_class()\n model.fit(X, y)\n out, _ = capsys.readouterr()\n # check output\n assert (out == '')\n\n\[email protected](('algo_class', 'dataset'),\n [(NCA, make_classification()),\n (MLKR, make_regression(n_features=10))])\ndef test_convergence_warning(dataset, algo_class):\n X, y = dataset\n model = algo_class(max_iter=2, verbose=True)\n cls_name = model.__class__.__name__\n assert_warns_message(ConvergenceWarning,\n '[{}] {} did not converge'.format(cls_name, cls_name),\n model.fit, X, y)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.sum",
"numpy.diag",
"numpy.random.seed",
"numpy.ones_like",
"numpy.argsort",
"numpy.random.RandomState",
"sklearn.datasets.make_regression",
"numpy.fill_diagonal",
"numpy.cov",
"sklearn.datasets.load_iris",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_array_equal",
"numpy.where",
"numpy.nonzero",
"numpy.unique",
"numpy.eye",
"numpy.outer",
"sklearn.utils.testing.assert_warns_message",
"numpy.arange",
"numpy.finfo",
"numpy.triu",
"sklearn.datasets.make_spd_matrix",
"numpy.zeros_like",
"numpy.empty",
"numpy.random.randn",
"sklearn.datasets.make_classification",
"sklearn.metrics.euclidean_distances",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.dot",
"numpy.random.randint",
"sklearn.utils.validation.check_X_y",
"sklearn.metrics.pairwise_distances"
]
] |
WeatherGod/numpy | [
"5be45b280b258e158b93163b937f8f9c08d30393"
] | [
"numpy/ma/tests/test_regression.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy.testing import *\nfrom numpy.compat import sixu\n\nrlevel = 1\n\nclass TestRegression(TestCase):\n def test_masked_array_create(self,level=rlevel):\n \"\"\"Ticket #17\"\"\"\n x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])\n assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])\n\n def test_masked_array(self,level=rlevel):\n \"\"\"Ticket #61\"\"\"\n x = np.ma.array(1,mask=[1])\n\n def test_mem_masked_where(self,level=rlevel):\n \"\"\"Ticket #62\"\"\"\n from numpy.ma import masked_where, MaskType\n a = np.zeros((1,1))\n b = np.zeros(a.shape, MaskType)\n c = masked_where(b,a)\n a-c\n\n def test_masked_array_multiply(self,level=rlevel):\n \"\"\"Ticket #254\"\"\"\n a = np.ma.zeros((4,1))\n a[2,0] = np.ma.masked\n b = np.zeros((4,2))\n a*b\n b*a\n\n def test_masked_array_repeat(self, level=rlevel):\n \"\"\"Ticket #271\"\"\"\n np.ma.array([1],mask=False).repeat(10)\n\n def test_masked_array_repr_unicode(self):\n \"\"\"Ticket #1256\"\"\"\n repr(np.ma.array(sixu(\"Unicode\")))\n\n def test_atleast_2d(self):\n \"\"\"Ticket #1559\"\"\"\n a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])\n b = np.atleast_2d(a)\n assert_(a.mask.ndim == 1)\n assert_(b.mask.ndim == 2)\n\n def test_set_fill_value_unicode_py3(self):\n \"\"\"Ticket #2733\"\"\"\n a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])\n a.fill_value = 'X'\n assert_(a.fill_value == 'X')\n\n def test_var_sets_maskedarray_scalar(self):\n \"\"\"Issue gh-2757\"\"\"\n a = np.ma.array(np.arange(5), mask=True)\n mout = np.ma.array(-1, dtype=float)\n a.var(out=mout)\n assert_(mout._data == 0)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.atleast_2d",
"numpy.ma.masked_array",
"numpy.ma.masked_where",
"numpy.zeros",
"numpy.ma.nonzero",
"numpy.ma.array",
"numpy.arange",
"numpy.compat.sixu",
"numpy.ma.zeros"
]
] |
zhanyinx/SPT_analysis | [
"1cf806c1fd6051e7fc998d2860a16bea6aa9de1a"
] | [
"source/spot_detection_tracking/trackmate_xml_2d.py"
] | [
"\"\"\"\\U0001F1EB\\U0001F1EF \\U00002B50 CSV track coordinate to TrackMate XML conversion.\nFiji allows for quick and easy viewing of images. TrackMate can be used to view tracks.\nUnfortunately, it isn't that simple to convert \"normal\" coordinate output into\nTrackMate-viewable format.\nRequires a \"tracks.csv\" file that contains the following columns:\n- x, y: Coordinate positions in x-/y-axis\n- particle: Unique ID assigned to all coordinates along one track\n- frame: Current point in time / frame\n\"\"\"\n\nimport argparse\nimport os\nimport tempfile\nimport xml.dom.minidom\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nimport pandas as pd\nimport skimage.io\n\n\ndef get_gaps(frames):\n def __longest_consecutive(a):\n \"\"\"Return length of longest consecutive range in list of integers.\"\"\"\n a = set(a)\n longest = 0\n for i in a:\n if i - 1 not in a:\n streak = 0\n while i in a:\n i += 1\n streak += 1\n longest = max(longest, streak)\n return longest\n\n full_length = np.arange(min(frames), max(frames))\n diff = np.setdiff1d(full_length, frames)\n longest = __longest_consecutive(diff)\n total = len(diff)\n return str(longest), str(total), str(len(full_length))\n\n\ndef __create_model(root, spatialunits: str = \"pixel\", timeunits: str = \"sec\"):\n dict_spotfeatures = [\n {\n \"feature\": \"QUALITY\",\n \"name\": \"Quality\",\n \"shortname\": \"Quality\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"POSITION_X\",\n \"name\": \"X\",\n \"shortname\": \"X\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"POSITION_Y\",\n \"name\": \"Y\",\n \"shortname\": \"Y\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"POSITION_Z\",\n \"name\": \"Z\",\n \"shortname\": \"Z\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"POSITION_T\",\n \"name\": \"T\",\n \"shortname\": \"T\",\n \"dimension\": \"TIME\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"FRAME\",\n \"name\": \"Frame\",\n \"shortname\": \"Frame\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"RADIUS\",\n \"name\": \"Radius\",\n \"shortname\": \"R\",\n \"dimension\": \"LENGTH\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"VISIBILITY\",\n \"name\": \"Visibility\",\n \"shortname\": \"Visibility\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"MANUAL_INTEGER_SPOT_FEATURE\",\n \"name\": \"Custom Integer Spot Feature\",\n \"shortname\": \"Integer Spot Feature\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"MANUAL_DOUBLE_SPOT_FEATURE\",\n \"name\": \"Custom Double Spot Feature\",\n \"shortname\": \"Double Spot Feature\",\n \"dimension\": \"NONE\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"HAS_MAX_QUALITY_IN_FRAME\",\n \"name\": \"Has max quality\",\n \"shortname\": \"Max Quality\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"MANUAL_COLOR\",\n \"name\": \"Manual spot color\",\n \"shortname\": \"Spot color\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"MEAN_INTENSITY\",\n \"name\": \"Mean intensity\",\n \"shortname\": \"Mean\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"MEDIAN_INTENSITY\",\n \"name\": \"Median intensity\",\n \"shortname\": \"Median\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"MIN_INTENSITY\",\n \"name\": \"Minimal intensity\",\n 
\"shortname\": \"Min\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"MAX_INTENSITY\",\n \"name\": \"Maximal intensity\",\n \"shortname\": \"Max\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TOTAL_INTENSITY\",\n \"name\": \"Total intensity\",\n \"shortname\": \"Total int.\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"STANDARD_DEVIATION\",\n \"name\": \"Standard deviation\",\n \"shortname\": \"Stdev.\",\n \"dimension\": \"INTENSITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"ESTIMATED_DIAMETER\",\n \"name\": \"Estimated diameter\",\n \"shortname\": \"Diam.\",\n \"dimension\": \"LENGTH\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"CONTRAST\",\n \"name\": \"Contrast\",\n \"shortname\": \"Constrast\",\n \"dimension\": \"NONE\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"SNR\",\n \"name\": \"Signal/Noise, ratio\",\n \"shortname\": \"SNR\",\n \"dimension\": \"NONE\",\n \"isint\": \"false\",\n },\n ]\n\n dict_edgefeatures = [\n {\n \"feature\": \"SPOT_SOURCE_ID\",\n \"name\": \"Source spot ID\",\n \"shortname\": \"Source ID\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"SPOT_TARGET_ID\",\n \"name\": \"Target spot ID\",\n \"shortname\": \"Target ID\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"LINK_COST\",\n \"name\": \"Link cost\",\n \"shortname\": \"Cost\",\n \"dimension\": \"NONE\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"EDGE_TIME\",\n \"name\": \"Time (mean)\",\n \"shortname\": \"T\",\n \"dimension\": \"TIME\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"EDGE_X_LOCATION\",\n \"name\": \"X Location (mean)\",\n \"shortname\": \"X\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"EDGE_Y_LOCATION\",\n \"name\": \"Y Location (mean)\",\n \"shortname\": \"Y\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"EDGE_Z_LOCATION\",\n \"name\": \"Z Location (mean)\",\n \"shortname\": \"Z\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"VELOCITY\",\n \"name\": \"Velocity\",\n \"shortname\": \"V\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"DISPLACEMENT\",\n \"name\": \"Displacement\",\n \"shortname\": \"D\",\n \"dimension\": \"LENGTH\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"MANUAL_COLOR\",\n \"name\": \"Manual edge color\",\n \"shortname\": \"Edge color\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n ]\n\n dict_trackfeatures = [\n {\n \"feature\": \"MANUAL_INTEGER_TRACK_FEATURE\",\n \"name\": \"Custom Integer Track Feature\",\n \"shortname\": \"Integer Track Feature\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"MANUAL_DOUBLE_TRACK_FEATURE\",\n \"name\": \"Custom Double Track Feature\",\n \"shortname\": \"Double Track Feature\",\n \"dimension\": \"NONE\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"NUMBER_SPOTS\",\n \"name\": \"Number of spots in track\",\n \"shortname\": \"N spots\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"NUMBER_GAPS\",\n \"name\": \"Number of gaps\",\n \"shortname\": \"Gaps\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"LONGEST_GAP\",\n \"name\": \"Longest gap\",\n \"shortname\": \"Longest gap\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"NUMBER_SPLITS\",\n \"name\": \"Number 
of split events\",\n \"shortname\": \"Splits\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"NUMBER_MERGES\",\n \"name\": \"Number of merge events\",\n \"shortname\": \"Merges\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"NUMBER_COMPLEX\",\n \"name\": \"Complex points\",\n \"shortname\": \"Complex\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"TRACK_DURATION\",\n \"name\": \"Duration of track\",\n \"shortname\": \"Duration\",\n \"dimension\": \"TIME\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_START\",\n \"name\": \"Track start\",\n \"shortname\": \"T start\",\n \"dimension\": \"TIME\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_STOP\",\n \"name\": \"Track stop\",\n \"shortname\": \"T stop\",\n \"dimension\": \"TIME\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_DISPLACEMENT\",\n \"name\": \"Track displacement\",\n \"shortname\": \"Displacement\",\n \"dimension\": \"LENGTH\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_INDEX\",\n \"name\": \"Track index\",\n \"shortname\": \"Index\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"TRACK_ID\",\n \"name\": \"Track ID\",\n \"shortname\": \"ID\",\n \"dimension\": \"NONE\",\n \"isint\": \"true\",\n },\n {\n \"feature\": \"TRACK_X_LOCATION\",\n \"name\": \"X Location (mean)\",\n \"shortname\": \"X\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_Y_LOCATION\",\n \"name\": \"Y Location (mean)\",\n \"shortname\": \"Y\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_Z_LOCATION\",\n \"name\": \"Z Location (mean)\",\n \"shortname\": \"Z\",\n \"dimension\": \"POSITION\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MEAN_SPEED\",\n \"name\": \"Mean velocity\",\n \"shortname\": \"Mean V\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MAX_SPEED\",\n \"name\": \"Maximal velocity\",\n \"shortname\": \"Max V\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MIN_SPEED\",\n \"name\": \"Minimal velocity\",\n \"shortname\": \"Min V\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MEDIAN_SPEED\",\n \"name\": \"Median velocity\",\n \"shortname\": \"Median V\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_STD_SPEED\",\n \"name\": \"Velocity standard deviation\",\n \"shortname\": \"V std\",\n \"dimension\": \"VELOCITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MEAN_QUALITY\",\n \"name\": \"Mean quality\",\n \"shortname\": \"Mean Q\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MAX_QUALITY\",\n \"name\": \"Maximal quality\",\n \"shortname\": \"Max Q\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MIN_QUALITY\",\n \"name\": \"Minimal quality\",\n \"shortname\": \"Min Q\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_MEDIAN_QUALITY\",\n \"name\": \"Median quality\",\n \"shortname\": \"Median Q\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n {\n \"feature\": \"TRACK_STD_QUALITY\",\n \"name\": \"Quality standard deviation\",\n \"shortname\": \"Q std\",\n \"dimension\": \"QUALITY\",\n \"isint\": \"false\",\n },\n ]\n # Model\n model = ET.SubElement(root, \"Model\", spatialunits=spatialunits, 
timeunits=timeunits)\n featuredeclarations = ET.SubElement(model, \"FeatureDeclarations\")\n\n # SpotFeatures\n spotfeatures = ET.SubElement(featuredeclarations, \"SpotFeatures\")\n for dct in dict_spotfeatures:\n _ = ET.SubElement(spotfeatures, \"Feature\", **dct)\n\n # Edgefeatures\n edgefeatures = ET.SubElement(featuredeclarations, \"EdgeFeatures\")\n for dct in dict_edgefeatures:\n _ = ET.SubElement(edgefeatures, \"Feature\", **dct)\n\n # TrackFeatures\n trackfeatures = ET.SubElement(featuredeclarations, \"TrackFeatures\")\n for dct in dict_trackfeatures:\n _ = ET.SubElement(trackfeatures, \"Feature\", **dct)\n\n return model\n\n\ndef __create_allspots(model, df):\n # List of all spots (without tracks)\n allspots = ET.SubElement(model, \"AllSpots\", nspots=str(len(df)))\n spotid = 0\n for frame in df[\"slice\"].unique():\n frame_id = str(float(frame))\n df_frame = df[df[\"slice\"] == frame]\n spotsinframe = ET.SubElement(allspots, \"SpotsInFrame\", frame=str(frame))\n for row in df_frame.iterrows():\n try:\n size = str(row[1][\"size\"] * 2)\n except KeyError:\n size = \"1.0\"\n dict_spot = {\n \"ID\": f\"{spotid:06}\",\n \"name\": f\"ID{spotid:06}\",\n \"QUALITY\": \"1.0\",\n \"POSITION_T\": frame_id,\n \"MAX_INTENSITY\": \"1.0\",\n \"FRAME\": frame_id,\n \"MEDIAN_INTENSITY\": \"1.0\",\n \"VISIBILITY\": \"1\",\n \"MEAN_INTENSITY\": \"1.0\",\n \"TOTAL_INTENSITY\": \"1.0\",\n \"ESTIMATED_DIAMETER\": size,\n \"RADIUS\": \"1.0\",\n \"SNR\": \"1.0\",\n \"POSITION_X\": str(row[1][\"x\"]),\n \"POSITION_Y\": str(row[1][\"y\"]),\n \"STANDARD_DEVIATION\": \"1.0\",\n \"CONTRAST\": \"1.0\",\n \"MANUAL_COLOR\": \"-10921639\",\n \"MIN_INTENSITY\": \"0.0\",\n \"POSITION_Z\": \"1\",\n }\n _ = ET.SubElement(spotsinframe, \"Spot\", **dict_spot)\n spotid = spotid + 1\n\n\ndef __create_alltracks(model, df):\n # List of all tracks\n alltracks = ET.SubElement(model, \"AllTracks\")\n\n\n# for particle in df[\"particle\"].unique():\n# df_track = df[df[\"particle\"] == particle]\n# track_ids = list(df_track.index)\n# frames = np.array(df_track[\"slice\"])\n# longest, total, duration = get_gaps(frames)\n# dict_track = {\n# \"name\": f\"Track_{particle}\",\n# \"TRACK_ID\": str(particle),\n# \"NUMBER_SPOTS\": str(len(frames)),\n# \"NUMBER_GAPS\": longest,\n# \"LONGEST_GAP\": total,\n# \"NUMBER_SPLITS\": \"0\",\n# \"NUMBER_MERGES\": \"0\",\n# \"NUMBER_COMPLEX\": \"0\",\n# \"TRACK_DURATION\": duration,\n# \"TRACK_START\": str(min(frames)),\n# \"TRACK_STOP\": str(max(frames)),\n# \"TRACK_DISPLACEMENT\": \"0.01\",\n# \"TRACK_INDEX\": str(particle),\n# \"TRACK_X_LOCATION\": str(df_track[\"x\"].mean()),\n# \"TRACK_Y_LOCATION\": str(df_track[\"y\"].mean()),\n# \"TRACK_Z_LOCATION\": \"0.1\",\n# \"TRACK_MEAN_SPEED\": \"0.1\",\n# \"TRACK_MAX_SPEED\": \"0.1\",\n# \"TRACK_MIN_SPEED\": \"0.1\",\n# \"TRACK_MEDIAN_SPEED\": \"0.1\",\n# \"TRACK_STD_SPEED\": \"0.1\",\n# \"TRACK_MEAN_QUALITY\": \"0.1\",\n# \"TRACK_MAX_QUALITY\": \"0.1\",\n# \"TRACK_MIN_QUALITY\": \"0.1\",\n# \"TRACK_MEDIAN_QUALITY\": \"0.1\",\n# \"TRACK_STD_QUALITY\": \"0.1\",\n# }\n# track = ET.SubElement(alltracks, \"Track\", **dict_track)\n\n# # Add all spots in the corresponding track\n# for row in df_track.iterrows():\n# dict_edge = {\n# \"SPOT_SOURCE_ID\": f\"{row[0]:06}\",\n# \"SPOT_TARGET_ID\": f\"{track_ids[track_ids.index(row[0]) - 1]:06}\",\n# \"LINK_COST\": \"0.1\",\n# \"EDGE_TIME\": \"0.1\",\n# \"EDGE_X_LOCATION\": str(row[1][\"x\"]),\n# \"EDGE_Y_LOCATION\": str(row[1][\"y\"]),\n# \"EDGE_Z_LOCATION\": \"0.0\",\n# \"VELOCITY\": \"0.1\",\n# 
\"DISPLACEMENT\": \"0.1\",\n# }\n# _ = ET.SubElement(track, \"Edge\", **dict_edge)\n\n\ndef __create_filteredtracks(model, df):\n # Tracks after TrackMate's filtering\n filteredtracks = ET.SubElement(model, \"FilteredTracks\")\n\n\n# for particle in df[\"particle\"].unique():\n# _ = ET.SubElement(filteredtracks, \"TrackID\", TRACK_ID=str(particle))\n\n\ndef __create_settings(\n root,\n file_image,\n pixelwidth: str = \"1.0\",\n pixelheight: str = \"1.0\",\n voxeldepth: str = \"1.0\",\n timeinterval: str = \"1.0\",\n):\n # Image metadata\n path, fname = os.path.split(file_image)\n image = skimage.io.imread(file_image)\n if len(image.shape) == 2:\n Warning(\n f\"Found image with shape = 2; assuming it's 3d data with a single time point.\"\n )\n image = np.expand_dims(image, axis=0)\n frames, width, height = image.shape\n imagedata = {\n \"filename\": fname,\n \"folder\": path,\n \"width\": str(width),\n \"height\": str(height),\n \"nslices\": \"1\",\n \"nframes\": str(frames),\n \"pixelwidth\": pixelwidth,\n \"pixelheight\": pixelheight,\n \"voxeldepth\": voxeldepth,\n \"timeinterval\": timeinterval,\n }\n basicsettings = {\n \"xstart\": \"0\",\n \"xend\": str(width - 1),\n \"ystart\": \"0\",\n \"yend\": str(height - 1),\n \"zstart\": \"0\",\n \"zend\": \"0\",\n \"tstart\": \"0\",\n \"tend\": str(frames - 1),\n }\n detectorsettings = {\n \"DETECTOR_NAME\": \"LOG_DETECTOR\",\n \"TARGET_CHANNEL\": \"1\",\n \"RADIUS\": \"5.0\",\n \"THRESHOLD\": \"1000.0\",\n \"DO_MEDIAN_FILTERING\": \"false\",\n \"DO_SUBPIXEL_LOCALIZATION\": \"true\",\n }\n initialspotfilter = {\"feature\": \"QUALITY\", \"value\": \"0.0\", \"isabove\": \"true\"}\n dict_trackersettings = {\n \"TRACKER_NAME\": \"SPARSE_LAP_TRACKER\",\n \"CUTOFF_PERCENTILE\": \"0.9\",\n \"ALTERNATIVE_LINKING_COST_FACTOR\": \"1.05\",\n \"BLOCKING_VALUE\": \"Infinity\",\n }\n dict_subtrackersettings = {\n \"Linking\": {\"LINKING_MAX_DISTANCE\": \"0.8\"},\n \"GapClosing\": {\n \"ALLOW_GAP_CLOSING\": \"false\",\n \"GAP_CLOSING_MAX_DISTANCE\": \"0.5\",\n \"MAX_FRAME_GAP\": \"3\",\n },\n \"TrackSplitting\": {\n \"ALLOW_TRACK_SPLITTING\": \"false\",\n \"SPLITTING_MAX_DISTANCE\": \"15.0\",\n },\n \"TrackMerging\": {\n \"ALLOW_TRACK_MERGING\": \"false\",\n \"MERGING_MAX_DISTANCE\": \"15.0\",\n },\n }\n dict_analyzercollection = {\n \"SpotAnalyzers\": [\n \"MANUAL_SPOT_COLOR_ANALYZER\",\n \"Spot descriptive statistics\",\n \"Spot radius estimator\",\n \"Spot contrast and SNR\",\n ],\n \"EdgeAnalyzers\": [\n \"Edge target\",\n \"Edge mean location\",\n \"Edge velocity\",\n \"MANUAL_EDGE_COLOR_ANALYZER\",\n ],\n \"TrackAnalyzers\": [\n \"Branching analyzer\",\n \"Track duration\",\n \"Track index\",\n \"Track location\",\n \"Velocity\",\n \"TRACK_SPOT_QUALITY\",\n ],\n }\n\n # General Settings\n settings = ET.SubElement(root, \"Settings\")\n _ = ET.SubElement(settings, \"ImageData\", **imagedata)\n _ = ET.SubElement(settings, \"BasicSettings\", **basicsettings)\n _ = ET.SubElement(settings, \"DetectorSettings\", **detectorsettings)\n _ = ET.SubElement(settings, \"InitialSpotFilter\", **initialspotfilter)\n _ = ET.SubElement(settings, \"SpotFilterCollection\")\n\n # Tracker settings\n trackersettings = ET.SubElement(settings, \"TrackerSettings\", **dict_trackersettings)\n for k, v in dict_subtrackersettings.items():\n subelement = ET.SubElement(trackersettings, k, **v)\n _ = ET.SubElement(subelement, \"FeaturePenalties\")\n\n # Filter settings\n _ = ET.SubElement(settings, \"TrackFilterCollection\")\n analyzercollection = ET.SubElement(settings, 
\"AnalyzerCollection\")\n for k, v in dict_analyzercollection.items():\n subanalyzer = ET.SubElement(analyzercollection, k)\n for lst in v:\n _ = ET.SubElement(subanalyzer, \"Analyzer\", key=lst)\n\n\ndef __create_guistate(root):\n # TrackMate's GUI settings\n guistate = ET.SubElement(root, \"GUIState\", state=\"InitialFiltering\")\n for _ in range(4):\n _ = ET.SubElement(guistate, \"View\", key=\"HYPERSTACKDISPLAYER\")\n\n\ndef __pretty_output(root, file_output):\n # Save file after fancy formatting to prettify\n with tempfile.TemporaryDirectory() as tempdirname:\n fname = os.path.join(tempdirname, \"file.xml\")\n tree = ET.ElementTree(root)\n tree.write(fname, encoding=\"UTF-8\", xml_declaration=True)\n dom = xml.dom.minidom.parse(fname)\n pretty_xml = dom.toprettyxml()\n\n with open(file_output, \"w\") as f:\n f.write(pretty_xml)\n\n\ndef create_trackmate_xml(\n spots_df,\n file_image,\n file_output,\n spatialunits: str = \"pixel\",\n timeunits: str = \"sec\",\n pixelwidth: int = 1,\n pixelheight: int = 1,\n voxeldepth: int = 1,\n timeinterval: int = 1,\n):\n # Check required track df columns\n df = spots_df\n df[\"x\"] = df[\"x\"] * pixelwidth\n df[\"y\"] = df[\"y\"] * pixelheight\n df[\"z\"] = 1.0\n\n df.to_csv(file_output.replace(\"xml\", \"csv\"))\n\n req_cols = [\"x\", \"y\", \"slice\"]\n if not all(req in df.columns for req in req_cols):\n raise ValueError(f\"Not all required columns present! {req_cols} must exist.\")\n\n # XML tree\n root = ET.Element(\"TrackMate\", version=\"6.0.1\")\n\n # Model\n model = __create_model(root, spatialunits=spatialunits, timeunits=timeunits)\n __create_allspots(model, df)\n __create_alltracks(model, df)\n __create_filteredtracks(model, df)\n\n # Settings\n __create_settings(\n root,\n file_image,\n pixelwidth=str(pixelwidth),\n pixelheight=str(pixelheight),\n voxeldepth=str(voxeldepth),\n timeinterval=str(timeinterval),\n )\n __create_guistate(root)\n\n # Save output\n __pretty_output(root, file_output)\n"
] | [
[
"numpy.setdiff1d",
"numpy.expand_dims"
]
] |
PatrykNeubauer/NeMo | [
"3ada744b884dba5f233f22c6991fc6092c6ca8d0"
] | [
"scripts/asr_language_modeling/neural_rescorer/eval_neural_rescorer.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script would evaluate a neural language model (Transformer) trained with\n`examples/nlp/language_modeling/transformer_lm.py' as a rescorer for ASR systems.\nGiven a trained TransformerLMModel `.nemo` file, this script can be used to re-score the beams obtained from a beam\nsearch decoder of an ASR model.\n\nUSAGE:\n1. Obtain `.tsv` file with beams and their corresponding scores. Scores can be from a regular beam search decoder or\n in fusion with an N-gram LM scores. For a given beam size `beam_size` and a number of examples\n for evaluation `num_eval_examples`, it should contain (`beam_size` x `num_eval_examples`) lines of\n form `beam_candidate_text \\t score`. This file can be generated by `scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py`.\n\n2. Rescore the candidates:\n python eval_neural_rescorer.py\n --lm_model=[path to .nemo file of the LM]\n --beams_file=[path to beams .tsv file]\n --beam_size=[size of the beams]\n --eval_manifest=[path to eval manifest .json file]\n --batch_size=[batch size used for inference on the LM model]\n --alpha=[the value for the parameter rescorer_alpha]\n --beta=[the value for the parameter rescorer_beta]\n\nYou may find more info on how to use this script at:\nhttps://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html\n\n\"\"\"\n\nimport contextlib\nimport json\nfrom argparse import ArgumentParser\n\nimport editdistance\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport torch\nimport tqdm\n\nfrom nemo.collections.nlp.models.language_modeling import TransformerLMModel\nfrom nemo.utils import logging\n\n\nclass BeamScoresDataset(torch.utils.data.Dataset):\n \"\"\"\n Dataset to read the score file containing the beams and their score\n\n Args:\n data_path: path to the beams file\n tokenizer: tokenizer of the LM model\n manifest_path: manifest `.json` file which contains the ground truths transcripts\n beam_size: the number of beams per sample\n max_seq_length: the maximum length of sequences\n \"\"\"\n\n def __init__(self, data_path, tokenizer, manifest_path, beam_size=128, max_seq_length=256):\n self.data = pd.read_csv(data_path, delimiter=\"\\t\", header=None)\n self.tokenizer = tokenizer\n self.ground_truths = []\n with open(manifest_path, 'r') as f_orig:\n for line in f_orig:\n item = json.loads(line)\n self.ground_truths.append(item['text'])\n self.beam_size = beam_size\n self.max_seq_length = max_seq_length\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n text = str(self.data[0][idx])\n tokens = [self.tokenizer.bos_id] + self.tokenizer.text_to_ids(text) + [self.tokenizer.eos_id]\n input_ids = [self.tokenizer.pad_id] * self.max_seq_length\n input_ids[: len(tokens)] = tokens\n input_ids = np.array(input_ids)\n input_mask = (input_ids != self.tokenizer.pad_id).astype(np.float32)\n acoustic_score = 
self.data[1][idx]\n dist = editdistance.eval(text.split(), self.ground_truths[idx // self.beam_size].split())\n ref_len = len(self.ground_truths[idx // self.beam_size].split())\n len_in_chars = len(str(self.data[0][idx]))\n return input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx\n\n\ndef linear_search_wer(\n dists, scores1, scores2, total_len, coef_range=[0, 10], coef_steps=10000, param_name='parameter'\n):\n \"\"\"\n performs linear search to find the best coefficient when two set of scores are getting linearly fused.\n\n Args:\n dists: Tesnor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]\n scores1: Tensor of the first set of scores with shape of [number of samples, beam size]\n scores2: Tensor of the second set of scores with shape of [number of samples, beam size]\n total_len: The total length of all samples\n coef_range: the search range for the coefficient\n coef_steps: the number of steps that the search range would get divided into\n param_name: the name of the parameter to be used in the figure\n\n Output:\n (best coefficient found, best WER achived)\n \"\"\"\n scale = scores1.mean().abs().item() / scores2.mean().abs().item()\n left = coef_range[0] * scale\n right = coef_range[1] * scale\n coefs = np.linspace(left, right, coef_steps)\n\n best_wer = 10000\n best_coef = left\n wers = []\n for coef in coefs:\n scores = scores1 + coef * scores2\n wer = compute_wer(dists, scores, total_len)\n wers.append(wer)\n if wer < best_wer:\n best_wer = wer\n best_coef = coef\n\n plt.plot(coefs, wers)\n plt.title(f'WER% after rescoring with different values of {param_name}')\n plt.ylabel('WER%')\n plt.xlabel(param_name)\n plt.show()\n return best_coef, best_wer\n\n\ndef compute_wer(dists, scores, total_len):\n \"\"\"\n Sorts the candidates based on the scores and calculates the WER with the new top candidates.\n\n Args:\n dists: Tensor of the distances between the ground truth and the candidates with shape of [number of samples, beam size]\n scores: Tensor of the scores for candidates with shape of [number of samples, beam size]\n total_len: The total length of all samples\n\n Output:\n WER with the new scores\n \"\"\"\n indices = scores.max(dim=1, keepdim=True)[1]\n wer = dists.gather(dim=1, index=indices).sum() / total_len\n wer = wer.item()\n return wer\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"--lm_model_file\", type=str, required=True, help=\"path to LM model .nemo file\")\n parser.add_argument(\"--beams_file\", type=str, required=True, help=\"path to beams .tsv file\")\n parser.add_argument(\n \"--eval_manifest\", type=str, required=True, help=\"path to the evaluation `.json` manifest file\"\n )\n parser.add_argument(\"--beam_size\", type=int, required=True, help=\"number of beams per candidate\")\n parser.add_argument(\"--batch_size\", type=int, default=256, help=\"inference batch size\")\n parser.add_argument(\"--alpha\", type=float, default=None, help=\"parameter alpha of the fusion\")\n parser.add_argument(\"--beta\", type=float, default=None, help=\"parameter beta of the fusion\")\n parser.add_argument(\n \"--scores_output_file\", default=None, type=str, help=\"The optional path to store the rescored beams\"\n )\n parser.add_argument(\n \"--device\", default=\"cuda\", type=str, help=\"The device to load the model onto to calculate the scores\"\n )\n parser.add_argument(\n \"--use_amp\", action=\"store_true\", help=\"Whether to use AMP if available to calculate the scores\"\n )\n 
args = parser.parse_args()\n\n device = args.device\n if device.startswith(\"cuda\") and not torch.cuda.is_available():\n logging.info(f\"cuda is not available! switched to cpu.\")\n device = \"cpu\"\n\n if args.lm_model_file.endswith(\".nemo\"):\n logging.info(\"Attempting to initialize from .nemo file\")\n model = TransformerLMModel.restore_from(\n restore_path=args.lm_model_file, map_location=torch.device(device)\n ).eval()\n else:\n raise NotImplementedError(f\"Only supports .nemo files, but got: {args.model}\")\n\n max_seq_length = model.encoder._embedding.position_embedding.pos_enc.shape[0]\n dataset = BeamScoresDataset(args.beams_file, model.tokenizer, args.eval_manifest, args.beam_size, max_seq_length)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=args.batch_size)\n\n if args.use_amp:\n if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):\n logging.info(\"AMP is enabled!\\n\")\n autocast = torch.cuda.amp.autocast\n else:\n\n @contextlib.contextmanager\n def autocast():\n yield\n\n logging.info(f\"Rescoring with beam_size: {args.beam_size}\")\n logging.info(\"Calculating the scores...\")\n with autocast():\n with torch.no_grad():\n am_scores, lm_scores, dists, ref_lens, lens_in_chars = [], [], [], [], []\n for batch in tqdm.tqdm(data_loader):\n input_ids, input_mask, acoustic_score, dist, ref_len, len_in_chars, idx = batch\n\n max_len_in_batch = input_mask.sum(dim=0).argmin().item()\n input_ids, input_mask = input_ids[:, :max_len_in_batch], input_mask[:, :max_len_in_batch]\n if torch.cuda.is_available():\n input_ids, input_mask = input_ids.to(device), input_mask.to(device)\n dist, acoustic_score, len_in_chars = (\n dist.to(device),\n acoustic_score.to(device),\n len_in_chars.to(device),\n )\n\n log_probs = model.forward(input_ids[:, :-1], input_mask[:, :-1])\n target_log_probs = log_probs.gather(2, input_ids[:, 1:].unsqueeze(2)).squeeze(2)\n neural_lm_score = torch.sum(target_log_probs * input_mask[:, 1:], dim=-1)\n\n am_scores.append(acoustic_score)\n lm_scores.append(neural_lm_score)\n dists.append(dist)\n ref_lens.append(ref_len)\n lens_in_chars.append(len_in_chars)\n\n am_scores = torch.cat(am_scores).view(-1, args.beam_size)\n lm_scores = torch.cat(lm_scores).view(-1, args.beam_size)\n dists = torch.cat(dists).view(-1, args.beam_size)\n ref_lens = torch.cat(ref_lens).view(-1, args.beam_size)\n lens_in_chars = torch.cat(lens_in_chars).view(-1, args.beam_size).to(am_scores.dtype)\n\n total_len = ref_lens[:, 0].sum()\n model_wer = dists[:, 0].sum() / total_len\n ideal_wer = dists.min(dim=1)[0].sum() / total_len\n\n if args.alpha is None:\n logging.info(\"Linear search for alpha...\")\n coef1, _ = linear_search_wer(\n dists=dists, scores1=am_scores, scores2=lm_scores, total_len=total_len, param_name='alpha'\n )\n coef1 = np.round(coef1, 3)\n logging.info(f\"alpha={coef1} achieved the best WER.\")\n logging.info(f\"------------------------------------------------\")\n else:\n coef1 = args.alpha\n\n scores = am_scores + coef1 * lm_scores\n\n if args.beta is None:\n logging.info(\"Linear search for beta...\")\n coef2, _ = linear_search_wer(\n dists=dists, scores1=scores, scores2=lens_in_chars, total_len=total_len, param_name='beta'\n )\n coef2 = np.round(coef2, 3)\n logging.info(f\"beta={coef2} achieved the best WER.\")\n logging.info(f\"------------------------------------------------\")\n else:\n coef2 = args.beta\n\n new_scores = am_scores + coef1 * lm_scores + coef2 * lens_in_chars\n rescored_wer = 
compute_wer(dists, new_scores, total_len)\n\n logging.info(f\"Input beams WER: {np.round(model_wer.item() * 100, 2)}%\")\n logging.info(f\"------------------------------------------------\")\n logging.info(f\" +LM rescoring WER: {np.round(rescored_wer * 100, 2)}%\")\n logging.info(f\" with alpha={coef1}, beta={coef2}\")\n logging.info(f\"------------------------------------------------\")\n logging.info(f\"Best possible WER: {np.round(ideal_wer.item() * 100, 2)}%\")\n logging.info(f\"------------------------------------------------\")\n\n new_scores_flatten = new_scores.flatten()\n if args.scores_output_file is not None:\n logging.info(f'Saving the candidates with their new scores at `{args.scores_output_file}`...')\n with open(args.scores_output_file, \"w\") as fout:\n for sample_id in range(len(dataset)):\n fout.write(f\"{dataset.data[0][sample_id]}\\t{new_scores_flatten[sample_id]}\\n\")\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.sum",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"numpy.round",
"torch.no_grad",
"torch.device",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"torch.cuda.is_available",
"torch.cat",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
mattzett/numerical_electromagnetics | [
"07634817ba854a5515c8c31545b735f651878c5e"
] | [
"magnetic_diffusion/diffusion1D.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 21 19:43:50 2022\n\nIllustrating a basic transient magnetic diffusion problem, See Jackson Section 5.18\n\n@author: zettergm\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse.linalg\nimport scipy.sparse\nfrom scipy.special import erf\nimport matplotlib.pyplot as plt\nfrom numpy import pi,sqrt,abs\nfrom difftools import matrix_kernel\n\n# Material parameters\nmu=4*pi*1e-7\nsigma=1e6\nD=1/mu/sigma # equivalent diffusion coefficient\na=1\nH0=1\nnu=1/mu/sigma/a**2\n\n# Size of grid\nlz=250\nNmax=200\nz=np.linspace(-5*a,5*a,lz)\ndz=z[1]-z[0]\ndt = 5*dz**2/D/2 # explicit stabilty limit will results in really slow time stepping; use 5 times larger. \n\n# This could definitely benefit for sparse storage and a banded/tridiagonal solver\n#A=np.exp(-(x**2/2))\nHx=np.zeros(lz)\nindmin=np.argmin(abs(z+a))\nindmax=np.argmin(abs(z-a))\nHx[indmin:indmax]=1\n\n# Matrix defining finite-difference equation for laplacian operator, one-time setup for this problem\nMsparse=matrix_kernel(lz,dt,dz,D)\nrhs=np.zeros( (lz,1) )\n\n# time iterations\nfor n in range(0,Nmax):\n # set up time-dependent part of the problem and solve\n for i in range(1,lz-1):\n rhs[i]=Hx[i]\n rhssparse=scipy.sparse.csr_matrix(np.reshape(rhs,[lz,1]))\n Hx=scipy.sparse.linalg.spsolve(Msparse,rhssparse,use_umfpack=True) # umfpack is overkill for this but will presumably work\n\n # Solution from Jackson eqn. 5.176\n HxJ=H0/2*( erf((1+abs(z)/a)/2/sqrt((n+1)*dt*nu)) + erf((1-abs(z)/a)/2/sqrt((n+1)*dt*nu)) )\n\n # plot results of each time step and pause briefly\n plt.figure(1,dpi=150)\n plt.clf()\n plt.plot(z,HxJ,'o')\n plt.plot(z,Hx)\n plt.xlabel(\"$x$\")\n plt.ylabel(\"$H_x(z)$\")\n plt.title( \"$t$ = %6.4f s\" % ( (n+1)*dt) )\n plt.ylim((0,H0))\n plt.xlim((-2*a,2*a))\n plt.legend( (\"Jackson 5.176\",\"Numerical BTCS\") )\n plt.show()\n plt.pause(0.01)\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.pause",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.reshape",
"numpy.abs",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
Yang-YiFan/DiracDeltaNet | [
"36487542422d7573fec6e852b9eece18c6cbce21"
] | [
"extensions/utils.py"
] | [
"'''Some helper functions for PyTorch, including:\r\n - get_mean_and_std: calculate the mean and std value of dataset.\r\n - msr_init: net parameter initialization.\r\n - progress_bar: progress bar mimic xlua.progress.\r\n'''\r\nimport os\r\nimport sys\r\nimport time\r\nimport math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\nimport numpy as np\r\n\r\n\r\n\r\ndef get_mean_and_std(dataset):\r\n '''Compute the mean and std value of dataset.'''\r\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\r\n mean = torch.zeros(3)\r\n std = torch.zeros(3)\r\n print('==> Computing mean and std..')\r\n for inputs, targets in dataloader:\r\n for i in range(3):\r\n mean[i] += inputs[:,i,:,:].mean()\r\n std[i] += inputs[:,i,:,:].std()\r\n mean.div_(len(dataset))\r\n std.div_(len(dataset))\r\n return mean, std\r\n\r\ndef init_params(net):\r\n '''Init layer parameters.'''\r\n for m in net.modules():\r\n if isinstance(m, nn.Conv2d):\r\n init.kaiming_normal(m.weight, mode='fan_out')\r\n if m.bias:\r\n init.constant(m.bias, 0)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n init.constant(m.weight, 1)\r\n init.constant(m.bias, 0)\r\n elif isinstance(m, nn.Linear):\r\n init.normal(m.weight, std=1e-3)\r\n if m.bias:\r\n init.constant(m.bias, 0)\r\n\r\n\r\n_, term_width = os.popen('stty size', 'r').read().split()\r\nterm_width = int(term_width)\r\n\r\nTOTAL_BAR_LENGTH = 65.\r\nlast_time = time.time()\r\nbegin_time = last_time\r\ndef progress_bar(current, total, msg=None):\r\n global last_time, begin_time\r\n if current == 0:\r\n begin_time = time.time() # Reset for new bar.\r\n\r\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\r\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\r\n\r\n sys.stdout.write(' [')\r\n for i in range(cur_len):\r\n sys.stdout.write('=')\r\n sys.stdout.write('>')\r\n for i in range(rest_len):\r\n sys.stdout.write('.')\r\n sys.stdout.write(']')\r\n\r\n cur_time = time.time()\r\n step_time = cur_time - last_time\r\n last_time = cur_time\r\n tot_time = cur_time - begin_time\r\n\r\n L = []\r\n L.append(' Step: %s' % format_time(step_time))\r\n L.append(' | Tot: %s' % format_time(tot_time))\r\n if msg:\r\n L.append(' | ' + msg)\r\n\r\n msg = ''.join(L)\r\n sys.stdout.write(msg)\r\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\r\n sys.stdout.write(' ')\r\n\r\n # Go back to the center of the bar.\r\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\r\n sys.stdout.write('\\b')\r\n sys.stdout.write(' %d/%d ' % (current+1, total))\r\n\r\n if current < total-1:\r\n sys.stdout.write('\\r')\r\n else:\r\n sys.stdout.write('\\n')\r\n sys.stdout.flush()\r\n\r\ndef format_time(seconds):\r\n days = int(seconds / 3600/24)\r\n seconds = seconds - days*3600*24\r\n hours = int(seconds / 3600)\r\n seconds = seconds - hours*3600\r\n minutes = int(seconds / 60)\r\n seconds = seconds - minutes*60\r\n secondsf = int(seconds)\r\n seconds = seconds - secondsf\r\n millis = int(seconds*1000)\r\n\r\n f = ''\r\n i = 1\r\n if days > 0:\r\n f += str(days) + 'D'\r\n i += 1\r\n if hours > 0 and i <= 2:\r\n f += str(hours) + 'h'\r\n i += 1\r\n if minutes > 0 and i <= 2:\r\n f += str(minutes) + 'm'\r\n i += 1\r\n if secondsf > 0 and i <= 2:\r\n f += str(secondsf) + 's'\r\n i += 1\r\n if millis > 0 and i <= 2:\r\n f += str(millis) + 'ms'\r\n i += 1\r\n if f == '':\r\n f = '0ms'\r\n return f\r\n\r\n\r\nclass Cutout(object):\r\n \"\"\"Randomly mask out one or more patches from an image.\r\n\r\n Args:\r\n n_holes (int): Number of patches 
to cut out of each image.\r\n length (int): The length (in pixels) of each square patch.\r\n \"\"\"\r\n def __init__(self, n_holes, length):\r\n self.n_holes = n_holes\r\n self.length = length\r\n\r\n def __call__(self, img):\r\n \"\"\"\r\n Args:\r\n img (Tensor): Tensor image of size (C, H, W).\r\n Returns:\r\n Tensor: Image with n_holes of dimension length x length cut out of it.\r\n \"\"\"\r\n h = img.size(1)\r\n w = img.size(2)\r\n\r\n mask = np.ones((h, w), np.float32)\r\n\r\n for n in range(self.n_holes):\r\n y = np.random.randint(h)\r\n x = np.random.randint(w)\r\n\r\n y1 = np.clip(y - self.length // 2, 0, h)\r\n y2 = np.clip(y + self.length // 2, 0, h)\r\n x1 = np.clip(x - self.length // 2, 0, w)\r\n x2 = np.clip(x + self.length // 2, 0, w)\r\n\r\n mask[y1: y2, x1: x2] = 0.\r\n\r\n mask = torch.from_numpy(mask)\r\n mask = mask.expand_as(img)\r\n img = img * mask\r\n\r\n return img"
] | [
[
"torch.utils.data.DataLoader",
"numpy.ones",
"torch.nn.init.constant",
"torch.nn.init.normal",
"torch.from_numpy",
"numpy.clip",
"torch.zeros",
"torch.nn.init.kaiming_normal",
"numpy.random.randint"
]
] |
XinYao1994/mindspore | [
"2c1a2bf752a1fde311caddba22633d2f4f63cb4e"
] | [
"tests/ut/python/ops/test_tensor_slice.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" test_tensor_slice \"\"\"\nimport numpy as np\nimport pytest\n\nfrom mindspore import Tensor\nfrom mindspore import context\nfrom mindspore import dtype as mstype\nfrom mindspore.nn import Cell\n\nfrom ....mindspore_test_framework.mindspore_test import mindspore_test\nfrom ....mindspore_test_framework.pipeline.forward.compile_forward \\\n import pipeline_for_compile_forward_ge_graph_for_case_by_case_config\n\n\nclass NetWorkSlicePositive(Cell):\n def __init__(self):\n super(NetWorkSlicePositive, self).__init__()\n self.tensor_ret0 = Tensor(np.ones([1, 2, 2], np.int32))\n self.tensor_ret1 = Tensor(np.ones([4, 7, 4], np.int32))\n self.tensor_ret2 = Tensor(np.ones([6, 8, 10], np.int32))\n self.tensor_ret3 = Tensor(np.ones([3, 8, 10], np.int32))\n\n def construct(self, tensor):\n ret0 = tensor[3:4:3, 1:5:2, 3:6:2] + self.tensor_ret0\n ret1 = tensor[-6:4:1, 7:-8:-1, ::3] + self.tensor_ret1\n ret2 = tensor[::, ::, ::] + self.tensor_ret2\n ret3 = tensor[::2] + self.tensor_ret3\n return ret0, ret1, ret2, ret3\n\n\nclass NetWorkSliceEllipsis(Cell):\n def __init__(self):\n super(NetWorkSliceEllipsis, self).__init__()\n self.tensor_ret0 = Tensor(np.ones([2, 7, 8], np.int32))\n self.tensor_ret1 = Tensor(np.ones([6, 7, 8, 9], np.int32))\n self.tensor_ret2 = Tensor(np.ones([1, 6, 7, 8, 9], np.int32))\n\n def construct(self, tensor):\n ret0 = tensor[0:4:2, ..., 1] + self.tensor_ret0\n ret1 = tensor[...] 
+ self.tensor_ret1\n ret2 = tensor[None] + self.tensor_ret2\n ret3 = tensor[True] + self.tensor_ret2\n return ret0, ret1, ret2, ret3\n\n\nclass NetWorkReduceDimension(Cell):\n def __init__(self):\n super(NetWorkReduceDimension, self).__init__()\n self.tensor_ret0 = Tensor(np.ones([2, 4, 1], np.int32))\n self.tensor_ret1 = Tensor(np.ones([3, 4], np.int32))\n self.tensor_ret2 = Tensor(np.ones([6, 8], np.int32))\n self.tensor_ret3 = Tensor(np.array(8, np.int32))\n self.tensor_ret4 = Tensor(np.ones([8, 10], np.int32))\n\n def construct(self, tensor):\n ret0 = tensor[0:6:3, 1:5:1, 3:5:2] + self.tensor_ret0\n ret1 = tensor[::2, 1, ::3] + self.tensor_ret1\n ret2 = tensor[::, ::, 0] + self.tensor_ret2\n ret3 = tensor[3, 2, 5] + self.tensor_ret3\n ret4 = tensor[1] + self.tensor_ret4\n return ret0, ret1, ret2, ret3, ret4\n\n\nclass NetWorkStepNegative(Cell):\n def __init__(self):\n super(NetWorkStepNegative, self).__init__()\n self.tensor_ret = Tensor(np.ones([6, 5, 10], np.int32))\n\n def construct(self, tensor):\n ret = tensor[::1, -5::, ::-1] + self.tensor_ret\n return ret\n\n\nclass NetWorkReduceToScalar(Cell):\n def __init__(self):\n super(NetWorkReduceToScalar, self).__init__()\n self.tensor_ret = Tensor(np.array(9, np.int32))\n\n def construct(self, tensor):\n ret = tensor[2, 3, 4] + self.tensor_ret\n return ret\n\n\nclass TensorAssignWithSliceError1(Cell):\n def __init__(self):\n super(TensorAssignWithSliceError1, self).__init__()\n\n def construct(self, a, b):\n a[1:3:-1,::] = b\n return a\n\nclass TensorAssignWithSliceError2(Cell):\n def __init__(self):\n super(TensorAssignWithSliceError2, self).__init__()\n\n def construct(self, a, b):\n a[1:3:-1] = b\n return a\nclass TensorAssignWithSlice2(Cell):\n def __init__(self):\n super(TensorAssignWithSlice2, self).__init__()\n\n def construct(self, a, b):\n a[1:5] = b\n a[3:4] = 5\n a[-1:1:-1] = b\n a[-1:3:-1] = 5\n a[::] = b\n a[::] = 9\n return a\nclass TensorAssignWithSlice(Cell):\n def __init__(self):\n super(TensorAssignWithSlice, self).__init__()\n self.c = 2\n\n def construct(self, a, b):\n a[1:3,::] = b\n a[2:3:,3:] = b\n a[::] = b\n a[::] = self.c\n a[::,::] = b\n a[::,::] = self.c\n a[2:3:,0:, 4:1:-1] = b\n a[2:3:,0:, 4:1:-1] = self.c\n z = a\n return z\n\ndef test_tensor_assign():\n context.set_context(mode=context.GRAPH_MODE, save_graphs=True)\n net = TensorAssignWithSlice()\n net2= TensorAssignWithSlice2()\n net_e1 = TensorAssignWithSliceError1()\n net_e2 = TensorAssignWithSliceError2()\n a = np.arange(60).reshape(3,4,5)\n b = Tensor([1])\n Ta = Tensor(a)\n Ta4d = Tensor(a.reshape(1,3,4,5))\n Tb= Tensor([1,3])\n Tc= Tensor([])\n t = Tensor([1, 2, 3, 4, 5, 6, 7, 8])\n net(Ta, b)\n net2(t, b)\n # Error for A[Slice] = Number\n # 1. A[Slice] = Number, Slice error\n with pytest.raises(ValueError):\n net_e2(t, 2)\n\n # Error for A[Slice] = U, U is a Tensor\n # 1. A[Slice] = U, u.size is error\n with pytest.raises(ValueError):\n net2(t, Tb)\n # 2. A[Slice] = U, U is empty\n with pytest.raises(ValueError):\n net2(t, Tc)\n # 3. A[Slice] = U, U.size error\n with pytest.raises(ValueError):\n net2(t, Tb)\n\n # Error for A[Tuple(Slice...)] = Tensor\n # 1. A[Tuple(Slice...)] = U, U is empty\n with pytest.raises(ValueError):\n net(Ta, Tc)\n # 2. A[Tuple(Slice...)] = U, U.size error\n with pytest.raises(ValueError):\n net(Ta, Tb)\n # 3. A[Tuple(Slice...)] = U, Slice error\n with pytest.raises(ValueError):\n net_e1(Ta, b)\n\n # Error for A[Tuple(Slice...)] = Number\n # 1. 
A[Tuple(Slice...)] = Number, Slice error\n with pytest.raises(ValueError):\n net_e1(Ta, 2)\n\n net = TensorAssignWithInteger()\n # Error for A[Number] = scalar/Tensor\n # 1. A[Number] = U, U is a Tensor, u.size not match\n with pytest.raises(ValueError):\n net(Ta, Tb)\n with pytest.raises(ValueError):\n net(Ta, Tc)\n # 2. A[Number] = U, the number index error\n with pytest.raises(IndexError):\n net(Ta4d, b)\n\n # Error for A[(n,m)] = scalar/Tensor\n # 1. A[(n,m)] = U, U is a tensor. u.size not match\n net = TensorAssignWithTupleInteger()\n with pytest.raises(ValueError):\n net(Ta, Tc)\n with pytest.raises(ValueError):\n net(Ta, Tb)\n # 2. A[(n,m)] = U, the number index error\n with pytest.raises(IndexError):\n net(Ta4d, b)\n\nclass TensorAssignWithInteger(Cell):\n def __init__(self):\n super(TensorAssignWithInteger, self).__init__()\n\n def construct(self, a, b):\n a[1] = 1\n a[0] = b\n return a\n\nclass TensorAssignWithTupleInteger(Cell):\n def __init__(self):\n super(TensorAssignWithTupleInteger, self).__init__()\n\n def construct(self, a, b):\n a[(1)] = 1\n a[(1)] = b\n a[(1,1)] = b\n a[(1,1)] = 1\n return a\n\nclass TensorAssignWithBoolTensorIndex(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndex, self).__init__()\n self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float64)\n\n def construct(self, a, b, c, u_tensor, _scalar):\n a[c] = u_scalar\n a[b] = u_tensor\n z = a + self.t\n return z\n\n\nclass TensorAssignWithBoolTensorIndexError(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndexError, self).__init__()\n\n def construct(self, a, b, c, u_tensor):\n a[b][c] = u_tensor\n return a\n\n\nclass TensorAssignWithBoolTensorIndex2(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndex2, self).__init__()\n self.t = Tensor(np.arange(6).reshape([2, 3]), dtype=mstype.float64)\n self.t = Tensor(np.arange(60).reshape([3,4,5]), dtype = mstype.float64)\n\n def construct(self, a, u_tensor, _scalar):\n a[a > 8] = u_tensor\n a[a >= 6] = u_scalar\n a[a < 3] = u_scalar\n a[a <= 5] = u_tensor\n a[a == 5] = u_scalar\n z = a + self.t\n return z\n\n\nclass TensorAssignWithBoolTensorIndex2Error(Cell):\n def __init__(self):\n super(TensorAssignWithBoolTensorIndex2Error, self).__init__()\n\n def construct(self, a, u_tensor):\n a[a > 8][a > 5] = u_tensor\n return a\n\n\na = np.random.uniform(1,10,[3,4,5])\nb = a > 5\nc = a < 3\nTa = Tensor(a)\nTb = Tensor(b)\nTc = Tensor(c)\nTd = Tensor([True, True])\nu_tensor = Tensor([1])\nu_tensor_error = Tensor([1, 2])\nt_1d = Tensor([1, 2, 3, 4, 5, 6, 7, 8])\nu_scalar = 5\n\ndef test_tensor_assign_bool_index():\n net1 = TensorAssignWithBoolTensorIndex()\n net2 = TensorAssignWithBoolTensorIndex2()\n net1(Ta, Tb, Tc, u_tensor, u_scalar)\n net1(Ta, Tb, Tc, u_tensor, u_scalar)\n with pytest.raises(ValueError):\n net1(Ta, Td, Tc, u_tensor, u_scalar)\n with pytest.raises(ValueError):\n net1(Ta, u_tensor, Tc, u_tensor, u_scalar)\n with pytest.raises(ValueError):\n net1(Ta, Tb, Td, u_tensor, u_scalar)\n with pytest.raises(ValueError):\n net1(Ta, Tb, Ta, u_tensor, u_scalar)\n with pytest.raises(ValueError):\n net1(Ta, Tb, Tc, u_tensor_error, u_scalar)\n # net1(Ta, u_tensor, Tc, u_tensor_error, u_scalar)\n with pytest.raises(ValueError):\n net2(Ta, u_tensor_error, u_scalar)\n net3 = TensorAssignWithBoolTensorIndexError()\n with pytest.raises(AttributeError):\n net3(Ta, Tb, Tc, u_tensor)\n with pytest.raises(AttributeError):\n net3(Ta, Tb, Tc, u_scalar)\n net4 = TensorAssignWithBoolTensorIndex2Error()\n with 
pytest.raises(AttributeError):\n net4(Ta, u_tensor)\n with pytest.raises(AttributeError):\n net4(Ta, u_scalar)\n\ntest_cases = [\n ('TensorAssignWithTupleInteger', {\n 'block': TensorAssignWithTupleInteger(),\n 'desc_inputs': [Ta, u_tensor],\n }),\n ('TensorAssignWithInteger', {\n 'block': TensorAssignWithInteger(),\n 'desc_inputs': [Ta, u_tensor],\n }),\n ('TensorAssignWithSlice', {\n 'block': TensorAssignWithSlice(),\n 'desc_inputs': [Ta, u_tensor],\n }),\n ('TensorAssignWithSlice2', {\n 'block': TensorAssignWithSlice2(),\n 'desc_inputs': [t_1d, u_tensor],\n }),\n ('TensorAssignWithBoolTensorIndex', {\n 'block': TensorAssignWithBoolTensorIndex(),\n 'desc_inputs': [Ta, Tb, Tc, u_tensor, u_scalar],\n }),\n ('TensorAssignWithBoolTensorIndex2', {\n 'block': TensorAssignWithBoolTensorIndex2(),\n 'desc_inputs': [Ta, u_tensor, u_scalar],\n }),\n ('SlicePositive', {\n 'block': NetWorkSlicePositive(),\n 'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],\n }),\n ('SliceReduceDimension', {\n 'block': NetWorkReduceDimension(),\n 'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],\n }),\n ('SliceNegative', {\n 'block': NetWorkStepNegative(),\n 'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],\n }),\n ('SliceReduceToScalar', {\n 'block': NetWorkReduceToScalar(),\n 'desc_inputs': [Tensor(np.ones([6, 8, 10], np.int32))],\n }),\n ('TensorSliceEllipsis', {\n 'block': NetWorkSliceEllipsis(),\n 'desc_inputs': [Tensor(np.ones([6, 7, 8, 9], np.int32))],\n }),\n]\n\n\n@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)\ndef test_compile():\n context.set_context(mode=context.GRAPH_MODE)\n return test_cases\n\n\ndef test_tensor_slice_reduce_out_of_bounds_neg():\n class NetWork(Cell):\n def __init__(self):\n super(NetWork, self).__init__()\n self.tensor_ret = Tensor(np.array(9, np.int32))\n\n def construct(self, tensor):\n ret = tensor[-7, 3, 4]\n return ret\n\n input_tensor = Tensor(np.ones([6, 8, 10], np.int32))\n net = NetWork()\n with pytest.raises(ValueError) as ex:\n net(input_tensor)\n assert \"For 'StridedSlice' the `begin[0]` should be an int and must greater or equal to -6, but got `-7`\" in str(ex.value)\n\n\ndef test_tensor_slice_reduce_out_of_bounds_positive():\n class NetWork(Cell):\n def __init__(self):\n super(NetWork, self).__init__()\n self.tensor_ret = Tensor(np.array(9, np.int32))\n\n def construct(self, tensor):\n ret = tensor[6, 3, 4]\n return ret\n\n input_tensor = Tensor(np.ones([6, 8, 10], np.int32))\n net = NetWork()\n with pytest.raises(ValueError) as ex:\n net(input_tensor)\n assert \"For 'StridedSlice' the `begin[0]` should be an int and must less than 6, but got `6`\" in str(ex.value)\n"
] | [
[
"numpy.random.uniform",
"numpy.ones",
"numpy.arange",
"numpy.array"
]
] |
Strasser-Pablo/pipelines | [
"a1d513eb412f3ffd44edf82af2fa7edb05c3b952"
] | [
"components/deprecated/tfx/ExampleGen/CsvExampleGen/with_URI_IO/component.py"
] | [
"from typing import NamedTuple\n\ndef CsvExampleGen(\n output_examples_uri: 'ExamplesUri',\n input_base: str,\n input_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Input'}},\n output_config: {'JsonObject': {'data_type': 'proto:tfx.components.example_gen.Output'}},\n range_config: {'JsonObject': {'data_type': 'proto:tfx.configs.RangeConfig'}} = None,\n beam_pipeline_args: list = None,\n) -> NamedTuple('Outputs', [\n ('examples_uri', 'ExamplesUri'),\n]):\n from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen as component_class\n\n #Generated code\n import os\n import tempfile\n from tensorflow.io import gfile\n from google.protobuf import json_format, message\n from tfx.types import channel_utils, artifact_utils\n from tfx.components.base import base_executor\n\n arguments = locals().copy()\n\n component_class_args = {}\n\n for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():\n argument_value = arguments.get(name, None)\n if argument_value is None:\n continue\n parameter_type = execution_parameter.type\n if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message):\n argument_value_obj = parameter_type()\n json_format.Parse(argument_value, argument_value_obj)\n else:\n argument_value_obj = argument_value\n component_class_args[name] = argument_value_obj\n\n for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():\n artifact_path = arguments.get(name + '_uri') or arguments.get(name + '_path')\n if artifact_path:\n artifact = channel_parameter.type()\n artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash\n if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:\n # Recovering splits\n subdirs = gfile.listdir(artifact_path)\n # Workaround for https://github.com/tensorflow/tensorflow/issues/39167\n subdirs = [subdir.rstrip('/') for subdir in subdirs]\n split_names = [subdir.replace('Split-', '') for subdir in subdirs]\n artifact.split_names = artifact_utils.encode_split_names(sorted(split_names))\n component_class_args[name] = channel_utils.as_channel([artifact])\n\n component_class_instance = component_class(**component_class_args)\n\n input_dict = channel_utils.unwrap_channel_dict(component_class_instance.inputs.get_all())\n output_dict = {}\n exec_properties = component_class_instance.exec_properties\n\n # Generating paths for output artifacts\n for name, channel in component_class_instance.outputs.items():\n artifact_path = arguments.get('output_' + name + '_uri') or arguments.get(name + '_path')\n if artifact_path:\n artifact = channel.type()\n artifact.uri = artifact_path.rstrip('/') + '/' # Some TFX components require that the artifact URIs end with a slash\n artifact_list = [artifact]\n channel._artifacts = artifact_list\n output_dict[name] = artifact_list\n\n print('component instance: ' + str(component_class_instance))\n\n executor_context = base_executor.BaseExecutor.Context(\n beam_pipeline_args=arguments.get('beam_pipeline_args'),\n tmp_dir=tempfile.gettempdir(),\n unique_id='tfx_component',\n )\n executor = component_class_instance.executor_spec.executor_class(executor_context)\n executor.Do(\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n )\n\n return (output_examples_uri, )\n"
] | [
[
"tensorflow.io.gfile.listdir"
]
] |
AcudoDev/FinanceToolbox | [
"90676e798f2e8eac164ccfcd6708cc717e1911f2"
] | [
"Other/GaussianRandomStockPrice.py"
] | [
"import pandas as pd\nimport numpy as np\n\nimport yfinance as yf\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels\nimport statsmodels.api as sm\nimport statsmodels.tsa.stattools as ts\n\nimport datetime\n\nimport scipy.stats\nimport math\nimport openpyxl as pyxl\nfrom scipy import signal\nfrom scipy import stats as ss\nimport statistics\n\nfrom finta import TA\nfrom filterpy.kalman import KalmanFilter\nfrom filterpy.common import Q_discrete_white_noise\n\nimport pandas_ta as ta\nfrom pingouin import gzscore\n\n\ndef GaussianRandomStockPrice(mu, sigma, n, end, freq, S0=100):\n \"\"\"\n This function randomly creates a stock price series bases on gaussian probabilities.\n\n Arguments:\n ----------\n - mu: float\n The mean parameter\n - sigma: float\n The standard déviation parameter\n - n: int\n Number of periods\n - end: datetime date\n The last date of thé series\n - freq: pandas frequency string\n The frequency of thé dataseries:\n - \"D\": days\n - \"min\": minutes\n - \"s\": seconds\n - S0: float\n The first stock price\n\n Return:\n ----------\n - RStock: Pandas DataFrame\n Contains thé datetime as index and thé random stock prices in a column\n\n \"\"\"\n\n RStock = np.random.normal(mu, sigma, n).astype(\"float\")\n RStock = pd.DataFrame(RStock)\n RStock.rename(inplace=True, columns={RStock.columns[0]: \"Return\"})\n RStock[\"Price\"] = ((1 + RStock[\"Return\"]).cumprod()) * S0\n times = pd.date_range(end=end, freq=freq, periods=n)\n\n RStock.index = times\n RStock = pd.DataFrame(RStock[\"Price\"])\n\n return RStock\n"
] | [
[
"numpy.random.normal",
"pandas.DataFrame",
"pandas.date_range"
]
] |
UCLA-SEAL/QDiff | [
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819",
"d968cbc47fe926b7f88b4adf10490f1edd6f8819"
] | [
"data/p3BR/R2/benchmark/startQiskit289.py",
"benchmark/startQiskit2375.py",
"data/p3BR/R1/benchmark/startQiskit_QC456.py",
"benchmark/startQiskit_Class2296.py",
"data/p4VQE/R4/benchmark/startQiskit_noisy62.py",
"benchmark/startQiskit_noisy2042.py",
"benchmark/startQiskit2925.py",
"benchmark/startQiskit_Class2285.py",
"benchmark/startQiskit_QC2348.py",
"data/p2DJ/New/R2/benchmark/startQiskit_Class124.py",
"data/p2DJ/New/program/qiskit/class/startQiskit_Class182.py",
"data/p4VQE/R4/benchmark/startQiskit_QC277.py",
"benchmark/startQiskit_Class3343.py",
"benchmark/startQiskit1590.py",
"data/p2DJ/New/program/qiskit/simulator/startQiskit336.py",
"benchmark/startQiskit_noisy2320.py",
"data/p3BR/R2/benchmark/startQiskit_noisy43.py",
"data/p4VQE/R2/benchmark/startQiskit_QC77.py",
"benchmark/startQiskit1126.py",
"benchmark/startQiskit_noisy2660.py",
"data/p4VQE/R4/benchmark/startQiskit92.py",
"data/p3BR/R2/benchmark/startQiskit_Class364.py",
"data/p2DJ/New/R2/benchmark/startQiskit_Class143.py",
"benchmark/startQiskit_Class3399.py",
"benchmark/startQiskit_noisy3345.py",
"data/p2DJ/New/program/qiskit/simulator/startQiskit250.py",
"benchmark/startQiskit_QC1883.py",
"benchmark/startQiskit_noisy3210.py",
"data/p3BR/R1/benchmark/startQiskit_Class408.py",
"benchmark/startQiskit_noisy1314.py",
"data/p4VQE/R4/benchmark/startQiskit_QC575.py",
"benchmark/startQiskit_QC1911.py",
"data/p2DJ/New/program/qiskit/class/startQiskit_Class230.py",
"benchmark/startQiskit_QC1786.py",
"data/p4VQE/R4/benchmark/startQiskit_noisy116.py",
"benchmark/startQiskit1740.py",
"benchmark/startQiskit_Class2002.py",
"data/p2DJ/New/program/qiskit/class/startQiskit_Class145.py",
"data/p3BR/R1/benchmark/startQiskit_Class179.py",
"benchmark/startQiskit_noisy1883.py",
"data/p2DJ/New/R2/benchmark/startQiskit_QC79.py",
"data/p2DJ/New/program/qiskit/simulator/startQiskit78.py",
"data/p2DJ/New/R2/benchmark/startQiskit_noisy126.py",
"benchmark/startQiskit_noisy2692.py"
] | [
"# qubit number=3\n# total number=60\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[2]) # number=38\n prog.cz(input_qubit[0],input_qubit[2]) # number=39\n prog.h(input_qubit[2]) # number=40\n prog.cx(input_qubit[0],input_qubit[2]) # number=31\n prog.h(input_qubit[2]) # number=42\n prog.cz(input_qubit[0],input_qubit[2]) # number=43\n prog.h(input_qubit[2]) # number=44\n prog.h(input_qubit[2]) # number=48\n prog.cz(input_qubit[0],input_qubit[2]) # number=49\n prog.h(input_qubit[2]) # number=50\n prog.h(input_qubit[2]) # number=57\n prog.cz(input_qubit[0],input_qubit[2]) # number=58\n prog.h(input_qubit[2]) # number=59\n prog.x(input_qubit[2]) # number=55\n prog.cx(input_qubit[0],input_qubit[2]) # number=56\n prog.cx(input_qubit[0],input_qubit[2]) # number=47\n prog.cx(input_qubit[0],input_qubit[2]) # number=37\n prog.h(input_qubit[2]) # number=51\n prog.cz(input_qubit[0],input_qubit[2]) # number=52\n prog.h(input_qubit[2]) # number=53\n prog.h(input_qubit[2]) # number=25\n prog.cz(input_qubit[0],input_qubit[2]) # number=26\n prog.h(input_qubit[2]) # number=27\n prog.h(input_qubit[1]) # number=7\n prog.cz(input_qubit[2],input_qubit[1]) # number=8\n prog.rx(0.17592918860102857,input_qubit[2]) # number=34\n prog.rx(-0.3989822670059037,input_qubit[1]) # number=30\n prog.h(input_qubit[1]) # number=9\n prog.h(input_qubit[1]) # number=18\n prog.cz(input_qubit[2],input_qubit[1]) # number=19\n 
prog.h(input_qubit[1]) # number=20\n prog.y(input_qubit[1]) # number=14\n prog.h(input_qubit[1]) # number=22\n prog.cz(input_qubit[2],input_qubit[1]) # number=23\n prog.h(input_qubit[1]) # number=24\n prog.z(input_qubit[2]) # number=3\n prog.z(input_qubit[1]) # number=41\n prog.x(input_qubit[1]) # number=17\n prog.y(input_qubit[2]) # number=5\n prog.x(input_qubit[2]) # number=21\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = provider.get_backend(backend_str)\n # qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = Aer.get_backend(backend_str)\n # transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit289.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = BasicAer.get_backend('qasm_simulator')\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=40\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=31\n prog.cz(input_qubit[0],input_qubit[3]) # number=32\n prog.h(input_qubit[3]) # number=33\n prog.x(input_qubit[3]) # number=27\n prog.h(input_qubit[3]) # number=34\n prog.cz(input_qubit[0],input_qubit[3]) # number=35\n prog.h(input_qubit[3]) # number=36\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.cx(input_qubit[3],input_qubit[0]) # number=37\n prog.z(input_qubit[3]) # number=38\n prog.cx(input_qubit[3],input_qubit[0]) # number=39\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.cx(input_qubit[2],input_qubit[0]) # number=10\n prog.h(input_qubit[0]) # number=14\n prog.h(input_qubit[1]) # number=30\n prog.cz(input_qubit[2],input_qubit[0]) # number=15\n prog.h(input_qubit[0]) # number=16\n prog.cx(input_qubit[0],input_qubit[2]) # number=20\n prog.x(input_qubit[2]) # number=21\n prog.cx(input_qubit[0],input_qubit[2]) # number=22\n prog.cx(input_qubit[0],input_qubit[2]) # number=17\n prog.cx(input_qubit[0],input_qubit[2]) # number=23\n prog.x(input_qubit[2]) # number=24\n prog.cx(input_qubit[0],input_qubit[2]) # number=25\n prog.cx(input_qubit[0],input_qubit[2]) # number=19\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('qasm_simulator')\n sample_shot =8000\n\n info = 
execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit2375.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=84\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[1]) # number=70\n prog.rx(-0.09738937226128368,input_qubit[2]) # number=2\n prog.h(input_qubit[1]) # number=33\n prog.y(input_qubit[2]) # number=56\n prog.cz(input_qubit[2],input_qubit[1]) # number=34\n prog.h(input_qubit[1]) # number=35\n prog.h(input_qubit[1]) # number=3\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = 
provider.get_backend(backend_str)\n # qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = Aer.get_backend(backend_str)\n # transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit_QC456.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = provider.get_backend(\"ibmq_belem\")\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=33\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.cx(input_qubit[0],input_qubit[3]) # number=13\n prog.cx(input_qubit[0],input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=18\n prog.rx(-3.1101767270538954,input_qubit[1]) # number=27\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[1]) # number=26\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.x(input_qubit[3]) # number=29\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[0]) # number=30\n prog.cz(input_qubit[3],input_qubit[0]) # number=31\n prog.h(input_qubit[0]) # number=32\n prog.cx(input_qubit[3],input_qubit[0]) # number=23\n prog.z(input_qubit[3]) # number=24\n prog.cx(input_qubit[3],input_qubit[0]) # number=25\n prog.cx(input_qubit[3],input_qubit[0]) # number=22\n prog.h(input_qubit[3]) # number=8\n prog.z(input_qubit[3]) # number=28\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('statevector_simulator')\n sample_shot =8000\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n backend = FakeVigo()\n circuit1 = 
transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_Class2296.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=10\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=5\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.swap(input_qubit[1],input_qubit[0]) # number=6\n prog.swap(input_qubit[1],input_qubit[0]) # number=7\n prog.y(input_qubit[3]) # number=8\n prog.y(input_qubit[3]) # number=9\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =5600\n writefile = open(\"../data/startQiskit_noisy62.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = FakeYorktown()\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=36\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.cx(input_qubit[0],input_qubit[3]) # number=13\n prog.cx(input_qubit[0],input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.cx(input_qubit[0],input_qubit[3]) # number=27\n prog.x(input_qubit[3]) # number=28\n prog.h(input_qubit[3]) # number=30\n prog.cz(input_qubit[0],input_qubit[3]) # number=31\n prog.h(input_qubit[3]) # number=32\n prog.cx(input_qubit[3],input_qubit[0]) # number=20\n prog.h(input_qubit[0]) # number=33\n prog.cz(input_qubit[3],input_qubit[0]) # number=34\n prog.h(input_qubit[0]) # number=35\n prog.z(input_qubit[3]) # number=24\n prog.cx(input_qubit[3],input_qubit[0]) # number=25\n prog.cx(input_qubit[3],input_qubit[0]) # number=22\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = FakeVigo()\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = 
FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy2042.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=42\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=35\n prog.cz(input_qubit[0],input_qubit[3]) # number=36\n prog.h(input_qubit[3]) # number=37\n prog.h(input_qubit[3]) # number=22\n prog.cx(input_qubit[0],input_qubit[3]) # number=32\n prog.x(input_qubit[3]) # number=33\n prog.cx(input_qubit[0],input_qubit[3]) # number=34\n prog.h(input_qubit[3]) # number=19\n prog.cz(input_qubit[0],input_qubit[3]) # number=20\n prog.h(input_qubit[3]) # number=21\n prog.z(input_qubit[3]) # number=10\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n prog.h(input_qubit[0]) # number=26\n prog.cz(input_qubit[1],input_qubit[0]) # number=27\n prog.h(input_qubit[0]) # number=28\n prog.z(input_qubit[1]) # number=24\n prog.h(input_qubit[2]) # number=39\n prog.cz(input_qubit[3],input_qubit[2]) # number=40\n prog.h(input_qubit[2]) # number=41\n prog.h(input_qubit[0]) # number=29\n prog.cz(input_qubit[1],input_qubit[0]) # number=30\n prog.h(input_qubit[0]) # number=31\n prog.h(input_qubit[1]) # number=18\n prog.rx(2.8902652413026093,input_qubit[2]) # number=13\n\n prog.y(input_qubit[1]) # number=11\n prog.y(input_qubit[1]) # number=12\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('qasm_simulator')\n sample_shot =8000\n\n 
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit2925.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=39\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=36\n prog.cz(input_qubit[0],input_qubit[3]) # number=37\n prog.h(input_qubit[3]) # number=38\n prog.h(input_qubit[3]) # number=23\n prog.cz(input_qubit[0],input_qubit[3]) # number=24\n prog.h(input_qubit[3]) # number=25\n prog.x(input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=32\n prog.cx(input_qubit[3],input_qubit[0]) # number=20\n prog.cx(input_qubit[3],input_qubit[0]) # number=26\n prog.z(input_qubit[3]) # number=27\n prog.h(input_qubit[0]) # number=29\n prog.cz(input_qubit[3],input_qubit[0]) # number=30\n prog.h(input_qubit[0]) # number=31\n prog.h(input_qubit[0]) # number=33\n prog.cz(input_qubit[3],input_qubit[0]) # number=34\n prog.h(input_qubit[0]) # number=35\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('statevector_simulator')\n sample_shot =8000\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): 
round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_Class2285.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=36\nimport cirq\nimport qiskit\nfrom qiskit import IBMQ\nfrom qiskit.providers.ibmq import least_busy\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=15\n prog.cz(input_qubit[0],input_qubit[3]) # number=16\n prog.h(input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=13\n prog.h(input_qubit[3]) # number=20\n prog.cz(input_qubit[0],input_qubit[3]) # number=21\n prog.h(input_qubit[3]) # number=22\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n prog.cx(input_qubit[0],input_qubit[3]) # number=33\n prog.x(input_qubit[3]) # number=34\n prog.cx(input_qubit[0],input_qubit[3]) # number=35\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[1]) # number=29\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.h(input_qubit[0]) # number=23\n prog.cz(input_qubit[2],input_qubit[0]) # number=24\n prog.h(input_qubit[0]) # number=25\n prog.y(input_qubit[2]) # number=30\n prog.cx(input_qubit[2],input_qubit[0]) # number=11\n prog.cx(input_qubit[2],input_qubit[0]) # number=18\n prog.h(input_qubit[0]) # number=26\n prog.x(input_qubit[2]) # number=31\n prog.cz(input_qubit[2],input_qubit[0]) # number=27\n prog.h(input_qubit[0]) # number=28\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = least_busy(provider.backends(filters=lambda x: 
x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_QC2348.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=9\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[1]) # number=4\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n prog.x(input_qubit[1]) # number=2\n prog.x(input_qubit[1]) # number=3\n prog.cx(input_qubit[1],input_qubit[0]) # number=5\n prog.cx(input_qubit[1],input_qubit[0]) # number=6\n prog.cx(input_qubit[1],input_qubit[0]) # number=7\n prog.cx(input_qubit[1],input_qubit[0]) # number=8\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n prog = circuit1\n\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n\n writefile = open(\"../data/startQiskit_Class124.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=13\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n prog.y(input_qubit[1]) # number=2\n prog.y(input_qubit[1]) # number=4\n prog.y(input_qubit[1]) # number=3\n prog.cx(input_qubit[1],input_qubit[0]) # number=7\n prog.x(input_qubit[0]) # number=8\n prog.h(input_qubit[0]) # number=10\n prog.cz(input_qubit[1],input_qubit[0]) # number=11\n prog.h(input_qubit[0]) # number=12\n prog.x(input_qubit[0]) # number=6\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n prog = circuit1\n\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n\n writefile = open(\"../data/startQiskit_Class182.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=15\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.cx(input_qubit[0],input_qubit[2]) # number=9\n prog.x(input_qubit[2]) # number=10\n prog.h(input_qubit[2]) # number=12\n prog.cz(input_qubit[0],input_qubit[2]) # number=13\n prog.h(input_qubit[2]) # number=14\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=5\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.cx(input_qubit[1],input_qubit[0]) # number=7\n prog.cx(input_qubit[1],input_qubit[0]) # number=8\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =5600\n writefile = open(\"../data/startQiskit_QC277.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = provider.get_backend(\"ibmq_5_yorktown\")\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=49\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.cx(input_qubit[0],input_qubit[3]) # number=13\n prog.cx(input_qubit[0],input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=37\n prog.cz(input_qubit[0],input_qubit[3]) # number=38\n prog.h(input_qubit[3]) # number=39\n prog.cx(input_qubit[0],input_qubit[3]) # number=40\n prog.x(input_qubit[3]) # number=41\n prog.h(input_qubit[3]) # number=43\n prog.cz(input_qubit[0],input_qubit[3]) # number=44\n prog.h(input_qubit[3]) # number=45\n prog.h(input_qubit[3]) # number=30\n prog.cz(input_qubit[0],input_qubit[3]) # number=31\n prog.h(input_qubit[3]) # number=32\n prog.h(input_qubit[0]) # number=33\n prog.cz(input_qubit[3],input_qubit[0]) # number=34\n prog.rx(0.33300882128051834,input_qubit[2]) # number=36\n prog.h(input_qubit[0]) # number=35\n prog.cx(input_qubit[3],input_qubit[0]) # number=23\n prog.cx(input_qubit[3],input_qubit[0]) # number=46\n prog.z(input_qubit[3]) # number=47\n prog.cx(input_qubit[3],input_qubit[0]) # number=48\n prog.cx(input_qubit[3],input_qubit[0]) # number=25\n prog.cx(input_qubit[3],input_qubit[0]) # number=22\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == 
'__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('statevector_simulator')\n sample_shot =8000\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_Class3343.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=50\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n prog.h(input_qubit[0]) # number=44\n prog.cz(input_qubit[3],input_qubit[0]) # number=45\n prog.h(input_qubit[0]) # number=46\n prog.cx(input_qubit[3],input_qubit[0]) # number=47\n prog.z(input_qubit[3]) # number=48\n prog.cx(input_qubit[3],input_qubit[0]) # number=49\n prog.cx(input_qubit[3],input_qubit[0]) # number=34\n prog.rx(0.11938052083641225,input_qubit[1]) # number=36\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.rx(1.4765485471872026,input_qubit[2]) # number=35\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.cx(input_qubit[1],input_qubit[0]) # number=41\n prog.x(input_qubit[0]) # number=42\n prog.cx(input_qubit[1],input_qubit[0]) # number=43\n prog.x(input_qubit[4]) # number=30\n prog.x(input_qubit[1]) # number=10\n prog.x(input_qubit[2]) # number=11\n prog.rx(0.45238934211692994,input_qubit[3]) # number=38\n prog.y(input_qubit[1]) # number=39\n prog.rx(-2.5258404934861938,input_qubit[1]) # number=25\n prog.h(input_qubit[3]) # number=29\n prog.cx(input_qubit[0],input_qubit[3]) # number=22\n prog.x(input_qubit[3]) # number=23\n prog.cx(input_qubit[0],input_qubit[3]) # number=24\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.rx(-0.0722566310325653,input_qubit[4]) # number=37\n prog.x(input_qubit[1]) # number=14\n prog.cx(input_qubit[0],input_qubit[2]) # number=26\n prog.x(input_qubit[2]) # number=27\n prog.h(input_qubit[4]) # number=40\n prog.cx(input_qubit[0],input_qubit[2]) # number=28\n prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n backend = BasicAer.get_backend('qasm_simulator')\n sample_shot =7924\n\n info = execute(prog, 
backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit1590.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=18\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n #for i in range(n):\n # prog.measure(input_qubit[i], classicals[i])\n\n prog.y(input_qubit[1]) # number=2\n prog.y(input_qubit[1]) # number=4\n prog.y(input_qubit[1]) # number=3\n prog.rx(2.0860175219836226,input_qubit[1]) # number=7\n prog.x(input_qubit[0]) # number=5\n prog.x(input_qubit[0]) # number=6\n prog.h(input_qubit[0]) # number=10\n prog.cz(input_qubit[1],input_qubit[0]) # number=11\n prog.h(input_qubit[0]) # number=12\n prog.h(input_qubit[0]) # number=13\n prog.cz(input_qubit[1],input_qubit[0]) # number=14\n prog.h(input_qubit[0]) # number=15\n prog.x(input_qubit[1]) # number=16\n prog.x(input_qubit[1]) # number=17\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('qasm_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n prog = circuit1\n\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"../data/startQiskit336.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=37\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=19\n prog.cz(input_qubit[0],input_qubit[3]) # number=20\n prog.h(input_qubit[3]) # number=21\n prog.h(input_qubit[3]) # number=24\n prog.cz(input_qubit[0],input_qubit[3]) # number=25\n prog.h(input_qubit[3]) # number=26\n prog.cx(input_qubit[0],input_qubit[3]) # number=31\n prog.x(input_qubit[3]) # number=32\n prog.h(input_qubit[3]) # number=34\n prog.cz(input_qubit[0],input_qubit[3]) # number=35\n prog.h(input_qubit[3]) # number=36\n prog.cx(input_qubit[0],input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.y(input_qubit[1]) # number=29\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[1]) # number=30\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n prog.swap(input_qubit[3],input_qubit[0]) # number=22\n prog.swap(input_qubit[3],input_qubit[0]) # number=23\n prog.swap(input_qubit[1],input_qubit[0]) # number=27\n prog.swap(input_qubit[1],input_qubit[0]) # number=28\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = FakeVigo()\n sample_shot =8000\n\n info = 
execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy2320.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=7\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.x(input_qubit[2]) # number=2\n prog.cx(input_qubit[2],input_qubit[1]) # number=4\n prog.z(input_qubit[2]) # number=3\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = provider.get_backend(backend_str)\n # qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = Aer.get_backend(backend_str)\n # 
transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit_noisy43.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = FakeYorktown()\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=12\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.x(input_qubit[2]) # number=5\n prog.cx(input_qubit[0],input_qubit[2]) # number=9\n prog.x(input_qubit[2]) # number=10\n prog.cx(input_qubit[0],input_qubit[2]) # number=11\n prog.cx(input_qubit[1],input_qubit[0]) # number=7\n prog.cx(input_qubit[1],input_qubit[0]) # number=8\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =3962\n writefile = open(\"../data/startQiskit_QC77.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = provider.get_backend(\"ibmq_5_yorktown\")\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=42\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n prog.h(input_qubit[0]) # number=39\n prog.cz(input_qubit[3],input_qubit[0]) # number=40\n prog.h(input_qubit[0]) # number=41\n prog.z(input_qubit[3]) # number=33\n prog.cx(input_qubit[3],input_qubit[0]) # number=34\n prog.rx(0.11938052083641225,input_qubit[1]) # number=36\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.rx(1.4765485471872026,input_qubit[2]) # number=35\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.x(input_qubit[0]) # number=9\n prog.x(input_qubit[4]) # number=30\n prog.x(input_qubit[1]) # number=10\n prog.x(input_qubit[2]) # number=11\n prog.rx(0.45238934211692994,input_qubit[3]) # number=38\n prog.rx(-2.5258404934861938,input_qubit[1]) # number=25\n prog.h(input_qubit[3]) # number=29\n prog.cx(input_qubit[0],input_qubit[3]) # number=22\n prog.x(input_qubit[3]) # number=23\n prog.cx(input_qubit[0],input_qubit[3]) # number=24\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.rx(-0.0722566310325653,input_qubit[4]) # number=37\n prog.x(input_qubit[1]) # number=14\n prog.cx(input_qubit[0],input_qubit[2]) # number=26\n prog.x(input_qubit[2]) # number=27\n prog.cx(input_qubit[0],input_qubit[2]) # number=28\n prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n backend = BasicAer.get_backend('qasm_simulator')\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit1126.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n 
print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=41\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=35\n prog.cz(input_qubit[0],input_qubit[3]) # number=36\n prog.h(input_qubit[3]) # number=37\n prog.h(input_qubit[3]) # number=22\n prog.cx(input_qubit[0],input_qubit[3]) # number=32\n prog.x(input_qubit[3]) # number=33\n prog.cx(input_qubit[0],input_qubit[3]) # number=34\n prog.h(input_qubit[3]) # number=19\n prog.cz(input_qubit[0],input_qubit[3]) # number=20\n prog.h(input_qubit[3]) # number=21\n prog.cx(input_qubit[3],input_qubit[0]) # number=38\n prog.z(input_qubit[3]) # number=39\n prog.cx(input_qubit[3],input_qubit[0]) # number=40\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n prog.h(input_qubit[0]) # number=26\n prog.cz(input_qubit[1],input_qubit[0]) # number=27\n prog.h(input_qubit[0]) # number=28\n prog.z(input_qubit[1]) # number=24\n prog.h(input_qubit[0]) # number=29\n prog.cz(input_qubit[1],input_qubit[0]) # number=30\n prog.h(input_qubit[0]) # number=31\n prog.h(input_qubit[1]) # number=18\n prog.rx(2.8902652413026093,input_qubit[2]) # number=13\n\n prog.y(input_qubit[1]) # number=11\n prog.y(input_qubit[1]) # number=12\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend 
= FakeVigo()\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy2660.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=11\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[0]) # number=1\n prog.cx(input_qubit[3],input_qubit[0]) # number=8\n prog.z(input_qubit[3]) # number=9\n prog.cx(input_qubit[3],input_qubit[0]) # number=10\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.swap(input_qubit[3],input_qubit[0]) # number=5\n prog.swap(input_qubit[3],input_qubit[0]) # number=6\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =5600\n writefile = open(\"../data/startQiskit92.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = BasicAer.get_backend('qasm_simulator')\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=73\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[2]) # number=38\n prog.cz(input_qubit[0],input_qubit[2]) # number=39\n prog.h(input_qubit[2]) # number=40\n prog.h(input_qubit[2]) # number=59\n prog.cz(input_qubit[0],input_qubit[2]) # number=60\n prog.h(input_qubit[2]) # number=61\n prog.h(input_qubit[2]) # number=42\n prog.cz(input_qubit[0],input_qubit[2]) # number=43\n prog.h(input_qubit[2]) # number=44\n prog.h(input_qubit[2]) # number=48\n prog.cz(input_qubit[0],input_qubit[2]) # number=49\n prog.h(input_qubit[2]) # number=50\n prog.h(input_qubit[2]) # number=70\n prog.cz(input_qubit[0],input_qubit[2]) # number=71\n prog.h(input_qubit[2]) # number=72\n prog.x(input_qubit[2]) # number=55\n prog.h(input_qubit[2]) # number=67\n prog.cz(input_qubit[0],input_qubit[2]) # number=68\n prog.h(input_qubit[2]) # number=69\n prog.h(input_qubit[2]) # number=64\n prog.cz(input_qubit[0],input_qubit[2]) # number=65\n prog.h(input_qubit[2]) # number=66\n prog.cx(input_qubit[0],input_qubit[2]) # number=37\n prog.h(input_qubit[2]) # number=51\n prog.cz(input_qubit[0],input_qubit[2]) # number=52\n prog.h(input_qubit[2]) # number=53\n prog.h(input_qubit[2]) # number=25\n prog.cz(input_qubit[0],input_qubit[2]) # number=26\n prog.h(input_qubit[2]) # number=27\n prog.h(input_qubit[1]) # number=7\n prog.cz(input_qubit[2],input_qubit[1]) # number=8\n 
prog.rx(0.17592918860102857,input_qubit[2]) # number=34\n prog.rx(-0.3989822670059037,input_qubit[1]) # number=30\n prog.h(input_qubit[1]) # number=9\n prog.h(input_qubit[1]) # number=18\n prog.rx(2.3310617489636263,input_qubit[2]) # number=58\n prog.cz(input_qubit[2],input_qubit[1]) # number=19\n prog.h(input_qubit[1]) # number=20\n prog.x(input_qubit[1]) # number=62\n prog.y(input_qubit[1]) # number=14\n prog.h(input_qubit[1]) # number=22\n prog.cz(input_qubit[2],input_qubit[1]) # number=23\n prog.rx(-0.9173450548482197,input_qubit[1]) # number=57\n prog.cx(input_qubit[2],input_qubit[1]) # number=63\n prog.h(input_qubit[1]) # number=24\n prog.z(input_qubit[2]) # number=3\n prog.z(input_qubit[1]) # number=41\n prog.x(input_qubit[1]) # number=17\n prog.y(input_qubit[2]) # number=5\n prog.x(input_qubit[2]) # number=21\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = provider.get_backend(backend_str)\n # qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = Aer.get_backend(backend_str)\n # transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit_Class364.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=10\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[1]) # number=4\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n prog.cx(input_qubit[0],input_qubit[1]) # number=7\n prog.x(input_qubit[1]) # number=8\n prog.cx(input_qubit[0],input_qubit[1]) # number=9\n prog.x(input_qubit[1]) # number=3\n prog.cx(input_qubit[1],input_qubit[0]) # number=5\n prog.cx(input_qubit[1],input_qubit[0]) # number=6\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n prog = circuit1\n\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n\n writefile = open(\"../data/startQiskit_Class143.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=46\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=20\n prog.cz(input_qubit[0],input_qubit[3]) # number=21\n prog.h(input_qubit[3]) # number=22\n prog.x(input_qubit[3]) # number=13\n prog.h(input_qubit[3]) # number=23\n prog.cz(input_qubit[0],input_qubit[3]) # number=24\n prog.h(input_qubit[3]) # number=25\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n prog.y(input_qubit[2]) # number=18\n prog.h(input_qubit[0]) # number=43\n prog.cz(input_qubit[3],input_qubit[0]) # number=44\n prog.h(input_qubit[0]) # number=45\n prog.z(input_qubit[3]) # number=41\n prog.cx(input_qubit[3],input_qubit[0]) # number=42\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.h(input_qubit[0]) # number=33\n prog.cz(input_qubit[2],input_qubit[0]) # number=34\n prog.h(input_qubit[0]) # number=35\n prog.h(input_qubit[1]) # number=19\n prog.h(input_qubit[0]) # number=15\n prog.cz(input_qubit[2],input_qubit[0]) # number=16\n prog.h(input_qubit[0]) # number=17\n prog.rx(1.6838936623241292,input_qubit[2]) # number=36\n prog.y(input_qubit[1]) # number=26\n prog.y(input_qubit[1]) # number=27\n prog.swap(input_qubit[1],input_qubit[0]) # number=29\n prog.swap(input_qubit[1],input_qubit[0]) # number=30\n prog.x(input_qubit[0]) # number=31\n prog.cx(input_qubit[1],input_qubit[0]) # number=37\n prog.x(input_qubit[0]) # number=38\n prog.cx(input_qubit[1],input_qubit[0]) # number=39\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: 
bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('statevector_simulator')\n sample_shot =8000\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_Class3399.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=49\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.cx(input_qubit[0],input_qubit[3]) # number=13\n prog.cx(input_qubit[0],input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=37\n prog.cz(input_qubit[0],input_qubit[3]) # number=38\n prog.h(input_qubit[3]) # number=39\n prog.cx(input_qubit[0],input_qubit[3]) # number=40\n prog.cx(input_qubit[0],input_qubit[3]) # number=46\n prog.x(input_qubit[3]) # number=47\n prog.cx(input_qubit[0],input_qubit[3]) # number=48\n prog.h(input_qubit[3]) # number=43\n prog.cz(input_qubit[0],input_qubit[3]) # number=44\n prog.h(input_qubit[3]) # number=45\n prog.h(input_qubit[3]) # number=30\n prog.cz(input_qubit[0],input_qubit[3]) # number=31\n prog.h(input_qubit[3]) # number=32\n prog.h(input_qubit[0]) # number=33\n prog.cz(input_qubit[3],input_qubit[0]) # number=34\n prog.rx(0.33300882128051834,input_qubit[2]) # number=36\n prog.h(input_qubit[0]) # number=35\n prog.cx(input_qubit[3],input_qubit[0]) # number=23\n prog.z(input_qubit[3]) # number=24\n prog.cx(input_qubit[3],input_qubit[0]) # number=25\n prog.cx(input_qubit[3],input_qubit[0]) # number=22\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n 
prog.y(input_qubit[2]) # number=11\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = FakeVigo()\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy3345.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=13\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[1]) # number=8\n prog.cz(input_qubit[0],input_qubit[1]) # number=9\n prog.h(input_qubit[1]) # number=10\n prog.cx(input_qubit[0],input_qubit[1]) # number=5\n prog.cx(input_qubit[0],input_qubit[1]) # number=7\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n #for i in range(n):\n # prog.measure(input_qubit[i], classicals[i])\n\n prog.x(input_qubit[0]) # number=3\n prog.y(input_qubit[1]) # number=6\n prog.x(input_qubit[0]) # number=4\n prog.x(input_qubit[1]) # number=11\n prog.x(input_qubit[1]) # number=12\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('qasm_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n prog = circuit1\n\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"../data/startQiskit250.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=69\nimport cirq\nimport qiskit\nfrom qiskit import IBMQ\nfrom qiskit.providers.ibmq import least_busy\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[0]) # number=57\n prog.cz(input_qubit[4],input_qubit[0]) # number=58\n prog.h(input_qubit[0]) # number=59\n prog.z(input_qubit[4]) # number=55\n prog.cx(input_qubit[4],input_qubit[0]) # number=56\n prog.h(input_qubit[2]) # number=50\n prog.cz(input_qubit[4],input_qubit[2]) # number=51\n prog.h(input_qubit[2]) # number=52\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.h(input_qubit[0]) # number=28\n prog.h(input_qubit[0]) # number=66\n prog.cz(input_qubit[3],input_qubit[0]) # number=67\n prog.h(input_qubit[0]) # number=68\n prog.z(input_qubit[3]) # number=61\n prog.cx(input_qubit[3],input_qubit[0]) # number=62\n prog.cz(input_qubit[1],input_qubit[0]) # number=29\n prog.h(input_qubit[0]) # number=30\n prog.h(input_qubit[0]) # number=43\n prog.cz(input_qubit[1],input_qubit[0]) # number=44\n prog.h(input_qubit[0]) # number=45\n prog.cx(input_qubit[1],input_qubit[0]) # number=35\n prog.cx(input_qubit[1],input_qubit[0]) # number=38\n prog.x(input_qubit[0]) # number=39\n prog.cx(input_qubit[1],input_qubit[0]) # number=40\n prog.cx(input_qubit[1],input_qubit[0]) # number=37\n prog.h(input_qubit[0]) # number=46\n prog.cz(input_qubit[1],input_qubit[0]) # number=47\n prog.h(input_qubit[0]) # number=48\n prog.h(input_qubit[0]) # number=63\n prog.cz(input_qubit[1],input_qubit[0]) # number=64\n prog.h(input_qubit[0]) # number=65\n prog.x(input_qubit[1]) # number=10\n prog.x(input_qubit[2]) # number=11\n prog.x(input_qubit[3]) # number=12\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.cx(input_qubit[0],input_qubit[1]) # number=22\n prog.y(input_qubit[2]) # number=41\n prog.x(input_qubit[1]) # number=23\n prog.cx(input_qubit[0],input_qubit[1]) # number=24\n prog.rx(1.0398671683382215,input_qubit[2]) # number=31\n prog.x(input_qubit[2]) # number=15\n prog.x(input_qubit[3]) # 
number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_QC1883.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=47\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=16\n prog.cz(input_qubit[0],input_qubit[3]) # number=17\n prog.rx(-0.5686282702997527,input_qubit[3]) # number=32\n prog.h(input_qubit[3]) # number=18\n prog.h(input_qubit[3]) # number=26\n prog.cz(input_qubit[0],input_qubit[3]) # number=27\n prog.h(input_qubit[3]) # number=28\n prog.x(input_qubit[3]) # number=21\n prog.rx(0.4241150082346221,input_qubit[2]) # number=33\n prog.cx(input_qubit[0],input_qubit[3]) # number=22\n prog.cx(input_qubit[0],input_qubit[3]) # number=12\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.rx(-0.9927432785343745,input_qubit[1]) # number=43\n prog.h(input_qubit[2]) # number=23\n prog.cz(input_qubit[1],input_qubit[2]) # number=24\n prog.h(input_qubit[2]) # number=25\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=34\n prog.cz(input_qubit[2],input_qubit[0]) # number=35\n prog.h(input_qubit[0]) # number=36\n prog.cx(input_qubit[2],input_qubit[0]) # number=37\n prog.cx(input_qubit[2],input_qubit[0]) # number=44\n prog.z(input_qubit[2]) # number=45\n prog.cx(input_qubit[2],input_qubit[0]) # number=46\n prog.cx(input_qubit[2],input_qubit[0]) # number=39\n prog.h(input_qubit[0]) # number=40\n prog.cz(input_qubit[2],input_qubit[0]) # number=41\n prog.h(input_qubit[0]) # number=42\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[0]) # number=14\n prog.y(input_qubit[0]) # number=15\n # circuit 
end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = FakeVigo()\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy3210.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=73\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.rx(-0.09738937226128368,input_qubit[2]) # number=2\n prog.h(input_qubit[1]) # number=33\n prog.y(input_qubit[2]) # number=56\n prog.cz(input_qubit[2],input_qubit[1]) # number=34\n prog.h(input_qubit[1]) # number=35\n prog.h(input_qubit[1]) # number=3\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = provider.get_backend(backend_str)\n # 
qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = Aer.get_backend(backend_str)\n # transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit_Class408.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=54\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.h(input_qubit[0]) # number=31\n prog.cz(input_qubit[1],input_qubit[0]) # number=32\n prog.h(input_qubit[0]) # number=33\n prog.h(input_qubit[1]) # number=44\n prog.cz(input_qubit[0],input_qubit[1]) # number=45\n prog.h(input_qubit[1]) # number=46\n prog.x(input_qubit[1]) # number=41\n prog.h(input_qubit[1]) # number=48\n prog.cz(input_qubit[0],input_qubit[1]) # number=49\n prog.h(input_qubit[1]) # number=50\n prog.cx(input_qubit[1],input_qubit[0]) # number=51\n prog.x(input_qubit[0]) # number=52\n prog.cx(input_qubit[1],input_qubit[0]) # number=53\n prog.cx(input_qubit[1],input_qubit[0]) # number=27\n prog.h(input_qubit[1]) # number=37\n prog.cz(input_qubit[0],input_qubit[1]) # number=38\n prog.h(input_qubit[1]) # number=39\n prog.x(input_qubit[1]) # number=35\n prog.cx(input_qubit[0],input_qubit[1]) # number=36\n prog.x(input_qubit[2]) # number=11\n prog.x(input_qubit[3]) # number=12\n prog.cx(input_qubit[3],input_qubit[2]) # number=43\n prog.cx(input_qubit[3],input_qubit[2]) # number=47\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.cx(input_qubit[0],input_qubit[1]) # number=22\n prog.x(input_qubit[1]) # number=23\n prog.cx(input_qubit[0],input_qubit[1]) # number=24\n prog.x(input_qubit[2]) # number=15\n prog.x(input_qubit[1]) # number=29\n prog.y(input_qubit[4]) # number=28\n prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n backend = FakeVigo()\n sample_shot 
=7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy1314.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=13\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.swap(input_qubit[1],input_qubit[0]) # number=6\n prog.swap(input_qubit[1],input_qubit[0]) # number=7\n prog.cx(input_qubit[0],input_qubit[1]) # number=10\n prog.x(input_qubit[1]) # number=11\n prog.cx(input_qubit[0],input_qubit[1]) # number=12\n prog.x(input_qubit[1]) # number=9\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =5600\n writefile = open(\"../data/startQiskit_QC575.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = provider.get_backend(\"ibmq_5_yorktown\")\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=55\nimport cirq\nimport qiskit\nfrom qiskit import IBMQ\nfrom qiskit.providers.ibmq import least_busy\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.cx(input_qubit[0],input_qubit[1]) # number=52\n prog.x(input_qubit[1]) # number=53\n prog.cx(input_qubit[0],input_qubit[1]) # number=54\n prog.h(input_qubit[1]) # number=26\n prog.cz(input_qubit[4],input_qubit[1]) # number=27\n prog.h(input_qubit[1]) # number=28\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n prog.h(input_qubit[1]) # number=34\n prog.cz(input_qubit[4],input_qubit[1]) # number=35\n prog.z(input_qubit[4]) # number=46\n prog.rx(0.8011061266653969,input_qubit[2]) # number=37\n prog.h(input_qubit[1]) # number=36\n prog.z(input_qubit[3]) # number=51\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.cx(input_qubit[1],input_qubit[0]) # number=38\n prog.x(input_qubit[0]) # number=39\n prog.cx(input_qubit[1],input_qubit[0]) # number=40\n prog.cx(input_qubit[0],input_qubit[1]) # number=42\n prog.rx(-1.928937889304133,input_qubit[2]) # number=49\n prog.x(input_qubit[1]) # number=43\n prog.cx(input_qubit[0],input_qubit[1]) # number=44\n prog.x(input_qubit[2]) # number=11\n prog.y(input_qubit[1]) # number=45\n prog.x(input_qubit[3]) # number=12\n prog.h(input_qubit[2]) # number=41\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.cx(input_qubit[1],input_qubit[0]) # number=22\n prog.x(input_qubit[4]) # number=47\n prog.x(input_qubit[0]) # number=23\n prog.cx(input_qubit[1],input_qubit[0]) # number=24\n prog.cx(input_qubit[0],input_qubit[1]) # number=30\n prog.x(input_qubit[1]) # number=31\n prog.cx(input_qubit[0],input_qubit[1]) # number=32\n prog.x(input_qubit[2]) # number=15\n prog.h(input_qubit[4]) # number=29\n prog.x(input_qubit[3]) # number=16\n prog.z(input_qubit[3]) # number=50\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == 
'__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_QC1911.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=16\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n prog.y(input_qubit[1]) # number=2\n prog.y(input_qubit[1]) # number=4\n prog.y(input_qubit[1]) # number=3\n prog.h(input_qubit[0]) # number=13\n prog.cz(input_qubit[1],input_qubit[0]) # number=14\n prog.h(input_qubit[0]) # number=15\n prog.x(input_qubit[0]) # number=8\n prog.cx(input_qubit[1],input_qubit[0]) # number=9\n prog.cx(input_qubit[1],input_qubit[0]) # number=10\n prog.x(input_qubit[0]) # number=11\n prog.cx(input_qubit[1],input_qubit[0]) # number=12\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n prog = circuit1\n\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n\n writefile = open(\"../data/startQiskit_Class230.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=70\nimport cirq\nimport qiskit\nfrom qiskit import IBMQ\nfrom qiskit.providers.ibmq import least_busy\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[1]) # number=29\n prog.cz(input_qubit[3],input_qubit[1]) # number=30\n prog.h(input_qubit[1]) # number=31\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.h(input_qubit[0]) # number=38\n prog.cz(input_qubit[1],input_qubit[0]) # number=39\n prog.h(input_qubit[0]) # number=40\n prog.h(input_qubit[0]) # number=51\n prog.cz(input_qubit[1],input_qubit[0]) # number=52\n prog.h(input_qubit[0]) # number=53\n prog.h(input_qubit[0]) # number=64\n prog.cz(input_qubit[1],input_qubit[0]) # number=65\n prog.h(input_qubit[0]) # number=66\n prog.x(input_qubit[0]) # number=49\n prog.h(input_qubit[0]) # number=57\n prog.cz(input_qubit[1],input_qubit[0]) # number=58\n prog.h(input_qubit[0]) # number=59\n prog.h(input_qubit[0]) # number=54\n prog.cz(input_qubit[1],input_qubit[0]) # number=55\n prog.h(input_qubit[0]) # number=56\n prog.h(input_qubit[4]) # number=41\n prog.h(input_qubit[0]) # number=61\n prog.cz(input_qubit[1],input_qubit[0]) # number=62\n prog.h(input_qubit[0]) # number=63\n prog.cx(input_qubit[0],input_qubit[1]) # number=67\n prog.x(input_qubit[1]) # number=68\n prog.cx(input_qubit[0],input_qubit[1]) # number=69\n prog.h(input_qubit[2]) # number=25\n prog.cz(input_qubit[0],input_qubit[2]) # number=26\n prog.h(input_qubit[2]) # number=27\n prog.x(input_qubit[2]) # number=23\n prog.cx(input_qubit[0],input_qubit[2]) # number=24\n prog.cx(input_qubit[0],input_qubit[3]) # number=32\n prog.x(input_qubit[3]) # number=33\n prog.h(input_qubit[3]) # number=42\n prog.cz(input_qubit[0],input_qubit[3]) # number=43\n prog.h(input_qubit[3]) # number=44\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.rx(0.6157521601035993,input_qubit[1]) # number=60\n prog.x(input_qubit[1]) # number=14\n prog.x(input_qubit[2]) # number=15\n prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # 
number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_QC1786.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=12\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nimport networkx as nx\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef make_circuit(n:int) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n prog = QuantumCircuit(input_qubit)\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.cx(input_qubit[0],input_qubit[2]) # number=9\n prog.x(input_qubit[2]) # number=10\n prog.cx(input_qubit[0],input_qubit[2]) # number=11\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=5\n\n for edge in E:\n k = edge[0]\n l = edge[1]\n prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])\n prog.p(gamma, k)\n prog.p(gamma, l)\n\n prog.rx(2 * beta, range(len(V)))\n\n prog.swap(input_qubit[1],input_qubit[0]) # number=7\n prog.swap(input_qubit[1],input_qubit[0]) # number=8\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n n = 4\n V = np.arange(0, n, 1)\n E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n\n step_size = 0.1\n\n a_gamma = np.arange(0, np.pi, step_size)\n a_beta = np.arange(0, np.pi, step_size)\n a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)\n\n F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (\n 1 + np.cos(4 * a_gamma) ** 2)\n\n result = np.where(F1 == np.amax(F1))\n a = list(zip(result[0], result[1]))[0]\n\n gamma = a[0] * step_size\n beta = a[1] * step_size\n\n prog = make_circuit(4)\n sample_shot =5600\n writefile = open(\"../data/startQiskit_noisy116.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = FakeYorktown()\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.measure_all()\n prog = circuit1\n\n info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=59\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[0]) # number=41\n prog.cz(input_qubit[1],input_qubit[0]) # number=42\n prog.h(input_qubit[0]) # number=43\n prog.z(input_qubit[1]) # number=37\n prog.h(input_qubit[0]) # number=51\n prog.cz(input_qubit[1],input_qubit[0]) # number=52\n prog.h(input_qubit[0]) # number=53\n prog.h(input_qubit[4]) # number=21\n prog.x(input_qubit[2]) # number=39\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=56\n prog.cz(input_qubit[3],input_qubit[0]) # number=57\n prog.h(input_qubit[0]) # number=58\n prog.h(input_qubit[0]) # number=48\n prog.cz(input_qubit[3],input_qubit[0]) # number=49\n prog.h(input_qubit[0]) # number=50\n prog.z(input_qubit[3]) # number=46\n prog.cx(input_qubit[3],input_qubit[0]) # number=47\n prog.x(input_qubit[4]) # number=40\n prog.cx(input_qubit[3],input_qubit[0]) # number=35\n\n\n prog.x(input_qubit[0]) # number=9\n prog.cx(input_qubit[0],input_qubit[1]) # number=29\n prog.x(input_qubit[1]) # number=30\n prog.cx(input_qubit[0],input_qubit[1]) # number=31\n prog.x(input_qubit[2]) # number=11\n prog.x(input_qubit[1]) # number=44\n prog.x(input_qubit[3]) # number=12\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.cx(input_qubit[1],input_qubit[0]) # number=24\n prog.x(input_qubit[0]) # number=25\n prog.cx(input_qubit[1],input_qubit[0]) # number=26\n prog.x(input_qubit[1]) # number=14\n prog.x(input_qubit[2]) # number=15\n prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.cx(input_qubit[4],input_qubit[3]) # number=54\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n prog.x(input_qubit[1]) # number=22\n prog.y(input_qubit[1]) # number=32\n prog.x(input_qubit[1]) # number=23\n prog.cx(input_qubit[4],input_qubit[3]) # number=55\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda 
rep: str(int(rep == key))\n prog = make_circuit(5,f)\n backend = BasicAer.get_backend('qasm_simulator')\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit1740.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=34\nimport cirq\nimport qiskit\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.cx(input_qubit[0],input_qubit[3]) # number=13\n prog.cx(input_qubit[0],input_qubit[3]) # number=17\n prog.x(input_qubit[3]) # number=18\n prog.cx(input_qubit[0],input_qubit[3]) # number=19\n prog.cx(input_qubit[0],input_qubit[3]) # number=15\n prog.h(input_qubit[1]) # number=2\n prog.cx(input_qubit[2],input_qubit[1]) # number=27\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.y(input_qubit[3]) # number=12\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[0]) # number=24\n prog.cz(input_qubit[3],input_qubit[0]) # number=25\n prog.h(input_qubit[0]) # number=26\n prog.h(input_qubit[0]) # number=31\n prog.cz(input_qubit[3],input_qubit[0]) # number=32\n prog.h(input_qubit[0]) # number=33\n prog.z(input_qubit[3]) # number=29\n prog.cx(input_qubit[3],input_qubit[0]) # number=30\n prog.x(input_qubit[2]) # number=23\n prog.cx(input_qubit[3],input_qubit[0]) # number=22\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[2]) # number=10\n prog.y(input_qubit[2]) # number=11\n # circuit end\n\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = \"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = BasicAer.get_backend('statevector_simulator')\n sample_shot =8000\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n backend = FakeVigo()\n circuit1 = 
transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_Class2002.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=10\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.cx(input_qubit[0],input_qubit[1]) # number=2\n prog.cx(input_qubit[0],input_qubit[1]) # number=5\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n prog.x(input_qubit[0]) # number=3\n prog.y(input_qubit[1]) # number=6\n prog.cx(input_qubit[1],input_qubit[0]) # number=7\n prog.x(input_qubit[0]) # number=8\n prog.cx(input_qubit[1],input_qubit[0]) # number=9\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n prog = circuit1\n\n\n info = execute(prog, backend=backend).result().get_statevector()\n qubits = round(log2(len(info)))\n info = {\n np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)\n for i in range(2 ** qubits)\n }\n\n writefile = open(\"../data/startQiskit_Class145.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=3\n# total number=31\n\nimport numpy as np\n\nfrom qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ\nfrom qiskit.visualization import plot_histogram\nfrom typing import *\nfrom pprint import pprint\nfrom math import log2\nfrom collections import Counter\nfrom qiskit.test.mock import FakeVigo, FakeYorktown\n\nkernel = 'circuit/bernstein'\n\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\n\ndef build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename=(kernel + '-oracle.png'))\n return oracle\n\n\ndef build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:\n # implement the Bernstein-Vazirani circuit\n zero = np.binary_repr(0, n)\n b = f(zero)\n\n # initial n + 1 bits\n input_qubit = QuantumRegister(n+1, \"qc\")\n classicals = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classicals)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(input_qubit[n])\n # circuit begin\n prog.h(input_qubit[1]) # number=1\n prog.rx(-0.09738937226128368,input_qubit[2]) # number=2\n prog.cx(input_qubit[2],input_qubit[1]) # number=27\n prog.h(input_qubit[1]) # number=3\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n prog.h(input_qubit[n])\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [input_qubit[n]])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n\n return prog\n\n\ndef get_statevector(prog: QuantumCircuit) -> Any:\n state_backend = Aer.get_backend('statevector_simulator')\n statevec = execute(prog, state_backend).result()\n quantum_state = statevec.get_statevector()\n qubits = round(log2(len(quantum_state)))\n quantum_state = {\n \"|\" + np.binary_repr(i, qubits) + \">\": quantum_state[i]\n for i in range(2 ** qubits)\n }\n return quantum_state\n\n\ndef evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:\n # Q: which backend should we use?\n\n # get state vector\n quantum_state = get_statevector(prog)\n\n # get simulate results\n\n # provider = IBMQ.load_account()\n # backend = provider.get_backend(backend_str)\n # qobj = compile(prog, backend, shots)\n # job = backend.run(qobj)\n # job.result()\n backend = 
Aer.get_backend(backend_str)\n # transpile/schedule -> assemble -> backend.run\n results = execute(prog, backend, shots=shots).result()\n counts = results.get_counts()\n a = Counter(counts).most_common(1)[0][0][::-1]\n\n return {\n \"measurements\": counts,\n # \"state\": statevec,\n \"quantum_state\": quantum_state,\n \"a\": a,\n \"b\": b\n }\n\n\ndef bernstein_test_1(rep: str):\n \"\"\"011 . x + 1\"\"\"\n a = \"011\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_2(rep: str):\n \"\"\"000 . x + 0\"\"\"\n a = \"000\"\n b = \"0\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\ndef bernstein_test_3(rep: str):\n \"\"\"111 . x + 1\"\"\"\n a = \"111\"\n b = \"1\"\n return bitwise_xor(bitwise_dot(a, rep), b)\n\n\nif __name__ == \"__main__\":\n n = 2\n a = \"11\"\n b = \"1\"\n f = lambda rep: \\\n bitwise_xor(bitwise_dot(a, rep), b)\n prog = build_circuit(n, f)\n sample_shot =4000\n writefile = open(\"../data/startQiskit_Class179.csv\", \"w\")\n # prog.draw('mpl', filename=(kernel + '.png'))\n backend = BasicAer.get_backend('statevector_simulator')\n\n circuit1 = transpile(prog, FakeYorktown())\n circuit1.h(qubit=2)\n circuit1.x(qubit=3)\n\n info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()\n\n print(info, file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(), file=writefile)\n print(circuit1, file=writefile)\n writefile.close()\n",
"# qubit number=5\n# total number=69\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n oracle = QuantumCircuit(controls, name=\"Zf\")\n\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n\n # oracle.h(controls[n])\n if n >= 2:\n oracle.mcu1(pi, controls[1:], controls[0])\n\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[0]) # number=3\n prog.h(input_qubit[1]) # number=4\n prog.h(input_qubit[0]) # number=57\n prog.cz(input_qubit[4],input_qubit[0]) # number=58\n prog.h(input_qubit[0]) # number=59\n prog.z(input_qubit[4]) # number=55\n prog.cx(input_qubit[4],input_qubit[0]) # number=56\n prog.h(input_qubit[2]) # number=50\n prog.cz(input_qubit[4],input_qubit[2]) # number=51\n prog.h(input_qubit[2]) # number=52\n prog.h(input_qubit[2]) # number=5\n prog.h(input_qubit[3]) # number=6\n prog.h(input_qubit[4]) # number=21\n\n Zf = build_oracle(n, f)\n\n repeat = floor(sqrt(2 ** n) * pi / 4)\n for i in range(repeat):\n prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])\n prog.h(input_qubit[0]) # number=1\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n\n\n prog.h(input_qubit[0]) # number=28\n prog.h(input_qubit[0]) # number=66\n prog.cz(input_qubit[3],input_qubit[0]) # number=67\n prog.h(input_qubit[0]) # number=68\n prog.z(input_qubit[3]) # number=61\n prog.cx(input_qubit[3],input_qubit[0]) # number=62\n prog.cz(input_qubit[1],input_qubit[0]) # number=29\n prog.h(input_qubit[0]) # number=30\n prog.h(input_qubit[0]) # number=43\n prog.cz(input_qubit[1],input_qubit[0]) # number=44\n prog.h(input_qubit[0]) # number=45\n prog.cx(input_qubit[1],input_qubit[0]) # number=35\n prog.cx(input_qubit[1],input_qubit[0]) # number=38\n prog.x(input_qubit[0]) # number=39\n prog.cx(input_qubit[1],input_qubit[0]) # number=40\n prog.cx(input_qubit[1],input_qubit[0]) # number=37\n prog.h(input_qubit[0]) # number=46\n prog.cz(input_qubit[1],input_qubit[0]) # number=47\n prog.h(input_qubit[0]) # number=48\n prog.h(input_qubit[0]) # number=63\n prog.cz(input_qubit[1],input_qubit[0]) # number=64\n prog.h(input_qubit[0]) # number=65\n prog.x(input_qubit[1]) # number=10\n prog.x(input_qubit[2]) # number=11\n prog.x(input_qubit[3]) # number=12\n\n if n>=2:\n prog.mcu1(pi,input_qubit[1:],input_qubit[0])\n\n prog.x(input_qubit[0]) # number=13\n prog.cx(input_qubit[0],input_qubit[1]) # number=22\n prog.y(input_qubit[2]) # number=41\n prog.x(input_qubit[1]) # number=23\n prog.cx(input_qubit[0],input_qubit[1]) # number=24\n prog.rx(1.0398671683382215,input_qubit[2]) # number=31\n prog.x(input_qubit[2]) # number=15\n 
prog.x(input_qubit[3]) # number=16\n\n\n prog.h(input_qubit[0]) # number=17\n prog.h(input_qubit[1]) # number=18\n prog.h(input_qubit[2]) # number=19\n prog.h(input_qubit[3]) # number=20\n\n\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\n\nif __name__ == '__main__':\n key = \"00000\"\n f = lambda rep: str(int(rep == key))\n prog = make_circuit(5,f)\n backend = FakeVigo()\n sample_shot =7924\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy1883.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=9\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.x(input_qubit[1]) # number=5\n prog.cx(input_qubit[0],input_qubit[1]) # number=4\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n #for i in range(n):\n # prog.measure(input_qubit[i], classicals[i])\n\n prog.cx(input_qubit[0],input_qubit[1]) # number=6\n prog.x(input_qubit[1]) # number=7\n prog.cx(input_qubit[0],input_qubit[1]) # number=8\n prog.x(input_qubit[1]) # number=3\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n IBMQ.load_account() \n provider = IBMQ.get_provider(hub='ibm-q') \n provider.backends()\n backend = provider.get_backend(\"ibmq_belem\")\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n prog = circuit1\n\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"../data/startQiskit_QC79.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=7\nimport cirq\nimport qiskit\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n #for i in range(n):\n # prog.measure(input_qubit[i], classicals[i])\n\n prog.y(input_qubit[1]) # number=2\n prog.y(input_qubit[1]) # number=4\n prog.y(input_qubit[1]) # number=3\n prog.swap(input_qubit[1],input_qubit[0]) # number=5\n prog.swap(input_qubit[1],input_qubit[0]) # number=6\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = BasicAer.get_backend('qasm_simulator')\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n prog = circuit1\n\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"../data/startQiskit78.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=2\n# total number=9\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import IBMQ\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2,floor, sqrt, pi\nimport numpy as np\nimport networkx as nx\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f^\\pm\n # NOTE: use U1 gate (P gate) with \\lambda = 180 ==> CZ gate\n # or multi_control_Z_gate (issue #127)\n\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')\n return oracle\n\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n\n input_qubit = QuantumRegister(n, \"qc\")\n target = QuantumRegister(1, \"qt\")\n prog = QuantumCircuit(input_qubit, target)\n\n # inverse last one (can be omitted if using O_f^\\pm)\n prog.x(target)\n\n # apply H to get superposition\n for i in range(n):\n prog.h(input_qubit[i])\n\n prog.h(input_qubit[1]) # number=1\n prog.h(input_qubit[1]) # number=4\n prog.h(target)\n prog.barrier()\n\n # apply oracle O_f\n oracle = build_oracle(n, f)\n prog.append(\n oracle.to_gate(),\n [input_qubit[i] for i in range(n)] + [target])\n\n # apply H back (QFT on Z_2^n)\n for i in range(n):\n prog.h(input_qubit[i])\n prog.barrier()\n\n # measure\n #for i in range(n):\n # prog.measure(input_qubit[i], classicals[i])\n\n prog.x(input_qubit[1]) # number=2\n prog.x(input_qubit[1]) # number=3\n prog.cx(input_qubit[1],input_qubit[0]) # number=5\n prog.cx(input_qubit[1],input_qubit[0]) # number=6\n prog.y(input_qubit[1]) # number=7\n prog.y(input_qubit[1]) # number=8\n # circuit end\n return prog\n\n\n\n\nif __name__ == '__main__':\n n = 2\n f = lambda rep: rep[-1]\n # f = lambda rep: \"1\" if rep[0:2] == \"01\" or rep[0:2] == \"10\" else \"0\"\n # f = lambda rep: \"0\"\n prog = make_circuit(n, f)\n sample_shot =2800\n backend = FakeVigo()\n\n circuit1 = transpile(prog,FakeVigo())\n circuit1.x(qubit=3)\n circuit1.x(qubit=3)\n circuit1.measure_all()\n prog = circuit1\n\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n\n writefile = open(\"../data/startQiskit_noisy126.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.depth(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n",
"# qubit number=4\n# total number=43\nimport cirq\nimport qiskit\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.test.mock import FakeVigo\n\nfrom qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit import BasicAer, execute, transpile\nfrom pprint import pprint\nfrom qiskit.test.mock import FakeVigo\nfrom math import log2\nimport numpy as np\nimport networkx as nx\n\ndef bitwise_xor(s: str, t: str) -> str:\n length = len(s)\n res = []\n for i in range(length):\n res.append(str(int(s[i]) ^ int(t[i])))\n return ''.join(res[::-1])\n\n\ndef bitwise_dot(s: str, t: str) -> str:\n length = len(s)\n res = 0\n for i in range(length):\n res += int(s[i]) * int(t[i])\n return str(res % 2)\n\ndef build_oracle(n: int, f) -> QuantumCircuit:\n # implement the oracle O_f\n # NOTE: use multi_control_toffoli_gate ('noancilla' mode)\n # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html\n # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates\n # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate\n controls = QuantumRegister(n, \"ofc\")\n target = QuantumRegister(1, \"oft\")\n oracle = QuantumCircuit(controls, target, name=\"Of\")\n for i in range(2 ** n):\n rep = np.binary_repr(i, n)\n if f(rep) == \"1\":\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n oracle.mct(controls, target[0], None, mode='noancilla')\n for j in range(n):\n if rep[j] == \"0\":\n oracle.x(controls[j])\n # oracle.barrier()\n return oracle\n\ndef make_circuit(n:int,f) -> QuantumCircuit:\n # circuit begin\n input_qubit = QuantumRegister(n,\"qc\")\n classical = ClassicalRegister(n, \"qm\")\n prog = QuantumCircuit(input_qubit, classical)\n prog.h(input_qubit[3]) # number=16\n prog.cz(input_qubit[0],input_qubit[3]) # number=17\n prog.rx(-0.5686282702997527,input_qubit[3]) # number=32\n prog.h(input_qubit[3]) # number=18\n prog.h(input_qubit[3]) # number=26\n prog.cz(input_qubit[0],input_qubit[3]) # number=27\n prog.h(input_qubit[3]) # number=28\n prog.x(input_qubit[3]) # number=21\n prog.rx(0.4241150082346221,input_qubit[2]) # number=33\n prog.cx(input_qubit[0],input_qubit[3]) # number=22\n prog.cx(input_qubit[0],input_qubit[3]) # number=12\n prog.h(input_qubit[1]) # number=2\n prog.h(input_qubit[2]) # number=3\n prog.h(input_qubit[3]) # number=4\n prog.h(input_qubit[0]) # number=5\n\n oracle = build_oracle(n-1, f)\n prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])\n prog.h(input_qubit[1]) # number=6\n prog.h(input_qubit[2]) # number=23\n prog.cz(input_qubit[1],input_qubit[2]) # number=24\n prog.h(input_qubit[2]) # number=25\n prog.h(input_qubit[2]) # number=7\n prog.h(input_qubit[3]) # number=8\n prog.h(input_qubit[0]) # number=34\n prog.cz(input_qubit[2],input_qubit[0]) # number=35\n prog.h(input_qubit[0]) # number=36\n prog.cx(input_qubit[2],input_qubit[0]) # number=37\n prog.cx(input_qubit[2],input_qubit[0]) # number=40\n prog.z(input_qubit[2]) # number=41\n prog.cx(input_qubit[2],input_qubit[0]) # number=42\n prog.cx(input_qubit[2],input_qubit[0]) # number=39\n prog.cx(input_qubit[2],input_qubit[0]) # number=31\n prog.h(input_qubit[0]) # number=9\n\n prog.y(input_qubit[0]) # number=14\n prog.y(input_qubit[0]) # number=15\n # circuit end\n\n for i in range(n):\n prog.measure(input_qubit[i], classical[i])\n\n\n return prog\n\n\n\nif __name__ == '__main__':\n a = 
\"111\"\n b = \"0\"\n f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)\n prog = make_circuit(4,f)\n backend = FakeVigo()\n sample_shot =8000\n\n info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()\n backend = FakeVigo()\n circuit1 = transpile(prog,backend,optimization_level=2)\n\n writefile = open(\"../data/startQiskit_noisy2692.csv\",\"w\")\n print(info,file=writefile)\n print(\"results end\", file=writefile)\n print(circuit1.__len__(),file=writefile)\n print(circuit1,file=writefile)\n writefile.close()\n"
] | [
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.cos",
"numpy.arange",
"numpy.amax",
"numpy.sin",
"numpy.meshgrid"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
],
[
"numpy.binary_repr"
]
] |
JoZimmer/ParOptBeam | [
"50d15d8d822a2718f2932807e06c4a7e02f866a3"
] | [
"test_scripts/test_element_and_compare_to_kratos.py"
] | [
"from source.element.cr_beam_element import CRBeamElement\n\nimport numpy as np\n\nnp.set_printoptions(suppress=False, precision=4, linewidth=100)\n\n\ndef test_crbeam_element_update_incremental():\n material_params = {'rho': 7850, 'e': 2069000000.0, 'nu': 0.29, 'zeta': 0.05, 'lx_i': 1.2, 'is_nonlinear': True}\n element_params = {'a': 0.0001, 'asy': 0.0, 'asz': 0.0, 'iy': 0.0001, 'iz': 0.0001, 'it': 0.0001}\n\n coords = np.array([[1.2, 0.0, 0.0], [0.0, 0.0, 0.0]])\n element = CRBeamElement(material_params, element_params, coords, 0, '3D')\n\n Kd_kratos = np.array([\n [66828.2, 0, 0, 0, 0, 0],\n [0, 172417, 0, 0, 0, 0],\n [0, 0, 172417, 0, 0, 0],\n [0, 0, 0, 172417, 0, 0],\n [0, 0, 0, 0, 517250, 0],\n [0, 0, 0, 0, 0, 517250]\n ])\n\n Kd = element.Kd_mat\n\n try:\n assert (abs(Kd_kratos - Kd) < 10).all()\n except AssertionError:\n msg = \"##################################################################################\\n\"\n msg += \"Deformation Stiffness matrix\\n\"\n msg += \"Kd in Kratos:\\n\" + str(Kd_kratos)\n msg += \"\\nIt is however:\\n\" + str(Kd)\n print(msg)\n\n Ke_mat_kratos = np.array([\n [172417, 0, 0, 0, 0, 0, -172417, 0, 0, 0, 0, 0],\n [0, 1.43681e+06, 0, 0, 0, 862083, 0, -1.43681e+06, 0, 0, 0, 862083],\n [0, 0, 1.43681e+06, 0, -862083, 0, 0, 0, -1.43681e+06, 0, -862083, 0],\n [0, 0, 0, 66828.2, 0, 0, 0, 0, 0, -66828.2, 0, 0],\n [0, 0, -862083, 0, 689667, 0, 0, 0, 862083, 0, 344833, 0],\n [0, 862083, 0, 0, 0, 689667, 0, -862083, 0, 0, 0, 344833],\n [-172417, 0, 0, 0, 0, 0, 172417, 0, 0, 0, 0, 0],\n [0, -1.43681e+06, 0, 0, 0, -862083, 0, 1.43681e+06, 0, 0, 0, -862083],\n [0, 0, -1.43681e+06, 0, 862083, 0, 0, 0, 1.43681e+06, 0, 862083, 0],\n [0, 0, 0, -66828.2, 0, 0, 0, 0, 0, 66828.2, 0, 0],\n [0, 0, -862083, 0, 344833, 0, 0, 0, 862083, 0, 689667, 0],\n [0, 862083, 0, 0, 0, 344833, 0, -862083, 0, 0, 0, 689667]\n ])\n\n Ke_mat = element.Ke_mat\n\n try:\n assert (abs(Ke_mat_kratos - Ke_mat) < 10).all()\n except AssertionError:\n msg = \"##################################################################################\\n\"\n msg += \"Material Stiffness matrix\\n\"\n msg += \"Ke_mat in Kratos:\\n\" + str(Ke_mat_kratos)\n msg += \"\\nIt is however:\\n\" + str(Ke_mat)\n print(msg)\n\n Phiz = 0.0\n Phiy = 0.0\n\n CTy = (element.rho * element.A * element.L) / ((1 + Phiy) * (1 + Phiy))\n CTz = (element.rho * element.A * element.L) / ((1 + Phiz) * (1 + Phiz))\n\n CRy = (element.rho * element.Iy) / ((1 + Phiy) * (1 + Phiy) * element.L)\n CRz = (element.rho * element.Iz) / ((1 + Phiz) * (1 + Phiz) * element.L)\n\n bending_mass_matrix_z = element.build_single_mass_matrix(Phiz, CTz, CRz, element.L, +1)\n\n bending_mass_matrix_kratos_z = np.array([\n [1.13489, 0.137711, -0.663886, 0.0435114],\n [0.137711, 0.138519, -0.0435114, -0.0410891],\n [-0.663886, -0.0435114, 1.13489, -0.137711],\n [0.0435114, -0.0410891, -0.137711, 0.138519]\n ])\n\n try:\n assert (abs(bending_mass_matrix_z - bending_mass_matrix_kratos_z) < 1e-4).all()\n print(\"Bending mass_matrix z is correct\")\n except AssertionError:\n msg = \"##################################################################################\\n\"\n msg += \"Bending mass matrix z\\n\"\n msg += \"Me in Kratos:\\n\" + str(bending_mass_matrix_kratos_z)\n msg += \"\\nIt is however:\\n\" + str(bending_mass_matrix_z)\n print(msg)\n\n bending_mass_matrix_y = element.build_single_mass_matrix(Phiz, CTy, CRy, element.L, -1)\n\n bending_mass_matrix_kratos_y = np.array([\n [1.13489, -0.137711, -0.663886, -0.0435114],\n [-0.137711, 0.138519, 0.0435114, 
-0.0410891],\n [-0.663886, 0.0435114, 1.13489, 0.137711],\n [-0.0435114, -0.0410891, 0.137711, 0.138519]\n ])\n\n try:\n assert (abs(bending_mass_matrix_y - bending_mass_matrix_kratos_y) < 1e-4).all()\n print(\"Bending mass_matrix y is correct\")\n except AssertionError:\n msg = \"##################################################################################\\n\"\n msg += \"Bending mass matrix y\\n\"\n msg += \"Me in Kratos:\\n\" + str(bending_mass_matrix_kratos_y)\n msg += \"\\nIt is however:\\n\" + str(bending_mass_matrix_y)\n print(msg)\n\n Me = element._get_consistent_mass_matrix()\n\n Me_kratos = np.array([\n [0.314, 0, 0, 0, 0, 0, 0.157, 0, 0, 0, 0, 0],\n [0, 1.13489, 0, 0, 0, 0.137711, 0, -0.663886, 0, 0, 0, 0.0435114],\n [0, 0, 1.13489, 0, -0.137711, 0, 0, 0, -0.663886, 0, -0.0435114, 0],\n [0, 0, 0, 0.628, 0, 0, 0, 0, 0, 0.314, 0, 0],\n [0, 0, -0.137711, 0, 0.138519, 0, 0, 0, 0.0435114, 0, -0.0410891, 0],\n [0, 0.137711, 0, 0, 0, 0.138519, 0, -0.0435114, 0, 0, 0, -0.0410891],\n [0.157, 0, 0, 0, 0, 0, 0.314, 0, 0, 0, 0, 0],\n [0, -0.663886, 0, 0, 0, -0.0435114, 0, 1.13489, 0, 0, 0, -0.137711],\n [0, 0, -0.663886, 0, 0.0435114, 0, 0, 0, 1.13489, 0, 0.137711, 0],\n [0, 0, 0, 0.314, 0, 0, 0, 0, 0, 0.628, 0, 0],\n [0, 0, -0.0435114, 0, -0.0410891, 0, 0, 0, 0.137711, 0, 0.138519, 0],\n [0, 0.0435114, 0, 0, 0, -0.0410891, 0, -0.137711, 0, 0, 0, 0.138519]\n ])\n\n try:\n assert (abs(Me - Me_kratos) < 1e-2).all()\n print(\"Mass matrix is correct\")\n except AssertionError:\n msg = \"##################################################################################\\n\"\n msg += \"Consistent mass matrix\\n\"\n msg += \"Me in Kratos:\\n\" + str(Me_kratos)\n msg += \"\\nIt is however:\\n\" + str(Me)\n print(msg)\n"
] | [
[
"numpy.array",
"numpy.set_printoptions"
]
] |
jab0707/UncertainSCI | [
"569c978c4f67dd7bb37e730276f2a376b8639235"
] | [
"tests/test_laguerre_inv.py"
] | [
"import unittest\n\nimport numpy as np\n\nfrom UncertainSCI.families import LaguerrePolynomials\n\n\nclass IDistTestCase(unittest.TestCase):\n \"\"\"\n Tests for (Laguerre polynomial) inversed induced distributions.\n \"\"\"\n\n def test_idistinv_laguerre(self):\n \"\"\"Evaluation of Laguerre inversed induced distribution function.\"\"\"\n\n # Randomly generate x, use idist to generate u\n rho = 11*np.random.random() - 1\n L = LaguerrePolynomials(rho=rho)\n\n n = int(np.ceil(10*np.random.rand(1))[0])\n M = 25\n x1 = 4*(n+1)*np.random.rand(M)\n u = L.idist(x1, n)\n\n # see if idistinv givens x back\n x2 = L.idistinv(u, n)\n\n delta = 5e-3\n ind = np.where(np.abs(x1-x2) > delta)[:2][0]\n if ind.size > 0:\n errstr = 'Failed for rho={0:1.3f}, n={1:d}'.format(rho, n)\n else:\n errstr = ''\n\n self.assertAlmostEqual(np.linalg.norm(x1-x2, ord=np.inf), 0., delta=delta, msg=errstr)\n\n\nif __name__ == \"__main__\":\n\n unittest.main(verbosity=2)\n"
] | [
[
"numpy.random.random",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.abs"
]
] |
alnah005/aggregation-for-caesar | [
"d6bca0a1126e0397315d5773401c71075c33ee2f"
] | [
"panoptes_aggregation/running_reducers/tess_user_reducer.py"
] | [
"'''\nTESS User Reducer\n-----------------\nThis module porvides functions to calculate uesr weights for the TESS project.\nExtracts are from Ceasars `PluckFieldExtractor`.\n'''\nfrom .running_reducer_wrapper import running_reducer_wrapper\nimport numpy as np\n\n\n@running_reducer_wrapper(relevant_reduction=True)\ndef tess_user_reducer(data, **kwargs):\n '''Calculate TESS user weights\n\n Parameters\n ----------\n data : list\n A list with one item containing the extract with the user's feedback on a\n gold standard subject\n store : keyword, dict\n A dictinary with two keys:\n\n * `seed`: sum of all previous `seed` values\n * `count`: sum of all previous gold standard transits seen\n relevant_reduction : keyword, list\n A list with one item containing the results of the current subject's stats reducer.\n This item is a dictinary with two keys:\n\n * `True`: number of users who correctly identified the gold standard transits in the subject\n * `False`: number of users who incorrectly identified the gold standard transits in the subject\n\n Returns\n -------\n reduction : dict\n A dictinary with two keys:\n\n * `data`: A dictionary with the `skill` value as the only item\n * `store`: The updated store for the user\n '''\n success = [d['success'] for d in data[0]['feedback']]\n store = kwargs.pop('store')\n relevant_reduction = kwargs.pop('relevant_reduction')[0]\n try:\n d_subject = relevant_reduction['data']['difficulty']\n except:\n d_subject = 0\n\n seed_current = (np.where(success, 2, -1) * d_subject).sum()\n seed = store.get('seed', 0) + seed_current\n count = store.get('count', 0) + len(success)\n store = {\n 'seed': seed,\n 'count': count\n }\n c0 = 1\n skill = c0 * pow((1.0 + np.log10(count)), (seed / count))\n skill = min([3.0, max([0.05, skill])])\n return {\n 'skill': skill,\n '_store': store\n }\n"
] | [
[
"numpy.where",
"numpy.log10"
]
] |
StarGazer1995/OpenPCDet | [
"4af33e8badb0c8e68c7c94c71b0ec5667aad2348"
] | [
"pcdet/models/backbones_3d/spconv_unet.py"
] | [
"import torch\nimport torch.nn as nn\nimport spconv\nfrom functools import partial\nfrom .spconv_backbone import post_act_block\nfrom ...utils import common_utils\n\n\nclass SparseBasicBlock(spconv.SparseModule):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):\n super(SparseBasicBlock, self).__init__()\n self.conv1 = spconv.SubMConv3d(\n inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key\n )\n self.bn1 = norm_fn(planes)\n self.relu = nn.ReLU()\n self.conv2 = spconv.SubMConv3d(\n planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key\n )\n self.bn2 = norm_fn(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x.features\n\n assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()\n\n out = self.conv1(x)\n out.features = self.bn1(out.features)\n out.features = self.relu(out.features)\n\n out = self.conv2(out)\n out.features = self.bn2(out.features)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out.features += identity\n out.features = self.relu(out.features)\n\n return out\n\n\nclass UNetV2(nn.Module):\n \"\"\"\n Sparse Convolution based UNet for point-wise feature learning.\n Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. al)\n From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network\n \"\"\"\n def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):\n super().__init__()\n self.model_cfg = model_cfg\n self.sparse_shape = grid_size[::-1] + [1, 0, 0]\n self.voxel_size = voxel_size\n self.point_cloud_range = point_cloud_range\n\n norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)\n\n self.conv_input = spconv.SparseSequential(\n spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),\n norm_fn(16),\n nn.ReLU(),\n )\n block = post_act_block\n\n self.conv1 = spconv.SparseSequential(\n block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),\n )\n\n self.conv2 = spconv.SparseSequential(\n # [1600, 1408, 41] <- [800, 704, 21]\n block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),\n )\n\n self.conv3 = spconv.SparseSequential(\n # [800, 704, 21] <- [400, 352, 11]\n block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),\n )\n\n self.conv4 = spconv.SparseSequential(\n # [400, 352, 11] <- [200, 176, 5]\n block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),\n )\n\n last_pad = 0\n last_pad = self.model_cfg.get('last_pad', last_pad)\n\n self.conv_out = spconv.SparseSequential(\n # [200, 150, 5] -> [200, 150, 2]\n spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,\n bias=False, indice_key='spconv_down2'),\n norm_fn(128),\n nn.ReLU(),\n )\n\n # decoder\n # [400, 352, 11] <- [200, 176, 5]\n self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', 
norm_fn=norm_fn)\n self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')\n self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')\n\n # [800, 704, 21] <- [400, 352, 11]\n self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)\n self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')\n self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')\n\n # [1600, 1408, 41] <- [800, 704, 21]\n self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)\n self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')\n self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')\n\n # [1600, 1408, 41] <- [1600, 1408, 41]\n self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)\n self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')\n\n self.conv5 = spconv.SparseSequential(\n block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')\n )\n self.num_point_features = 16\n\n def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):\n x_trans = conv_t(x_lateral)\n x = x_trans\n x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)\n x_m = conv_m(x)\n x = self.channel_reduction(x, x_m.features.shape[1])\n x.features = x_m.features + x.features\n x = conv_inv(x)\n return x\n\n @staticmethod\n def channel_reduction(x, out_channels):\n \"\"\"\n Args:\n x: x.features (N, C1)\n out_channels: C2\n\n Returns:\n\n \"\"\"\n features = x.features\n n, in_channels = features.shape\n assert (in_channels % out_channels == 0) and (in_channels >= out_channels)\n\n x.features = features.view(n, out_channels, -1).sum(dim=2)\n return x\n\n def forward(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size: int\n vfe_features: (num_voxels, C)\n voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]\n Returns:\n batch_dict:\n encoded_spconv_tensor: sparse tensor\n point_features: (N, C)\n \"\"\"\n voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']\n batch_size = batch_dict['batch_size']\n input_sp_tensor = spconv.SparseConvTensor(\n features=voxel_features,\n indices=voxel_coords.int(),\n spatial_shape=self.sparse_shape,\n batch_size=batch_size\n )\n x = self.conv_input(input_sp_tensor)\n \n x_conv1 = self.conv1(x)\n x_conv2 = self.conv2(x_conv1)\n x_conv3 = self.conv3(x_conv2)\n x_conv4 = self.conv4(x_conv3)\n\n # for detection head\n # [200, 176, 5] -> [200, 176, 2]\n out = self.conv_out(x_conv4)\n\n # for segmentation head\n # [400, 352, 11] <- [200, 176, 5]\n x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)\n # [800, 704, 21] <- [400, 352, 11]\n x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)\n # [1600, 1408, 41] <- [800, 704, 21]\n x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)\n # [1600, 1408, 41] <- [1600, 1408, 41]\n x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)\n\n batch_dict['point_features'] = x_up1.features\n point_coords = common_utils.get_voxel_centers(\n x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,\n point_cloud_range=self.point_cloud_range\n )\n batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), 
dim=1)\n batch_dict['encoded_spconv_tensor'] = out\n batch_dict['encoded_spconv_tensor_stride'] = 8\n return batch_dict\n"
] | [
[
"torch.nn.ReLU",
"torch.cat"
]
] |
ChenLi0830/Clevo-Categorization-Service | [
"44b509786849a6dce610171d86e5da68ad748b4b"
] | [
"temp/train_cnn.py"
] | [
"'''This example demonstrates the use of Convolution1D for text classification.\n'''\n\nfrom __future__ import print_function\n\n\nimport sys\nsys.path.append('/Users/wangwei/anaconda2/envs/python3_keras/lib/python3.6/site-packages')\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.layers import Embedding\nfrom keras.layers import Conv1D, GlobalMaxPooling1D\nfrom keras import backend as K\n#os.chdir('/Users/wangwei/cuda_keras_projets/keras/examples/')\n\nimport six.moves.cPickle as pickle # for python 3\n#import cPickle for python 2.7\n\nimport pandas as pd\nimport numpy as np\n\nimport jieba\n\n\n# set parameters:\n\nmaxlen = 64 #11\nbatch_size = 5\nembedding_dims = 300\nfilters = 50 # 100\nkernel_size = 3\nhidden_dims = 100\nepochs = 10\n\ndef get_idx_from_sent(sent, word_idx_map, k=300):\n \"\"\"\n Transforms sentence into a list of indices. \n \"\"\"\n x = []\n words = list(jieba.cut(sent, cut_all=False)) \n\n \n for word in words:\n \n if word in word_idx_map:\n x.append(word_idx_map[word])\n return x\n\ndef make_idx_data_cv(revs, word_idx_map, cv, k=300):\n \"\"\"\n Transforms sentences into a 2-d matrix.\n \"\"\"\n train, test = [], []\n train_y, test_y = [],[]\n for rev in revs:\n sent = get_idx_from_sent(rev['text'], word_idx_map, k)\n \n if rev[\"split\"]==cv:\n test.append(sent)\n test_y.append(rev[\"y\"])\n else:\n train.append(sent)\n train_y.append(rev[\"y\"])\n #train = np.array(train, dtype='int')\n #test = np.array(test, dtype='int')\n \n return [train, test, train_y, test_y]\n\n\n\nif __name__==\"__main__\": \n print('The script that is running is :', __file__)\n print('Depending on the training datasets: \\n maximum length of a sentence is :', maxlen)\n\n\t######### Main code starts here ###########\n print(\"loading data...\")\n x = pickle.load(open(\"mr_folder/mr.p\",\"rb\"), encoding='latin1')\n revs, W, W2, word_idx_map, word_idx_map2, vocab = x[0], x[1], x[2], x[3], x[4],x[5]\n print(\"data loaded!\")\n print(\"using: word2vec vectors\")\n\n tmp = pd.DataFrame(revs)\n\n max_l = np.max(tmp[\"num_words\"])\n print(\"number of sentences: \" , str(len(revs)))\n print(\"vocab size: \" , str(len(vocab)))\n print(\"max sentence length: \" + str(max_l))\n\n max_features = len(vocab)#50\n\n #### Make datasets\n datasets = make_idx_data_cv(revs, word_idx_map2, 1, k=300)\n x_train = datasets[0]\n x_test = datasets[1]\n y_train = datasets[2]\n y_test = datasets[3]\n \n\n print('Pad sequences (samples x time)')\n x_train = sequence.pad_sequences(x_train, maxlen=maxlen)\n x_test = sequence.pad_sequences(x_test, maxlen=maxlen)\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n\n ############# modelling with CNN\n import keras\n num_classes = 9\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n print('lengh of y_train is :', y_train.shape[0])\n print('Build model...')\n\n K.clear_session()\n \n \n model = Sequential()\n \n # we start off with an efficient embedding layer which maps\n # our vocab indices into embedding_dims dimensions\n model.add(Embedding(max_features+1,\n\t embedding_dims,\n\t weights=[W],\n\t input_length=maxlen,\n\t trainable=False))\n model.add(Dropout(0.2))\n\n # we add a Convolution1D, which will learn filters\n # word group filters of size filter_length:\n model.add(Conv1D(filters,\n\t kernel_size,\n\t 
padding='valid',\n\t activation='relu',\n\t strides=1))\n # we use max pooling:\n model.add(GlobalMaxPooling1D())\n\n # We add a vanilla hidden layer:\n model.add(Dense(hidden_dims))\n model.add(Dropout(0.2))\n #model.add(Activation('relu'))\n\n # We project onto a single unit output layer, and squash it with a sigmoid:\n #model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n\n ######################\n model.add(Dropout(0.2))\n model.add(Dense(num_classes, activation='softmax'))\n # model.compile(loss=keras.losses.categorical_crossentropy,\n # optimizer=keras.optimizers.Adadelta(),\n # metrics=['accuracy'])\n model.compile(optimizer='rmsprop', \n\t loss='categorical_crossentropy', \n\t metrics=['accuracy'])\n model.fit(x_train, y_train,\n\t batch_size=batch_size,\n\t epochs=epochs,\n\t verbose=1,\n\t validation_data=(x_test, y_test))\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n # serialize model to JSON\n model_json = model.to_json()\n with open(\"mr_folder/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(\"mr_folder/model.h5\")\n print(\"Saved model to disk\")"
] | [
[
"pandas.DataFrame",
"numpy.max"
]
] |
MOONJOOYOUNG/AdamP | [
"64a63106a2ac62bcbe90627f2a83ec1b488f3416"
] | [
"adamp/sgdp.py"
] | [
"\"\"\"\nAdamP\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.optimizer import Optimizer, required\nimport math\n\nclass SGDP(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,\n nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)\n super(SGDP, self).__init__(params, defaults)\n\n def _channel_view(self, x):\n return x.view(x.size(0), -1)\n\n def _layer_view(self, x):\n return x.view(1, -1)\n\n def _cosine_similarity(self, x, y, eps, view_func):\n x = view_func(x)\n y = view_func(y)\n\n x_norm = x.norm(dim=1).add_(eps)\n y_norm = y.norm(dim=1).add_(eps)\n dot = (x * y).sum(dim=1)\n\n return dot.abs() / x_norm / y_norm\n\n def _projection(self, p, grad, perturb, delta, wd_ratio, eps):\n wd = 1\n expand_size = [-1] + [1] * (len(p.shape) - 1)\n for view_func in [self._channel_view, self._layer_view]:\n\n cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)\n\n if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):\n p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)\n perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)\n wd = wd_ratio\n\n return perturb, wd\n\n return perturb, wd\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state['momentum'] = torch.zeros_like(p.data)\n\n # SGD\n buf = state['momentum']\n buf.mul_(momentum).add_(1 - dampening, grad)\n if nesterov:\n d_p = grad + momentum * buf\n else:\n d_p = buf\n\n # Projection\n wd_ratio = 1\n if len(p.shape) > 1:\n d_p, wd_ratio = self._projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])\n\n # Weight decay\n if weight_decay != 0:\n p.data.mul_(1 - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))\n\n # Step\n p.data.add_(-group['lr'], d_p)\n\n return loss\n"
] | [
[
"torch.zeros_like"
]
] |
antgonza/qtp-diversity | [
"0c2ec84711decf798ea6ffdb3e97dc9582ba4035"
] | [
"qtp_diversity/tests/test_validate.py"
] | [
"# -----------------------------------------------------------------------------\n# Copyright (c) 2014--, The Qiita Development Team.\n#\n# Distributed under the terms of the BSD 3-clause License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# -----------------------------------------------------------------------------\n\nfrom unittest import main\nfrom tempfile import mkdtemp, mkstemp\nfrom os.path import exists, isdir, join\nfrom os import remove, close\nfrom shutil import rmtree\nfrom json import dumps\n\nfrom skbio.stats.distance import randdm\nfrom skbio import OrdinationResults\nfrom qiita_client import ArtifactInfo\nfrom qiita_client.testing import PluginTestCase\nimport pandas as pd\nimport numpy as np\n\nfrom qtp_diversity import plugin\nfrom qtp_diversity.validate import (\n _validate_distance_matrix, _validate_ordination_results,\n _validate_alpha_vector, _validate_feature_data_taxonomy, validate)\n\n\nclass ValidateTests(PluginTestCase):\n def setUp(self):\n self.out_dir = mkdtemp()\n self._clean_up_files = [self.out_dir]\n self.metadata = {\n '1.SKM4.640180': {'col': \"doesn't really matters\"},\n '1.SKB8.640193': {'col': \"doesn't really matters\"},\n '1.SKD8.640184': {'col': \"doesn't really matters\"},\n '1.SKM9.640192': {'col': \"doesn't really matters\"},\n '1.SKB7.640196': {'col': \"doesn't really matters\"}}\n\n plugin('https://localhost:8383', 'register', 'ignored')\n\n def tearDown(self):\n for fp in self._clean_up_files:\n if exists(fp):\n if isdir(fp):\n rmtree(fp)\n else:\n remove(fp)\n\n def _create_distance_matrix(self, sample_ids):\n dm = randdm(len(sample_ids), sample_ids)\n fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n dm.write(fp)\n return fp\n\n def _create_ordination_results(self, sample_ids):\n eigvals = [0.51236726, 0.30071909, 0.26791207, 0.20898868]\n proportion_explained = [0.2675738328, 0.157044696, 0.1399118638,\n 0.1091402725]\n axis_labels = ['PC1', 'PC2', 'PC3', 'PC4']\n samples = [[-2.584, 1.739, 3.828, -1.944],\n [-2.710, -1.859, -8.648, 1.180],\n [2.350, 9.625, -3.457, -3.208],\n [2.614, -1.114, 1.476, 2.908],\n [2.850, -1.925, 6.232, 1.381]]\n ord_res = OrdinationResults(\n short_method_name='PCoA',\n long_method_name='Principal Coordinate Analysis',\n eigvals=pd.Series(eigvals, index=axis_labels),\n samples=pd.DataFrame(np.asarray(samples), index=sample_ids,\n columns=axis_labels),\n proportion_explained=pd.Series(proportion_explained,\n index=axis_labels))\n fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n ord_res.write(fp)\n return fp\n\n def _create_alpha_vector(self, sample_ids):\n fd, fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n with open(fp, 'w') as f:\n f.write(\"\\tobserved_otus\\n\")\n for s_id in sample_ids:\n f.write(\"%s\\t%d\\n\" % (s_id, np.random.randint(1, 200)))\n\n return fp\n\n def _create_job(self, a_type, files, analysis):\n parameters = {'template': None,\n 'files': dumps(files),\n 'artifact_type': a_type,\n 'analysis': analysis}\n data = {'command': dumps(['Diversity types', '0.1.1', 'Validate']),\n 'parameters': dumps(parameters),\n 'status': 'running'}\n job_id = self.qclient.post(\n '/apitest/processing_job/', data=data)['job']\n return job_id, parameters\n\n def test_validate_distance_matrix(self):\n # Create a distance matrix\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192', '1.SKB7.640196']\n dm_fp = self._create_distance_matrix(sample_ids)\n\n # Test success\n obs_success, obs_ainfo, obs_error = 
_validate_distance_matrix(\n {'plain_text': [dm_fp]}, self.metadata, self.out_dir)\n self.assertTrue(obs_success)\n exp_ainfo = [ArtifactInfo(None, \"distance_matrix\",\n [(dm_fp, 'plain_text')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n self.assertEqual(obs_error, \"\")\n\n # Test failure\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192', 'NotASample']\n dm_fp = self._create_distance_matrix(sample_ids)\n obs_success, obs_ainfo, obs_error = _validate_distance_matrix(\n {'plain_text': [dm_fp]}, self.metadata, self.out_dir)\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n self.assertEqual(obs_error, \"The distance matrix contain samples not \"\n \"present in the metadata\")\n\n def test_validate_ordination_results(self):\n # Create the ordination results\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192', '1.SKB7.640196']\n ord_res_fp = self._create_ordination_results(sample_ids)\n\n # Test success\n obs_success, obs_ainfo, obs_error = _validate_ordination_results(\n {'plain_text': [ord_res_fp]}, self.metadata, self.out_dir)\n self.assertTrue(obs_success)\n exp_ainfo = [ArtifactInfo(None, \"ordination_results\",\n [(ord_res_fp, 'plain_text')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n self.assertEqual(obs_error, \"\")\n\n # Test failure\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192', 'NotASample']\n ord_res_fp = self._create_ordination_results(sample_ids)\n obs_success, obs_ainfo, obs_error = _validate_ordination_results(\n {'plain_text': [ord_res_fp]}, self.metadata, self.out_dir)\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n self.assertEqual(obs_error, \"The ordination results contain samples \"\n \"not present in the metadata\")\n\n def test_validate_alpha_vector(self):\n # Create the alpha vector\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192']\n alpha_vector_fp = self._create_alpha_vector(sample_ids)\n\n # Test success\n obs_success, obs_ainfo, obs_error = _validate_alpha_vector(\n {'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)\n self.assertEqual(obs_error, \"\")\n self.assertTrue(obs_success)\n exp_ainfo = [ArtifactInfo(None, \"alpha_vector\",\n [(alpha_vector_fp, 'plain_text')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n\n # Test failure wrong ids\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n 'NotASample']\n alpha_vector_fp = self._create_alpha_vector(sample_ids)\n obs_success, obs_ainfo, obs_error = _validate_alpha_vector(\n {'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)\n self.assertEqual(obs_error, \"The alpha vector contains samples not \"\n \"present in the metadata\")\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n\n # Test failure wrong format\n fd, alpha_vector_fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n with open(alpha_vector_fp, 'w') as f:\n f.write(\"\\tobserved_otus\\nsample 1\\n\")\n obs_success, obs_ainfo, obs_error = _validate_alpha_vector(\n {'plain_text': [alpha_vector_fp]}, self.metadata, self.out_dir)\n self.assertEqual(obs_error, \"The alpha vector format is incorrect\")\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n\n def test_validate(self):\n # Test artifact type error\n job_id, params = self._create_job(\n 'NotAType', {'plan_text': 'Will fail before checking this'}, 1)\n obs_success, obs_ainfo, obs_error = validate(\n self.qclient, job_id, params, self.out_dir)\n 
self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n self.assertEqual(\n obs_error, \"Unknown artifact type NotAType. Supported types: \"\n \"FeatureData[Taxonomy], alpha_vector, distance_matrix, \"\n \"ordination_results\")\n\n # Test missing metadata error - to be fair, I don't know how this error\n # can happen in the live system, but better be safe than sorry\n job_id, params = self._create_job(\n 'distance_matrix', {'plan_text': 'Will fail before checking this'},\n None)\n obs_success, obs_ainfo, obs_error = validate(\n self.qclient, job_id, params, self.out_dir)\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n self.assertEqual(\n obs_error, \"Missing metadata information\")\n\n # Test distance matrix success\n sample_ids = ['1.SKM4.640180', '1.SKB8.640193', '1.SKD8.640184',\n '1.SKM9.640192', '1.SKB7.640196']\n dm_fp = self._create_distance_matrix(sample_ids)\n job_id, params = self._create_job(\n 'distance_matrix', {'plain_text': [dm_fp]}, 1)\n obs_success, obs_ainfo, obs_error = validate(\n self.qclient, job_id, params, self.out_dir)\n self.assertTrue(obs_success)\n html_fp = join(self.out_dir, 'index.html')\n exp_ainfo = [ArtifactInfo(None, \"distance_matrix\",\n [(dm_fp, 'plain_text'),\n (html_fp, 'html_summary')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n self.assertEqual(obs_error, \"\")\n\n # Test ordination results success\n ord_res_fp = self._create_ordination_results(sample_ids)\n job_id, params = self._create_job(\n 'ordination_results', {'plain_text': [ord_res_fp]}, 1)\n obs_success, obs_ainfo, obs_error = validate(\n self.qclient, job_id, params, self.out_dir)\n self.assertTrue(obs_success)\n html_fp = join(self.out_dir, 'index.html')\n esf_fp = join(self.out_dir, 'emperor_support_files')\n exp_ainfo = [ArtifactInfo(None, \"ordination_results\",\n [(ord_res_fp, 'plain_text'),\n (html_fp, 'html_summary'),\n (esf_fp, 'html_summary_dir')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n self.assertEqual(obs_error, \"\")\n\n # Test alpha vector success\n alpha_vector_fp = self._create_alpha_vector(sample_ids)\n job_id, params = self._create_job(\n 'alpha_vector', {'plain_text': [alpha_vector_fp]}, 1)\n obs_success, obs_ainfo, obs_error = validate(\n self.qclient, job_id, params, self.out_dir)\n self.assertTrue(obs_success)\n html_fp = join(self.out_dir, 'index.html')\n sf_fp = join(self.out_dir, 'support_files')\n exp_ainfo = [ArtifactInfo(None, \"alpha_vector\",\n [(alpha_vector_fp, 'plain_text'),\n (html_fp, 'html_summary'),\n (sf_fp, 'html_summary_dir')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n self.assertEqual(obs_error, \"\")\n\n def test_validate_FeatureData_Taxonomy(self):\n # Create the feature data\n fd, taxonomy_fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n with open(taxonomy_fp, 'w') as f:\n f.write(\"Feature ID\\tTaxonomy\\tConfidence\\n\")\n f.write(\"TACGGAGGA\\tk__Bacteria;p__Bacteroidetes;c__Bacteroidia\\t\"\n \"0.9998743\\n\")\n f.write(\"TACGTAGGG\\tk__Bacteria;p__Firmicutes;c__Clostridia\\t\"\n \"0.9999999\\n\")\n\n # Test success\n obs_success, obs_ainfo, obs_error = _validate_feature_data_taxonomy(\n {'plain_text': [taxonomy_fp]}, None, self.out_dir)\n self.assertEqual(obs_error, \"\")\n self.assertTrue(obs_success)\n exp_ainfo = [ArtifactInfo(None, \"FeatureData[Taxonomy]\",\n [(taxonomy_fp, 'plain_text')])]\n self.assertEqual(obs_ainfo, exp_ainfo)\n\n # Test failure wrong format\n fd, taxonomy_fp = mkstemp(suffix='.txt', dir=self.out_dir)\n close(fd)\n with open(taxonomy_fp, 'w') as f:\n f.write(\"Feature 
ID\\tIt's gonna fail!\\tConfidence\\n\")\n f.write(\"TACGGAGGA\\tk__Bacteria;p__Bacteroidetes;c__Bacteroidia\\t\"\n \"0.9998743\\n\")\n f.write(\"TACGTAGGG\\tk__Bacteria;p__Firmicutes;c__Clostridia\\t\"\n \"0.9999999\\n\")\n obs_success, obs_ainfo, obs_error = _validate_feature_data_taxonomy(\n {'plain_text': [taxonomy_fp]}, None, self.out_dir)\n self.assertIn(\"The file header seems wrong\", obs_error)\n self.assertFalse(obs_success)\n self.assertIsNone(obs_ainfo)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"pandas.Series",
"numpy.asarray",
"numpy.random.randint"
]
] |
methane/astropy | [
"1a065d5ce403e226799cfb3d606fda33be0a6c08"
] | [
"astropy/coordinates/sky_coordinate.py"
] | [
"import re\nimport copy\nimport warnings\nimport operator\n\nimport numpy as np\n\nfrom astropy import _erfa as erfa\nfrom astropy.utils.compat.misc import override__dir__\nfrom astropy import units as u\nfrom astropy.constants import c as speed_of_light\nfrom astropy.utils.data_info import MixinInfo\nfrom astropy.utils import ShapedLikeNDArray\nfrom astropy.time import Time\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom .distances import Distance\nfrom .angles import Angle\nfrom .baseframe import (BaseCoordinateFrame, frame_transform_graph,\n GenericFrame)\nfrom .builtin_frames import ICRS, SkyOffsetFrame\nfrom .representation import (SphericalRepresentation,\n UnitSphericalRepresentation, SphericalDifferential)\nfrom .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data,\n _parse_coordinate_data)\n\n__all__ = ['SkyCoord', 'SkyCoordInfo']\n\n\nclass SkyCoordInfo(MixinInfo):\n \"\"\"\n Container for meta information like name, description, format. This is\n required when the object is used as a mixin column within a table, but can\n be used as a general way to store meta information.\n \"\"\"\n attrs_from_parent = set(['unit']) # Unit is read-only\n _supports_indexing = False\n\n @staticmethod\n def default_format(val):\n repr_data = val.info._repr_data\n formats = ['{0.' + compname + '.value:}' for compname\n in repr_data.components]\n return ','.join(formats).format(repr_data)\n\n @property\n def unit(self):\n repr_data = self._repr_data\n unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'\n for comp in repr_data.components)\n return unit\n\n @property\n def _repr_data(self):\n if self._parent is None:\n return None\n\n sc = self._parent\n if (issubclass(sc.representation_type, SphericalRepresentation)\n and isinstance(sc.data, UnitSphericalRepresentation)):\n repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)\n else:\n repr_data = sc.represent_as(sc.representation_type,\n in_frame_units=True)\n return repr_data\n\n def _represent_as_dict(self):\n obj = self._parent\n attrs = (list(obj.representation_component_names) +\n list(frame_transform_graph.frame_attributes.keys()))\n\n # Don't output distance if it is all unitless 1.0\n if 'distance' in attrs and np.all(obj.distance == 1.0):\n attrs.remove('distance')\n\n out = super()._represent_as_dict(attrs)\n\n out['representation_type'] = obj.representation_type.get_name()\n out['frame'] = obj.frame.name\n # Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None'\n # or None,None,m) and is not stored. The individual attributes have\n # units.\n\n return out\n\n def new_like(self, skycoords, length, metadata_conflicts='warn', name=None):\n \"\"\"\n Return a new SkyCoord instance which is consistent with the input\n SkyCoord objects ``skycoords`` and has ``length`` rows. Being\n \"consistent\" is defined as being able to set an item from one to each of\n the rest without any exception being raised.\n\n This is intended for creating a new SkyCoord instance whose elements can\n be set in-place for table operations like join or vstack. 
This is used\n when a SkyCoord object is used as a mixin column in an astropy Table.\n\n The data values are not predictable and it is expected that the consumer\n of the object will fill in all values.\n\n Parameters\n ----------\n skycoords : list\n List of input SkyCoord objects\n length : int\n Length of the output skycoord object\n metadata_conflicts : str ('warn'|'error'|'silent')\n How to handle metadata conflicts\n name : str\n Output name (sets output skycoord.info.name)\n\n Returns\n -------\n skycoord : SkyCoord (or subclass)\n Instance of this class consistent with ``skycoords``\n\n \"\"\"\n # Get merged info attributes like shape, dtype, format, description, etc.\n attrs = self.merge_cols_attributes(skycoords, metadata_conflicts, name,\n ('meta', 'description'))\n skycoord0 = skycoords[0]\n\n # Make a new SkyCoord object with the desired length and attributes\n # by using the _apply / __getitem__ machinery to effectively return\n # skycoord0[[0, 0, ..., 0, 0]]. This will have the all the right frame\n # attributes with the right shape.\n indexes = np.zeros(length, dtype=np.int64)\n out = skycoord0[indexes]\n\n # Use __setitem__ machinery to check for consistency of all skycoords\n for skycoord in skycoords[1:]:\n try:\n out[0] = skycoord[0]\n except Exception as err:\n raise ValueError(f'input skycoords are inconsistent: {err}')\n\n # Set (merged) info attributes\n for attr in ('name', 'meta', 'description'):\n if attr in attrs:\n setattr(out.info, attr, attrs[attr])\n\n return out\n\n\nclass SkyCoord(ShapedLikeNDArray):\n \"\"\"High-level object providing a flexible interface for celestial coordinate\n representation, manipulation, and transformation between systems.\n\n The `SkyCoord` class accepts a wide variety of inputs for initialization. At\n a minimum these must provide one or more celestial coordinate values with\n unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding\n scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).\n Typically one also specifies the coordinate frame, though this is not\n required. The general pattern for spherical representations is::\n\n SkyCoord(COORD, [FRAME], keyword_args ...)\n SkyCoord(LON, LAT, [FRAME], keyword_args ...)\n SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)\n SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)\n\n It is also possible to input coordinate values in other representations\n such as cartesian or cylindrical. In this case one includes the keyword\n argument ``representation_type='cartesian'`` (for example) along with data\n in ``x``, ``y``, and ``z``.\n\n See also: http://docs.astropy.org/en/stable/coordinates/\n\n Examples\n --------\n The examples below illustrate common ways of initializing a `SkyCoord`\n object. For a complete description of the allowed syntax see the\n full coordinates documentation. 
First some imports::\n\n >>> from astropy.coordinates import SkyCoord # High-level coordinates\n >>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames\n >>> from astropy.coordinates import Angle, Latitude, Longitude # Angles\n >>> import astropy.units as u\n\n The coordinate values and frame specification can now be provided using\n positional and keyword arguments::\n\n >>> c = SkyCoord(10, 20, unit=\"deg\") # defaults to ICRS frame\n >>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame=\"icrs\", unit=\"deg\") # 3 coords\n\n >>> coords = [\"1:12:43.2 +31:12:43\", \"1 12 43.2 +31 12 43\"]\n >>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime=\"J1992.21\")\n\n >>> c = SkyCoord(\"1h12m43.2s +1d12m43s\", frame=Galactic) # Units from string\n >>> c = SkyCoord(frame=\"galactic\", l=\"1h12m43.2s\", b=\"+1d12m43s\")\n\n >>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle\n >>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity\n >>> c = SkyCoord(ra, dec, frame='icrs')\n >>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')\n\n >>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox\n >>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults\n\n >>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',\n ... representation_type='cartesian')\n\n >>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])\n\n Velocity components (proper motions or radial velocities) can also be\n provided in a similar manner::\n\n >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)\n\n >>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)\n\n As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`\n class or the corresponding string alias. The frame classes that are built in\n to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.\n The string aliases are simply lower-case versions of the class name, and\n allow for creating a `SkyCoord` object and transforming frames without\n explicitly importing the frame classes.\n\n Parameters\n ----------\n frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional\n Type of coordinate frame this `SkyCoord` should represent. Defaults to\n to ICRS if not given or given as None.\n unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional\n Units for supplied ``LON`` and ``LAT`` values, respectively. If\n only one unit is supplied then it applies to both ``LON`` and\n ``LAT``.\n obstime : valid `~astropy.time.Time` initializer, optional\n Time(s) of observation.\n equinox : valid `~astropy.time.Time` initializer, optional\n Coordinate frame equinox.\n representation_type : str or Representation class\n Specifies the representation, e.g. 'spherical', 'cartesian', or\n 'cylindrical'. This affects the positional args and other keyword args\n which must correspond to the given representation.\n copy : bool, optional\n If `True` (default), a copy of any coordinate data is made. 
This\n argument can only be passed in as a keyword argument.\n **keyword_args\n Other keyword arguments as applicable for user-defined coordinate frames.\n Common options include:\n\n ra, dec : valid `~astropy.coordinates.Angle` initializer, optional\n RA and Dec for frames where ``ra`` and ``dec`` are keys in the\n frame's ``representation_component_names``, including `ICRS`,\n `FK5`, `FK4`, and `FK4NoETerms`.\n pm_ra_cosdec, pm_dec : `~astropy.units.Quantity`, optional\n Proper motion components, in angle per time units.\n l, b : valid `~astropy.coordinates.Angle` initializer, optional\n Galactic ``l`` and ``b`` for for frames where ``l`` and ``b`` are\n keys in the frame's ``representation_component_names``, including\n the `Galactic` frame.\n pm_l_cosb, pm_b : `~astropy.units.Quantity`, optional\n Proper motion components in the `Galactic` frame, in angle per time\n units.\n x, y, z : float or `~astropy.units.Quantity`, optional\n Cartesian coordinates values\n u, v, w : float or `~astropy.units.Quantity`, optional\n Cartesian coordinates values for the Galactic frame.\n radial_velocity : `~astropy.units.Quantity`, optional\n The component of the velocity along the line-of-sight (i.e., the\n radial direction), in velocity units.\n \"\"\"\n\n # Declare that SkyCoord can be used as a Table column by defining the\n # info property.\n info = SkyCoordInfo()\n\n def __init__(self, *args, copy=True, **kwargs):\n\n # these are frame attributes set on this SkyCoord but *not* a part of\n # the frame object this SkyCoord contains\n self._extra_frameattr_names = set()\n\n # If all that is passed in is a frame instance that already has data,\n # we should bypass all of the parsing and logic below. This is here\n # to make this the fastest way to create a SkyCoord instance. Many of\n # the classmethods implemented for performance enhancements will use\n # this as the initialization path\n if (len(args) == 1 and len(kwargs) == 0\n and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):\n\n coords = args[0]\n if isinstance(coords, SkyCoord):\n self._extra_frameattr_names = coords._extra_frameattr_names\n self.info = coords.info\n\n # Copy over any extra frame attributes\n for attr_name in self._extra_frameattr_names:\n # Setting it will also validate it.\n setattr(self, attr_name, getattr(coords, attr_name))\n\n coords = coords.frame\n\n if not coords.has_data:\n raise ValueError('Cannot initialize from a coordinate frame '\n 'instance without coordinate data')\n\n if copy:\n self._sky_coord_frame = coords.copy()\n else:\n self._sky_coord_frame = coords\n\n else:\n # Get the frame instance without coordinate data but with all frame\n # attributes set - these could either have been passed in with the\n # frame as an instance, or passed in as kwargs here\n frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)\n\n # Parse the args and kwargs to assemble a sanitized and validated\n # kwargs dict for initializing attributes for this object and for\n # creating the internal self._sky_coord_frame object\n args = list(args) # Make it mutable\n skycoord_kwargs, components, info = _parse_coordinate_data(\n frame_cls(**frame_kwargs), args, kwargs)\n\n # In the above two parsing functions, these kwargs were identified\n # as valid frame attributes for *some* frame, but not the frame that\n # this SkyCoord will have. 
We keep these attributes as special\n # skycoord frame attributes:\n for attr in skycoord_kwargs:\n # Setting it will also validate it.\n setattr(self, attr, skycoord_kwargs[attr])\n\n if info is not None:\n self.info = info\n\n # Finally make the internal coordinate object.\n frame_kwargs.update(components)\n self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)\n\n if not self._sky_coord_frame.has_data:\n raise ValueError('Cannot create a SkyCoord without data')\n\n @property\n def frame(self):\n return self._sky_coord_frame\n\n @property\n def representation_type(self):\n return self.frame.representation_type\n\n @representation_type.setter\n def representation_type(self, value):\n self.frame.representation_type = value\n\n # TODO: remove these in future\n @property\n def representation(self):\n return self.frame.representation\n\n @representation.setter\n def representation(self, value):\n self.frame.representation = value\n\n @property\n def shape(self):\n return self.frame.shape\n\n def __eq__(self, value):\n \"\"\"Equality operator for SkyCoord\n\n This implements strict equality and requires that the frames are\n equivalent, extra frame attributes are equivalent, and that the\n representation data are exactly equal.\n \"\"\"\n # Make sure that any extra frame attribute names are equivalent.\n for attr in self._extra_frameattr_names | value._extra_frameattr_names:\n if not self.frame._frameattr_equiv(getattr(self, attr),\n getattr(value, attr)):\n raise ValueError(f\"cannot compare: extra frame attribute \"\n f\"'{attr}' is not equivalent \"\n f\"(perhaps compare the frames directly to avoid \"\n f\"this exception)\")\n\n return self._sky_coord_frame == value._sky_coord_frame\n\n def __ne__(self, value):\n return np.logical_not(self == value)\n\n def _apply(self, method, *args, **kwargs):\n \"\"\"Create a new instance, applying a method to the underlying data.\n\n In typical usage, the method is any of the shape-changing methods for\n `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those\n picking particular elements (``__getitem__``, ``take``, etc.), which\n are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be\n applied to the underlying arrays in the representation (e.g., ``x``,\n ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),\n as well as to any frame attributes that have a shape, with the results\n used to create a new instance.\n\n Internally, it is also used to apply functions to the above parts\n (in particular, `~numpy.broadcast_to`).\n\n Parameters\n ----------\n method : str or callable\n If str, it is the name of a method that is applied to the internal\n ``components``. 
If callable, the function is applied.\n args : tuple\n Any positional arguments for ``method``.\n kwargs : dict\n Any keyword arguments for ``method``.\n \"\"\"\n def apply_method(value):\n if isinstance(value, ShapedLikeNDArray):\n return value._apply(method, *args, **kwargs)\n else:\n if callable(method):\n return method(value, *args, **kwargs)\n else:\n return getattr(value, method)(*args, **kwargs)\n\n # create a new but empty instance, and copy over stuff\n new = super().__new__(self.__class__)\n new._sky_coord_frame = self._sky_coord_frame._apply(method,\n *args, **kwargs)\n new._extra_frameattr_names = self._extra_frameattr_names.copy()\n for attr in self._extra_frameattr_names:\n value = getattr(self, attr)\n if getattr(value, 'shape', ()):\n value = apply_method(value)\n elif method == 'copy' or method == 'flatten':\n # flatten should copy also for a single element array, but\n # we cannot use it directly for array scalars, since it\n # always returns a one-dimensional array. So, just copy.\n value = copy.copy(value)\n setattr(new, '_' + attr, value)\n\n # Copy other 'info' attr only if it has actually been defined.\n # See PR #3898 for further explanation and justification, along\n # with Quantity.__array_finalize__\n if 'info' in self.__dict__:\n new.info = self.info\n\n return new\n\n def __setitem__(self, item, value):\n \"\"\"Implement self[item] = value for SkyCoord\n\n The right hand ``value`` must be strictly consistent with self:\n - Identical class\n - Equivalent frames\n - Identical representation_types\n - Identical representation differentials keys\n - Identical frame attributes\n - Identical \"extra\" frame attributes (e.g. obstime for an ICRS coord)\n\n With these caveats the setitem ends up as effectively a setitem on\n the representation data.\n\n self.frame.data[item] = value.frame.data\n \"\"\"\n if self.__class__ is not value.__class__:\n raise TypeError(f'can only set from object of same class: '\n f'{self.__class__.__name__} vs. '\n f'{value.__class__.__name__}')\n\n # Make sure that any extra frame attribute names are equivalent.\n for attr in self._extra_frameattr_names | value._extra_frameattr_names:\n if not self.frame._frameattr_equiv(getattr(self, attr),\n getattr(value, attr)):\n raise ValueError(f'attribute {attr} is not equivalent')\n\n # Set the frame values. This checks frame equivalence and also clears\n # the cache to ensure that the object is not in an inconsistent state.\n self._sky_coord_frame[item] = value._sky_coord_frame\n\n def insert(self, obj, values, axis=0):\n \"\"\"\n Insert coordinate values before the given indices in the object and\n return a new Frame object.\n\n The values to be inserted must conform to the rules for in-place setting\n of ``SkyCoord`` objects.\n\n The API signature matches the ``np.insert`` API, but is more limited.\n The specification of insert index ``obj`` must be a single integer,\n and the ``axis`` must be ``0`` for simple insertion before the index.\n\n Parameters\n ----------\n obj : int\n Integer index before which ``values`` is inserted.\n values : array_like\n Value(s) to insert. If the type of ``values`` is different\n from that of quantity, ``values`` is converted to the matching type.\n axis : int, optional\n Axis along which to insert ``values``. 
Default is 0, which is the\n only allowed value and will insert a row.\n\n Returns\n -------\n out : `~astropy.coordinates.SkyCoord` instance\n New coordinate object with inserted value(s)\n\n \"\"\"\n # Validate inputs: obj arg is integer, axis=0, self is not a scalar, and\n # input index is in bounds.\n try:\n idx0 = operator.index(obj)\n except TypeError:\n raise TypeError('obj arg must be an integer')\n\n if axis != 0:\n raise ValueError('axis must be 0')\n\n if not self.shape:\n raise TypeError('cannot insert into scalar {} object'\n .format(self.__class__.__name__))\n\n if abs(idx0) > len(self):\n raise IndexError('index {} is out of bounds for axis 0 with size {}'\n .format(idx0, len(self)))\n\n # Turn negative index into positive\n if idx0 < 0:\n idx0 = len(self) + idx0\n\n n_values = len(values) if values.shape else 1\n\n # Finally make the new object with the correct length and set values for the\n # three sections, before insert, the insert, and after the insert.\n out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)\n\n # Set the output values. This is where validation of `values` takes place to ensure\n # that it can indeed be inserted.\n out[:idx0] = self[:idx0]\n out[idx0:idx0 + n_values] = values\n out[idx0 + n_values:] = self[idx0:]\n\n return out\n\n def transform_to(self, frame, merge_attributes=True):\n \"\"\"Transform this coordinate to a new frame.\n\n The precise frame transformed to depends on ``merge_attributes``.\n If `False`, the destination frame is used exactly as passed in.\n But this is often not quite what one wants. E.g., suppose one wants to\n transform an ICRS coordinate that has an obstime attribute to FK4; in\n this case, one likely would want to use this information. Thus, the\n default for ``merge_attributes`` is `True`, in which the precedence is\n as follows: (1) explicitly set (i.e., non-default) values in the\n destination frame; (2) explicitly set values in the source; (3) default\n value in the destination frame.\n\n Note that in either case, any explicitly set attributes on the source\n `SkyCoord` that are not part of the destination frame's definition are\n kept (stored on the resulting `SkyCoord`), and thus one can round-trip\n (e.g., from FK4 to ICRS to FK4 without loosing obstime).\n\n Parameters\n ----------\n frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance\n The frame to transform this coordinate into. If a `SkyCoord`, the\n underlying frame is extracted, and all other information ignored.\n merge_attributes : bool, optional\n Whether the default attributes in the destination frame are allowed\n to be overridden by explicitly set attributes in the source\n (see note above; default: `True`).\n\n Returns\n -------\n coord : `SkyCoord`\n A new object with this coordinate represented in the `frame` frame.\n\n Raises\n ------\n ValueError\n If there is no possible transformation route.\n\n \"\"\"\n from astropy.coordinates.errors import ConvertError\n\n frame_kwargs = {}\n\n # Frame name (string) or frame class? 
Coerce into an instance.\n try:\n frame = _get_frame_class(frame)()\n except Exception:\n pass\n\n if isinstance(frame, SkyCoord):\n frame = frame.frame # Change to underlying coord frame instance\n\n if isinstance(frame, BaseCoordinateFrame):\n new_frame_cls = frame.__class__\n # Get frame attributes, allowing defaults to be overridden by\n # explicitly set attributes of the source if ``merge_attributes``.\n for attr in frame_transform_graph.frame_attributes:\n self_val = getattr(self, attr, None)\n frame_val = getattr(frame, attr, None)\n if (frame_val is not None\n and not (merge_attributes\n and frame.is_frame_attr_default(attr))):\n frame_kwargs[attr] = frame_val\n elif (self_val is not None\n and not self.is_frame_attr_default(attr)):\n frame_kwargs[attr] = self_val\n elif frame_val is not None:\n frame_kwargs[attr] = frame_val\n else:\n raise ValueError('Transform `frame` must be a frame name, class, or instance')\n\n # Get the composite transform to the new frame\n trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)\n if trans is None:\n raise ConvertError('Cannot transform from {} to {}'\n .format(self.frame.__class__, new_frame_cls))\n\n # Make a generic frame which will accept all the frame kwargs that\n # are provided and allow for transforming through intermediate frames\n # which may require one or more of those kwargs.\n generic_frame = GenericFrame(frame_kwargs)\n\n # Do the transformation, returning a coordinate frame of the desired\n # final type (not generic).\n new_coord = trans(self.frame, generic_frame)\n\n # Finally make the new SkyCoord object from the `new_coord` and\n # remaining frame_kwargs that are not frame_attributes in `new_coord`.\n for attr in (set(new_coord.get_frame_attr_names()) &\n set(frame_kwargs.keys())):\n frame_kwargs.pop(attr)\n\n return self.__class__(new_coord, **frame_kwargs)\n\n def apply_space_motion(self, new_obstime=None, dt=None):\n \"\"\"\n Compute the position of the source represented by this coordinate object\n to a new time using the velocities stored in this object and assuming\n linear space motion (including relativistic corrections). This is\n sometimes referred to as an \"epoch transformation.\"\n\n The initial time before the evolution is taken from the ``obstime``\n attribute of this coordinate. Note that this method currently does not\n support evolving coordinates where the *frame* has an ``obstime`` frame\n attribute, so the ``obstime`` is only used for storing the before and\n after times, not actually as an attribute of the frame. Alternatively,\n if ``dt`` is given, an ``obstime`` need not be provided at all.\n\n Parameters\n ----------\n new_obstime : `~astropy.time.Time`, optional\n The time at which to evolve the position to. Requires that the\n ``obstime`` attribute be present on this frame.\n dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional\n An amount of time to evolve the position of the source. Cannot be\n given at the same time as ``new_obstime``.\n\n Returns\n -------\n new_coord : `SkyCoord`\n A new coordinate object with the evolved location of this coordinate\n at the new time. 
``obstime`` will be set on this object to the new\n time only if ``self`` also has ``obstime``.\n \"\"\"\n\n if (new_obstime is None and dt is None or\n new_obstime is not None and dt is not None):\n raise ValueError(\"You must specify one of `new_obstime` or `dt`, \"\n \"but not both.\")\n\n # Validate that we have velocity info\n if 's' not in self.frame.data.differentials:\n raise ValueError('SkyCoord requires velocity data to evolve the '\n 'position.')\n\n if 'obstime' in self.frame.frame_attributes:\n raise NotImplementedError(\"Updating the coordinates in a frame \"\n \"with explicit time dependence is \"\n \"currently not supported. If you would \"\n \"like this functionality, please open an \"\n \"issue on github:\\n\"\n \"https://github.com/astropy/astropy\")\n\n if new_obstime is not None and self.obstime is None:\n # If no obstime is already on this object, raise an error if a new\n # obstime is passed: we need to know the time / epoch at which the\n # the position / velocity were measured initially\n raise ValueError('This object has no associated `obstime`. '\n 'apply_space_motion() must receive a time '\n 'difference, `dt`, and not a new obstime.')\n\n # Compute t1 and t2, the times used in the starpm call, which *only*\n # uses them to compute a delta-time\n t1 = self.obstime\n if dt is None:\n # self.obstime is not None and new_obstime is not None b/c of above\n # checks\n t2 = new_obstime\n else:\n # new_obstime is definitely None b/c of the above checks\n if t1 is None:\n # MAGIC NUMBER: if the current SkyCoord object has no obstime,\n # assume J2000 to do the dt offset. This is not actually used\n # for anything except a delta-t in starpm, so it's OK that it's\n # not necessarily the \"real\" obstime\n t1 = Time('J2000')\n new_obstime = None # we don't actually know the inital obstime\n t2 = t1 + dt\n else:\n t2 = t1 + dt\n new_obstime = t2\n # starpm wants tdb time\n t1 = t1.tdb\n t2 = t2.tdb\n\n # proper motion in RA should not include the cos(dec) term, see the\n # erfa function eraStarpv, comment (4). 
So we convert to the regular\n # spherical differentials.\n icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)\n icrsvel = icrsrep.differentials['s']\n\n parallax_zero = False\n try:\n plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())\n except u.UnitConversionError: # No distance: set to 0 by convention\n plx = 0.\n parallax_zero = True\n\n try:\n rv = icrsvel.d_distance.to_value(u.km/u.s)\n except u.UnitConversionError: # No RV\n rv = 0.\n\n starpm = erfa.pmsafe(icrsrep.lon.radian, icrsrep.lat.radian,\n icrsvel.d_lon.to_value(u.radian/u.yr),\n icrsvel.d_lat.to_value(u.radian/u.yr),\n plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)\n\n if parallax_zero:\n new_distance = None\n else:\n new_distance = Distance(parallax=starpm[4] << u.arcsec)\n\n icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),\n dec=u.Quantity(starpm[1], u.radian, copy=False),\n pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),\n pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),\n distance=new_distance,\n radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),\n differential_type=SphericalDifferential)\n\n # Update the obstime of the returned SkyCoord, and need to carry along\n # the frame attributes\n frattrs = {attrnm: getattr(self, attrnm)\n for attrnm in self._extra_frameattr_names}\n frattrs['obstime'] = new_obstime\n return self.__class__(icrs2, **frattrs).transform_to(self.frame)\n\n def _is_name(self, string):\n \"\"\"\n Returns whether a string is one of the aliases for the frame.\n \"\"\"\n return (self.frame.name == string or\n (isinstance(self.frame.name, list) and string in self.frame.name))\n\n def __getattr__(self, attr):\n \"\"\"\n Overrides getattr to return coordinates that this can be transformed\n to, based on the alias attr in the master transform graph.\n \"\"\"\n if '_sky_coord_frame' in self.__dict__:\n if self._is_name(attr):\n return self # Should this be a deepcopy of self?\n\n # Anything in the set of all possible frame_attr_names is handled\n # here. 
If the attr is relevant for the current frame then delegate\n # to self.frame otherwise get it from self._<attr>.\n if attr in frame_transform_graph.frame_attributes:\n if attr in self.frame.get_frame_attr_names():\n return getattr(self.frame, attr)\n else:\n return getattr(self, '_' + attr, None)\n\n # Some attributes might not fall in the above category but still\n # are available through self._sky_coord_frame.\n if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):\n return getattr(self._sky_coord_frame, attr)\n\n # Try to interpret as a new frame for transforming.\n frame_cls = frame_transform_graph.lookup_name(attr)\n if frame_cls is not None and self.frame.is_transformable_to(frame_cls):\n return self.transform_to(attr)\n\n # Fail\n raise AttributeError(\"'{}' object has no attribute '{}'\"\n .format(self.__class__.__name__, attr))\n\n def __setattr__(self, attr, val):\n # This is to make anything available through __getattr__ immutable\n if '_sky_coord_frame' in self.__dict__:\n if self._is_name(attr):\n raise AttributeError(f\"'{attr}' is immutable\")\n\n if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):\n setattr(self._sky_coord_frame, attr, val)\n return\n\n frame_cls = frame_transform_graph.lookup_name(attr)\n if frame_cls is not None and self.frame.is_transformable_to(frame_cls):\n raise AttributeError(f\"'{attr}' is immutable\")\n\n if attr in frame_transform_graph.frame_attributes:\n # All possible frame attributes can be set, but only via a private\n # variable. See __getattr__ above.\n super().__setattr__('_' + attr, val)\n # Validate it\n frame_transform_graph.frame_attributes[attr].__get__(self)\n # And add to set of extra attributes\n self._extra_frameattr_names |= {attr}\n\n else:\n # Otherwise, do the standard Python attribute setting\n super().__setattr__(attr, val)\n\n def __delattr__(self, attr):\n # mirror __setattr__ above\n if '_sky_coord_frame' in self.__dict__:\n if self._is_name(attr):\n raise AttributeError(f\"'{attr}' is immutable\")\n\n if not attr.startswith('_') and hasattr(self._sky_coord_frame,\n attr):\n delattr(self._sky_coord_frame, attr)\n return\n\n frame_cls = frame_transform_graph.lookup_name(attr)\n if frame_cls is not None and self.frame.is_transformable_to(frame_cls):\n raise AttributeError(f\"'{attr}' is immutable\")\n\n if attr in frame_transform_graph.frame_attributes:\n # All possible frame attributes can be deleted, but need to remove\n # the corresponding private variable. 
See __getattr__ above.\n super().__delattr__('_' + attr)\n # Also remove it from the set of extra attributes\n self._extra_frameattr_names -= {attr}\n\n else:\n # Otherwise, do the standard Python attribute setting\n super().__delattr__(attr)\n\n @override__dir__\n def __dir__(self):\n \"\"\"\n Override the builtin `dir` behavior to include:\n - Transforms available by aliases\n - Attribute / methods of the underlying self.frame object\n \"\"\"\n\n # determine the aliases that this can be transformed to.\n dir_values = set()\n for name in frame_transform_graph.get_names():\n frame_cls = frame_transform_graph.lookup_name(name)\n if self.frame.is_transformable_to(frame_cls):\n dir_values.add(name)\n\n # Add public attributes of self.frame\n dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_')))\n\n # Add all possible frame attributes\n dir_values.update(frame_transform_graph.frame_attributes.keys())\n\n return dir_values\n\n def __repr__(self):\n clsnm = self.__class__.__name__\n coonm = self.frame.__class__.__name__\n frameattrs = self.frame._frame_attrs_repr()\n if frameattrs:\n frameattrs = ': ' + frameattrs\n\n data = self.frame._data_repr()\n if data:\n data = ': ' + data\n\n return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())\n\n def to_string(self, style='decimal', **kwargs):\n \"\"\"\n A string representation of the coordinates.\n\n The default styles definitions are::\n\n 'decimal': 'lat': {'decimal': True, 'unit': \"deg\"}\n 'lon': {'decimal': True, 'unit': \"deg\"}\n 'dms': 'lat': {'unit': \"deg\"}\n 'lon': {'unit': \"deg\"}\n 'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': \"deg\"}\n 'lon': {'pad': True, 'unit': \"hour\"}\n\n See :meth:`~astropy.coordinates.Angle.to_string` for details and\n keyword arguments (the two angles forming the coordinates are are\n both :class:`~astropy.coordinates.Angle` instances). Keyword\n arguments have precedence over the style defaults and are passed\n to :meth:`~astropy.coordinates.Angle.to_string`.\n\n Parameters\n ----------\n style : {'hmsdms', 'dms', 'decimal'}\n The formatting specification to use. These encode the three most\n common ways to represent coordinates. The default is `decimal`.\n kwargs\n Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.\n \"\"\"\n\n sph_coord = self.frame.represent_as(SphericalRepresentation)\n\n styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},\n 'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},\n 'dms': {'lonargs': {'unit': u.degree},\n 'latargs': {'unit': u.degree}},\n 'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},\n 'latargs': {'unit': u.degree, 'decimal': True}}\n }\n\n lonargs = {}\n latargs = {}\n\n if style in styles:\n lonargs.update(styles[style]['lonargs'])\n latargs.update(styles[style]['latargs'])\n else:\n raise ValueError('Invalid style. 
Valid options are: {}'.format(\",\".join(styles)))\n\n lonargs.update(kwargs)\n latargs.update(kwargs)\n\n if np.isscalar(sph_coord.lon.value):\n coord_string = (sph_coord.lon.to_string(**lonargs) +\n \" \" + sph_coord.lat.to_string(**latargs))\n else:\n coord_string = []\n for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):\n coord_string += [(lonangle.to_string(**lonargs) +\n \" \" + latangle.to_string(**latargs))]\n if len(sph_coord.shape) > 1:\n coord_string = np.array(coord_string).reshape(sph_coord.shape)\n\n return coord_string\n\n def is_equivalent_frame(self, other):\n \"\"\"\n Checks if this object's frame as the same as that of the ``other``\n object.\n\n To be the same frame, two objects must be the same frame class and have\n the same frame attributes. For two `SkyCoord` objects, *all* of the\n frame attributes have to match, not just those relevant for the object's\n frame.\n\n Parameters\n ----------\n other : SkyCoord or BaseCoordinateFrame\n The other object to check.\n\n Returns\n -------\n isequiv : bool\n True if the frames are the same, False if not.\n\n Raises\n ------\n TypeError\n If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.\n \"\"\"\n if isinstance(other, BaseCoordinateFrame):\n return self.frame.is_equivalent_frame(other)\n elif isinstance(other, SkyCoord):\n if other.frame.name != self.frame.name:\n return False\n\n for fattrnm in frame_transform_graph.frame_attributes:\n if not BaseCoordinateFrame._frameattr_equiv(getattr(self, fattrnm),\n getattr(other, fattrnm)):\n return False\n return True\n else:\n # not a BaseCoordinateFrame nor a SkyCoord object\n raise TypeError(\"Tried to do is_equivalent_frame on something that \"\n \"isn't frame-like\")\n\n # High-level convenience methods\n def separation(self, other):\n \"\"\"\n Computes on-sky separation between this coordinate and another.\n\n .. note::\n\n If the ``other`` coordinate object is in a different frame, it is\n first transformed to the frame of this object. This can lead to\n unintuitive behavior if not accounted for. Particularly of note is\n that ``self.separation(other)`` and ``other.separation(self)`` may\n not give the same answer in this case.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The coordinate to get the separation to.\n\n Returns\n -------\n sep : `~astropy.coordinates.Angle`\n The on-sky separation between this and the ``other`` coordinate.\n\n Notes\n -----\n The separation is calculated using the Vincenty formula, which\n is stable at all locations, including poles and antipodes [1]_.\n\n .. [1] https://en.wikipedia.org/wiki/Great-circle_distance\n\n \"\"\"\n from . 
import Angle\n from .angle_utilities import angular_separation\n\n if not self.is_equivalent_frame(other):\n try:\n kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}\n other = other.transform_to(self, **kwargs)\n except TypeError:\n raise TypeError('Can only get separation to another SkyCoord '\n 'or a coordinate frame with data')\n\n lon1 = self.spherical.lon\n lat1 = self.spherical.lat\n lon2 = other.spherical.lon\n lat2 = other.spherical.lat\n\n # Get the separation as a Quantity, convert to Angle in degrees\n sep = angular_separation(lon1, lat1, lon2, lat2)\n return Angle(sep, unit=u.degree)\n\n def separation_3d(self, other):\n \"\"\"\n Computes three dimensional separation between this coordinate\n and another.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The coordinate to get the separation to.\n\n Returns\n -------\n sep : `~astropy.coordinates.Distance`\n The real-space distance between these two coordinates.\n\n Raises\n ------\n ValueError\n If this or the other coordinate do not have distances.\n \"\"\"\n if not self.is_equivalent_frame(other):\n try:\n kwargs = {'merge_attributes': False} if isinstance(other, SkyCoord) else {}\n other = other.transform_to(self, **kwargs)\n except TypeError:\n raise TypeError('Can only get separation to another SkyCoord '\n 'or a coordinate frame with data')\n\n if issubclass(self.data.__class__, UnitSphericalRepresentation):\n raise ValueError('This object does not have a distance; cannot '\n 'compute 3d separation.')\n if issubclass(other.data.__class__, UnitSphericalRepresentation):\n raise ValueError('The other object does not have a distance; '\n 'cannot compute 3d separation.')\n\n c1 = self.cartesian.without_differentials()\n c2 = other.cartesian.without_differentials()\n return Distance((c1 - c2).norm())\n\n def spherical_offsets_to(self, tocoord):\n r\"\"\"\n Computes angular offsets to go *from* this coordinate *to* another.\n\n Parameters\n ----------\n tocoord : `~astropy.coordinates.BaseCoordinateFrame`\n The coordinate to find the offset to.\n\n Returns\n -------\n lon_offset : `~astropy.coordinates.Angle`\n The angular offset in the longitude direction (i.e., RA for\n equatorial coordinates).\n lat_offset : `~astropy.coordinates.Angle`\n The angular offset in the latitude direction (i.e., Dec for\n equatorial coordinates).\n\n Raises\n ------\n ValueError\n If the ``tocoord`` is not in the same frame as this one. 
This is\n different from the behavior of the `separation`/`separation_3d`\n methods because the offset components depend critically on the\n specific choice of frame.\n\n Notes\n -----\n This uses the sky offset frame machinery, and hence will produce a new\n sky offset frame if one does not already exist for this object's frame\n class.\n\n See Also\n --------\n separation : for the *total* angular offset (not broken out into components).\n position_angle : for the direction of the offset.\n\n \"\"\"\n if not self.is_equivalent_frame(tocoord):\n raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')\n\n aframe = self.skyoffset_frame()\n acoord = tocoord.transform_to(aframe)\n\n dlon = acoord.spherical.lon.view(Angle)\n dlat = acoord.spherical.lat.view(Angle)\n return dlon, dlat\n\n def directional_offset_by(self, position_angle, separation):\n \"\"\"\n Computes coordinates at the given offset from this coordinate.\n\n Parameters\n ----------\n position_angle : `~astropy.coordinates.Angle`\n position_angle of offset\n separation : `~astropy.coordinates.Angle`\n offset angular separation\n\n Returns\n -------\n newpoints : `~astropy.coordinates.SkyCoord`\n The coordinates for the location that corresponds to offsetting by\n the given `position_angle` and `separation`.\n\n Notes\n -----\n Returned SkyCoord frame retains only the frame attributes that are for\n the resulting frame type. (e.g. if the input frame is\n `~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but\n an ``obstime`` will not.)\n\n For a more complete set of transform offsets, use `~astropy.wcs.WCS`.\n `~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to\n create a spherical frame with (lat=0, lon=0) at a reference point,\n approximating an xy cartesian system for small offsets. This method\n is distinct in that it is accurate on the sphere.\n\n See Also\n --------\n position_angle : inverse operation for the ``position_angle`` component\n separation : inverse operation for the ``separation`` component\n\n \"\"\"\n from . import angle_utilities\n\n slat = self.represent_as(UnitSphericalRepresentation).lat\n slon = self.represent_as(UnitSphericalRepresentation).lon\n\n newlon, newlat = angle_utilities.offset_by(\n lon=slon, lat=slat,\n posang=position_angle, distance=separation)\n\n return SkyCoord(newlon, newlat, frame=self.frame)\n\n def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):\n \"\"\"\n Finds the nearest on-sky matches of this coordinate in a set of\n catalog coordinates.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The base catalog in which to search for matches. Typically this\n will be a coordinate object that is an array (i.e.,\n ``catalogcoord.isscalar == False``)\n nthneighbor : int, optional\n Which closest neighbor to search for. Typically ``1`` is\n desired here, as that is correct for matching one set of\n coordinates to another. The next likely use case is ``2``,\n for matching a coordinate catalog against *itself* (``1``\n is inappropriate because each point will find itself as the\n closest match).\n\n Returns\n -------\n idx : integer array\n Indices into ``catalogcoord`` to get the matched points for\n each of this object's coordinates. 
Shape matches this\n object.\n sep2d : `~astropy.coordinates.Angle`\n The on-sky separation between the closest match for each\n element in this object in ``catalogcoord``. Shape matches\n this object.\n dist3d : `~astropy.units.Quantity`\n The 3D distance between the closest match for each element\n in this object in ``catalogcoord``. Shape matches this\n object. Unless both this and ``catalogcoord`` have associated\n distances, this quantity assumes that all sources are at a\n distance of 1 (dimensionless).\n\n Notes\n -----\n This method requires `SciPy <https://www.scipy.org/>`_ to be\n installed or it will fail.\n\n See Also\n --------\n astropy.coordinates.match_coordinates_sky\n SkyCoord.match_to_catalog_3d\n \"\"\"\n from .matching import match_coordinates_sky\n\n if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))\n and catalogcoord.has_data):\n self_in_catalog_frame = self.transform_to(catalogcoord)\n else:\n raise TypeError('Can only get separation to another SkyCoord or a '\n 'coordinate frame with data')\n\n res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,\n nthneighbor=nthneighbor,\n storekdtree='_kdtree_sky')\n return res\n\n def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):\n \"\"\"\n Finds the nearest 3-dimensional matches of this coordinate to a set\n of catalog coordinates.\n\n This finds the 3-dimensional closest neighbor, which is only different\n from the on-sky distance if ``distance`` is set in this object or the\n ``catalogcoord`` object.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The base catalog in which to search for matches. Typically this\n will be a coordinate object that is an array (i.e.,\n ``catalogcoord.isscalar == False``)\n nthneighbor : int, optional\n Which closest neighbor to search for. Typically ``1`` is\n desired here, as that is correct for matching one set of\n coordinates to another. The next likely use case is\n ``2``, for matching a coordinate catalog against *itself*\n (``1`` is inappropriate because each point will find\n itself as the closest match).\n\n Returns\n -------\n idx : integer array\n Indices into ``catalogcoord`` to get the matched points for\n each of this object's coordinates. Shape matches this\n object.\n sep2d : `~astropy.coordinates.Angle`\n The on-sky separation between the closest match for each\n element in this object in ``catalogcoord``. Shape matches\n this object.\n dist3d : `~astropy.units.Quantity`\n The 3D distance between the closest match for each element\n in this object in ``catalogcoord``. 
Shape matches this\n object.\n\n Notes\n -----\n This method requires `SciPy <https://www.scipy.org/>`_ to be\n installed or it will fail.\n\n See Also\n --------\n astropy.coordinates.match_coordinates_3d\n SkyCoord.match_to_catalog_sky\n \"\"\"\n from .matching import match_coordinates_3d\n\n if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))\n and catalogcoord.has_data):\n self_in_catalog_frame = self.transform_to(catalogcoord)\n else:\n raise TypeError('Can only get separation to another SkyCoord or a '\n 'coordinate frame with data')\n\n res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,\n nthneighbor=nthneighbor,\n storekdtree='_kdtree_3d')\n\n return res\n\n def search_around_sky(self, searcharoundcoords, seplimit):\n \"\"\"\n Searches for all coordinates in this object around a supplied set of\n points within a given on-sky separation.\n\n This is intended for use on `~astropy.coordinates.SkyCoord` objects\n with coordinate arrays, rather than a scalar coordinate. For a scalar\n coordinate, it is better to use\n `~astropy.coordinates.SkyCoord.separation`.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The coordinates to search around to try to find matching points in\n this `SkyCoord`. This should be an object with array coordinates,\n not a scalar coordinate object.\n seplimit : `~astropy.units.Quantity` with angle units\n The on-sky separation to search within.\n\n Returns\n -------\n idxsearcharound : integer array\n Indices into ``searcharoundcoords`` that match the\n corresponding elements of ``idxself``. Shape matches\n ``idxself``.\n idxself : integer array\n Indices into ``self`` that match the\n corresponding elements of ``idxsearcharound``. Shape matches\n ``idxsearcharound``.\n sep2d : `~astropy.coordinates.Angle`\n The on-sky separation between the coordinates. Shape matches\n ``idxsearcharound`` and ``idxself``.\n dist3d : `~astropy.units.Quantity`\n The 3D distance between the coordinates. Shape matches\n ``idxsearcharound`` and ``idxself``.\n\n Notes\n -----\n This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be\n installed or it will fail.\n\n In the current implementation, the return values are always sorted in\n the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is\n in ascending order). This is considered an implementation detail,\n though, so it could change in a future release.\n\n See Also\n --------\n astropy.coordinates.search_around_sky\n SkyCoord.search_around_3d\n \"\"\"\n from .matching import search_around_sky\n\n return search_around_sky(searcharoundcoords, self, seplimit,\n storekdtree='_kdtree_sky')\n\n def search_around_3d(self, searcharoundcoords, distlimit):\n \"\"\"\n Searches for all coordinates in this object around a supplied set of\n points within a given 3D radius.\n\n This is intended for use on `~astropy.coordinates.SkyCoord` objects\n with coordinate arrays, rather than a scalar coordinate. 
For a scalar\n coordinate, it is better to use\n `~astropy.coordinates.SkyCoord.separation_3d`.\n\n For more on how to use this (and related) functionality, see the\n examples in :doc:`/coordinates/matchsep`.\n\n Parameters\n ----------\n searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`\n The coordinates to search around to try to find matching points in\n this `SkyCoord`. This should be an object with array coordinates,\n not a scalar coordinate object.\n distlimit : `~astropy.units.Quantity` with distance units\n The physical radius to search within.\n\n Returns\n -------\n idxsearcharound : integer array\n Indices into ``searcharoundcoords`` that match the\n corresponding elements of ``idxself``. Shape matches\n ``idxself``.\n idxself : integer array\n Indices into ``self`` that match the\n corresponding elements of ``idxsearcharound``. Shape matches\n ``idxsearcharound``.\n sep2d : `~astropy.coordinates.Angle`\n The on-sky separation between the coordinates. Shape matches\n ``idxsearcharound`` and ``idxself``.\n dist3d : `~astropy.units.Quantity`\n The 3D distance between the coordinates. Shape matches\n ``idxsearcharound`` and ``idxself``.\n\n Notes\n -----\n This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be\n installed or it will fail.\n\n In the current implementation, the return values are always sorted in\n the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is\n in ascending order). This is considered an implementation detail,\n though, so it could change in a future release.\n\n See Also\n --------\n astropy.coordinates.search_around_3d\n SkyCoord.search_around_sky\n \"\"\"\n from .matching import search_around_3d\n\n return search_around_3d(searcharoundcoords, self, distlimit,\n storekdtree='_kdtree_3d')\n\n def position_angle(self, other):\n \"\"\"\n Computes the on-sky position angle (East of North) between this\n `SkyCoord` and another.\n\n Parameters\n ----------\n other : `SkyCoord`\n The other coordinate to compute the position angle to. It is\n treated as the \"head\" of the vector of the position angle.\n\n Returns\n -------\n pa : `~astropy.coordinates.Angle`\n The (positive) position angle of the vector pointing from ``self``\n to ``other``. If either ``self`` or ``other`` contain arrays, this\n will be an array following the appropriate `numpy` broadcasting\n rules.\n\n Examples\n --------\n\n >>> c1 = SkyCoord(0*u.deg, 0*u.deg)\n >>> c2 = SkyCoord(1*u.deg, 0*u.deg)\n >>> c1.position_angle(c2).degree\n 90.0\n >>> c3 = SkyCoord(1*u.deg, 1*u.deg)\n >>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP\n 44.995636455344844\n \"\"\"\n from . 
import angle_utilities\n\n if not self.is_equivalent_frame(other):\n try:\n other = other.transform_to(self, merge_attributes=False)\n except TypeError:\n raise TypeError('Can only get position_angle to another '\n 'SkyCoord or a coordinate frame with data')\n\n slat = self.represent_as(UnitSphericalRepresentation).lat\n slon = self.represent_as(UnitSphericalRepresentation).lon\n olat = other.represent_as(UnitSphericalRepresentation).lat\n olon = other.represent_as(UnitSphericalRepresentation).lon\n\n return angle_utilities.position_angle(slon, slat, olon, olat)\n\n def skyoffset_frame(self, rotation=None):\n \"\"\"\n Returns the sky offset frame with this `SkyCoord` at the origin.\n\n Returns\n -------\n astrframe : `~astropy.coordinates.SkyOffsetFrame`\n A sky offset frame of the same type as this `SkyCoord` (e.g., if\n this object has an ICRS coordinate, the resulting frame is\n SkyOffsetICRS, with the origin set to this object)\n rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units\n The final rotation of the frame about the ``origin``. The sign of\n the rotation is the left-hand rule. That is, an object at a\n particular position angle in the un-rotated system will be sent to\n the positive latitude (z) direction in the final frame.\n \"\"\"\n return SkyOffsetFrame(origin=self, rotation=rotation)\n\n def get_constellation(self, short_name=False, constellation_list='iau'):\n \"\"\"\n Determines the constellation(s) of the coordinates this `SkyCoord`\n contains.\n\n Parameters\n ----------\n short_name : bool\n If True, the returned names are the IAU-sanctioned abbreviated\n names. Otherwise, full names for the constellations are used.\n constellation_list : str\n The set of constellations to use. Currently only ``'iau'`` is\n supported, meaning the 88 \"modern\" constellations endorsed by the IAU.\n\n Returns\n -------\n constellation : str or string array\n If this is a scalar coordinate, returns the name of the\n constellation. If it is an array `SkyCoord`, it returns an array of\n names.\n\n Notes\n -----\n To determine which constellation a point on the sky is in, this first\n precesses to B1875, and then uses the Delporte boundaries of the 88\n modern constellations, as tabulated by\n `Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.\n\n See Also\n --------\n astropy.coordinates.get_constellation\n \"\"\"\n from .funcs import get_constellation\n\n # because of issue #7028, the conversion to a PrecessedGeocentric\n # system fails in some cases. 
Work around is to drop the velocities.\n # they are not needed here since only position infromation is used\n extra_frameattrs = {nm: getattr(self, nm)\n for nm in self._extra_frameattr_names}\n novel = SkyCoord(self.realize_frame(self.data.without_differentials()),\n **extra_frameattrs)\n return get_constellation(novel, short_name, constellation_list)\n\n # the simpler version below can be used when gh-issue #7028 is resolved\n # return get_constellation(self, short_name, constellation_list)\n\n # WCS pixel to/from sky conversions\n def to_pixel(self, wcs, origin=0, mode='all'):\n \"\"\"\n Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`\n object.\n\n Parameters\n ----------\n wcs : `~astropy.wcs.WCS`\n The WCS to use for convert\n origin : int\n Whether to return 0 or 1-based pixel coordinates.\n mode : 'all' or 'wcs'\n Whether to do the transformation including distortions (``'all'``) or\n only including only the core WCS transformation (``'wcs'``).\n\n Returns\n -------\n xp, yp : `numpy.ndarray`\n The pixel coordinates\n\n See Also\n --------\n astropy.wcs.utils.skycoord_to_pixel : the implementation of this method\n \"\"\"\n from astropy.wcs.utils import skycoord_to_pixel\n return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)\n\n @classmethod\n def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):\n \"\"\"\n Create a new `SkyCoord` from pixel coordinates using an\n `~astropy.wcs.WCS` object.\n\n Parameters\n ----------\n xp, yp : float or `numpy.ndarray`\n The coordinates to convert.\n wcs : `~astropy.wcs.WCS`\n The WCS to use for convert\n origin : int\n Whether to return 0 or 1-based pixel coordinates.\n mode : 'all' or 'wcs'\n Whether to do the transformation including distortions (``'all'``) or\n only including only the core WCS transformation (``'wcs'``).\n\n Returns\n -------\n coord : an instance of this class\n A new object with sky coordinates corresponding to the input ``xp``\n and ``yp``.\n\n See Also\n --------\n to_pixel : to do the inverse operation\n astropy.wcs.utils.pixel_to_skycoord : the implementation of this method\n \"\"\"\n from astropy.wcs.utils import pixel_to_skycoord\n return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)\n\n def contained_by(self, wcs, image=None, **kwargs):\n \"\"\"\n Determines if the SkyCoord is contained in the given wcs footprint.\n\n Parameters\n ----------\n wcs : `~astropy.wcs.WCS`\n The coordinate to check if it is within the wcs coordinate.\n image : array\n Optional. The image associated with the wcs object that the cooordinate\n is being checked against. 
If not given the naxis keywords will be used\n to determine if the coordinate falls within the wcs footprint.\n **kwargs :\n Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`\n\n Returns\n -------\n response : bool\n True means the WCS footprint contains the coordinate, False means it does not.\n \"\"\"\n\n if image is not None:\n ymax, xmax = image.shape\n else:\n xmax, ymax = wcs._naxis\n\n import warnings\n with warnings.catch_warnings():\n # Suppress warnings since they just mean we didn't find the coordinate\n warnings.simplefilter(\"ignore\")\n try:\n x, y = self.to_pixel(wcs, **kwargs)\n except Exception:\n return False\n\n return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)\n\n def radial_velocity_correction(self, kind='barycentric', obstime=None,\n location=None):\n \"\"\"\n Compute the correction required to convert a radial velocity at a given\n time and place on the Earth's Surface to a barycentric or heliocentric\n velocity.\n\n Parameters\n ----------\n kind : str\n The kind of velocity correction. Must be 'barycentric' or\n 'heliocentric'.\n obstime : `~astropy.time.Time` or None, optional\n The time at which to compute the correction. If `None`, the\n ``obstime`` frame attribute on the `SkyCoord` will be used.\n location : `~astropy.coordinates.EarthLocation` or None, optional\n The observer location at which to compute the correction. If\n `None`, the ``location`` frame attribute on the passed-in\n ``obstime`` will be used, and if that is None, the ``location``\n frame attribute on the `SkyCoord` will be used.\n\n Raises\n ------\n ValueError\n If either ``obstime`` or ``location`` are passed in (not ``None``)\n when the frame attribute is already set on this `SkyCoord`.\n TypeError\n If ``obstime`` or ``location`` aren't provided, either as arguments\n or as frame attributes.\n\n Returns\n -------\n vcorr : `~astropy.units.Quantity` with velocity units\n The correction with a positive sign. I.e., *add* this\n to an observed radial velocity to get the barycentric (or\n heliocentric) velocity. If m/s precision or better is needed,\n see the notes below.\n\n Notes\n -----\n The barycentric correction is calculated to higher precision than the\n heliocentric correction and includes additional physics (e.g time dilation).\n Use barycentric corrections if m/s precision is required.\n\n The algorithm here is sufficient to perform corrections at the mm/s level, but\n care is needed in application. The barycentric correction returned uses the optical\n approximation v = z * c. Strictly speaking, the barycentric correction is\n multiplicative and should be applied as::\n\n >>> from astropy.time import Time\n >>> from astropy.coordinates import SkyCoord, EarthLocation\n >>> from astropy.constants import c\n >>> t = Time(56370.5, format='mjd', scale='utc')\n >>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)\n >>> sc = SkyCoord(1*u.deg, 2*u.deg)\n >>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc) # doctest: +REMOTE_DATA\n >>> rv = rv + vcorr + rv * vcorr / c # doctest: +SKIP\n\n Also note that this method returns the correction velocity in the so-called\n *optical convention*::\n\n >>> vcorr = zb * c # doctest: +SKIP\n\n where ``zb`` is the barycentric correction redshift as defined in section 3\n of Wright & Eastman (2014). The application formula given above follows from their\n equation (11) under assumption that the radial velocity ``rv`` has also been defined\n using the same optical convention. 
Note, this can be regarded as a matter of\n velocity definition and does not by itself imply any loss of accuracy, provided\n sufficient care has been taken during interpretation of the results. If you need\n the barycentric correction expressed as the full relativistic velocity (e.g., to provide\n it as the input to another software which performs the application), the\n following recipe can be used::\n\n >>> zb = vcorr / c # doctest: +REMOTE_DATA\n >>> zb_plus_one_squared = (zb + 1) ** 2 # doctest: +REMOTE_DATA\n >>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1) # doctest: +REMOTE_DATA\n\n or alternatively using just equivalencies::\n\n >>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz)) # doctest: +REMOTE_DATA\n\n See also `~astropy.units.equivalencies.doppler_optical`,\n `~astropy.units.equivalencies.doppler_radio`, and\n `~astropy.units.equivalencies.doppler_relativistic` for more information on\n the velocity conventions.\n\n The default is for this method to use the builtin ephemeris for\n computing the sun and earth location. Other ephemerides can be chosen\n by setting the `~astropy.coordinates.solar_system_ephemeris` variable,\n either directly or via ``with`` statement. For example, to use the JPL\n ephemeris, do::\n\n >>> from astropy.coordinates import solar_system_ephemeris\n >>> sc = SkyCoord(1*u.deg, 2*u.deg)\n >>> with solar_system_ephemeris.set('jpl'): # doctest: +REMOTE_DATA\n ... rv += sc.radial_velocity_correction(obstime=t, location=loc) # doctest: +SKIP\n\n \"\"\"\n # has to be here to prevent circular imports\n from .solar_system import get_body_barycentric_posvel\n\n # location validation\n timeloc = getattr(obstime, 'location', None)\n if location is None:\n if self.location is not None:\n location = self.location\n if timeloc is not None:\n raise ValueError('`location` cannot be in both the '\n 'passed-in `obstime` and this `SkyCoord` '\n 'because it is ambiguous which is meant '\n 'for the radial_velocity_correction.')\n elif timeloc is not None:\n location = timeloc\n else:\n raise TypeError('Must provide a `location` to '\n 'radial_velocity_correction, either as a '\n 'SkyCoord frame attribute, as an attribute on '\n 'the passed in `obstime`, or in the method '\n 'call.')\n\n elif self.location is not None or timeloc is not None:\n raise ValueError('Cannot compute radial velocity correction if '\n '`location` argument is passed in and there is '\n 'also a `location` attribute on this SkyCoord or '\n 'the passed-in `obstime`.')\n\n # obstime validation\n coo_at_rv_obstime = self # assume we need no space motion for now\n if obstime is None:\n obstime = self.obstime\n if obstime is None:\n raise TypeError('Must provide an `obstime` to '\n 'radial_velocity_correction, either as a '\n 'SkyCoord frame attribute or in the method '\n 'call.')\n elif self.obstime is not None and self.frame.data.differentials:\n # we do need space motion after all\n coo_at_rv_obstime = self.apply_space_motion(obstime)\n elif self.obstime is None:\n # warn the user if the object has differentials set\n if 's' in self.data.differentials:\n warnings.warn(\n \"SkyCoord has space motion, and therefore the specified \"\n \"position of the SkyCoord may not be the same as \"\n \"the `obstime` for the radial velocity measurement. \"\n \"This may affect the rv correction at the order of km/s\"\n \"for very high proper motions sources. 
If you wish to \"\n \"apply space motion of the SkyCoord to correct for this\"\n \"the `obstime` attribute of the SkyCoord must be set\",\n AstropyUserWarning\n )\n\n pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)\n if kind == 'barycentric':\n v_origin_to_earth = v_earth\n elif kind == 'heliocentric':\n v_sun = get_body_barycentric_posvel('sun', obstime)[1]\n v_origin_to_earth = v_earth - v_sun\n else:\n raise ValueError(\"`kind` argument to radial_velocity_correction must \"\n \"be 'barycentric' or 'heliocentric', but got \"\n \"'{}'\".format(kind))\n\n gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)\n # transforming to GCRS is not the correct thing to do here, since we don't want to\n # include aberration (or light deflection)? Instead, only apply parallax if necessary\n icrs_cart = coo_at_rv_obstime.icrs.cartesian\n icrs_cart_novel = icrs_cart.without_differentials()\n if self.data.__class__ is UnitSphericalRepresentation:\n targcart = icrs_cart_novel\n else:\n # skycoord has distances so apply parallax\n obs_icrs_cart = pos_earth + gcrs_p\n targcart = icrs_cart_novel - obs_icrs_cart\n targcart /= targcart.norm()\n\n if kind == 'barycentric':\n beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light\n gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)\n gr = location.gravitational_redshift(obstime)\n # barycentric redshift according to eq 28 in Wright & Eastmann (2014),\n # neglecting Shapiro delay and effects of the star's own motion\n zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr/speed_of_light)\n # try and get terms corresponding to stellar motion.\n if icrs_cart.differentials:\n try:\n ro = self.icrs.cartesian\n beta_star = ro.differentials['s'].to_cartesian() / speed_of_light\n # ICRS unit vector at coordinate epoch\n ro = ro.without_differentials()\n ro /= ro.norm()\n zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))\n except u.UnitConversionError:\n warnings.warn(\"SkyCoord contains some velocity information, but not enough to \"\n \"calculate the full space motion of the source, and so this has \"\n \"been ignored for the purposes of calculating the radial velocity \"\n \"correction. This can lead to errors on the order of metres/second.\",\n AstropyUserWarning)\n\n zb = zb - 1\n return zb * speed_of_light\n else:\n # do a simpler correction ignoring time dilation and gravitational redshift\n # this is adequate since Heliocentric corrections shouldn't be used if\n # cm/s precision is required.\n return targcart.dot(v_origin_to_earth + gcrs_v)\n\n # Table interactions\n @classmethod\n def guess_from_table(cls, table, **coord_kwargs):\n r\"\"\"\n A convenience method to create and return a new `SkyCoord` from the data\n in an astropy Table.\n\n This method matches table columns that start with the case-insensitive\n names of the the components of the requested frames, if they are also\n followed by a non-alphanumeric character. It will also match columns\n that *end* with the component name if a non-alphanumeric character is\n *before* it.\n\n For example, the first rule means columns with names like\n ``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for\n `~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``\n are *not*. 
Similarly, the second rule applied to the\n `~astropy.coordinates.Galactic` frame means that a column named\n ``'gal_l'`` will be used as the the ``l`` component, but ``gall`` or\n ``'fill'`` will not.\n\n The definition of alphanumeric here is based on Unicode's definition\n of alphanumeric, except without ``_`` (which is normally considered\n alphanumeric). So for ASCII, this means the non-alphanumeric characters\n are ``<space>_!\"#$%&'()*+,-./\\:;<=>?@[]^`{|}~``).\n\n Parameters\n ----------\n table : astropy.Table\n The table to load data from.\n coord_kwargs\n Any additional keyword arguments are passed directly to this class's\n constructor.\n\n Returns\n -------\n newsc : same as this class\n The new `SkyCoord` (or subclass) object.\n \"\"\"\n _frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)\n frame = _frame_cls(**_frame_kwargs)\n coord_kwargs['frame'] = coord_kwargs.get('frame', frame)\n\n comp_kwargs = {}\n for comp_name in frame.representation_component_names:\n # this matches things like 'ra[...]'' but *not* 'rad'.\n # note that the \"_\" must be in there explicitly, because\n # \"alphanumeric\" usually includes underscores.\n starts_with_comp = comp_name + r'(\\W|\\b|_)'\n # this part matches stuff like 'center_ra', but *not*\n # 'aura'\n ends_with_comp = r'.*(\\W|\\b|_)' + comp_name + r'\\b'\n # the final regex ORs together the two patterns\n rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')',\n re.IGNORECASE | re.UNICODE)\n\n for col_name in table.colnames:\n if rex.match(col_name):\n if comp_name in comp_kwargs:\n oldname = comp_kwargs[comp_name].name\n msg = ('Found at least two matches for component \"{0}\"'\n ': \"{1}\" and \"{2}\". Cannot continue with this '\n 'ambiguity.')\n raise ValueError(msg.format(comp_name, oldname, col_name))\n comp_kwargs[comp_name] = table[col_name]\n\n for k, v in comp_kwargs.items():\n if k in coord_kwargs:\n raise ValueError('Found column \"{}\" in table, but it was '\n 'already provided as \"{}\" keyword to '\n 'guess_from_table function.'.format(v.name, k))\n else:\n coord_kwargs[k] = v\n\n return cls(**coord_kwargs)\n\n # Name resolve\n @classmethod\n def from_name(cls, name, frame='icrs', parse=False, cache=True):\n \"\"\"\n Given a name, query the CDS name resolver to attempt to retrieve\n coordinate information for that object. The search database, sesame\n url, and query timeout can be set through configuration items in\n ``astropy.coordinates.name_resolve`` -- see docstring for\n `~astropy.coordinates.get_icrs_coordinates` for more\n information.\n\n Parameters\n ----------\n name : str\n The name of the object to get coordinates for, e.g. ``'M42'``.\n frame : str or `BaseCoordinateFrame` class or instance\n The frame to transform the object to.\n parse: bool\n Whether to attempt extracting the coordinates from the name by\n parsing with a regex. For objects catalog names that have\n J-coordinates embedded in their names, e.g.,\n 'CRTS SSS100805 J194428-420209', this may be much faster than a\n Sesame query for the same object name. The coordinates extracted\n in this way may differ from the database coordinates by a few\n deci-arcseconds, so only use this option if you do not need\n sub-arcsecond accuracy for coordinates.\n cache : bool, optional\n Determines whether to cache the results or not. 
To update or\n overwrite an existing value, pass ``cache='update'``.\n\n Returns\n -------\n coord : SkyCoord\n Instance of the SkyCoord class.\n \"\"\"\n\n from .name_resolve import get_icrs_coordinates\n\n icrs_coord = get_icrs_coordinates(name, parse, cache=cache)\n icrs_sky_coord = cls(icrs_coord)\n if frame in ('icrs', icrs_coord.__class__):\n return icrs_sky_coord\n else:\n return icrs_sky_coord.transform_to(frame)\n"
] | [
[
"numpy.zeros",
"numpy.logical_not",
"numpy.all",
"numpy.array",
"numpy.isscalar"
]
] |
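The astropy row above stores the full `SkyCoord` implementation, whose docstrings describe the high-level operations (keyword construction with `ra`/`dec`, `transform_to`, `separation`, `position_angle`). As a quick orientation, here is a minimal usage sketch of those documented operations; it assumes astropy is installed, and the coordinate values are purely illustrative.

```python
# Minimal usage sketch of the SkyCoord API documented in the code above.
# Assumes astropy is installed; the coordinate values are illustrative only.
import astropy.units as u
from astropy.coordinates import SkyCoord

# Construct from RA/Dec keyword components, as described in the __init__ docstring.
c1 = SkyCoord(ra=10.684 * u.deg, dec=41.269 * u.deg, frame="icrs")
c2 = SkyCoord(ra=11.0 * u.deg, dec=41.0 * u.deg, frame="icrs")

# transform_to: convert to another frame, merging frame attributes according
# to the precedence rules laid out in the transform_to docstring.
gal = c1.transform_to("galactic")

# On-sky separation (Vincenty formula) and East-of-North position angle.
sep = c1.separation(c2)
pa = c1.position_angle(c2)

print(gal.l.deg, gal.b.deg, sep.arcmin, pa.deg)
```

The same pattern extends to the catalog-matching methods documented in the class (`match_to_catalog_sky`, `search_around_sky`), which additionally require SciPy.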
LauraOlivera/gammapy | [
"8aadf0ec524bcf51d0ac5655a04507d5d449e7ed"
] | [
"gammapy/estimators/tests/test_flux_point.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport astropy.units as u\nfrom astropy.table import Table\nfrom gammapy.catalog.fermi import SourceCatalog3FGL\nfrom gammapy.estimators import FluxPoints\nfrom gammapy.modeling.models import SpectralModel\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.utils.testing import (\n assert_quantity_allclose,\n mpl_plot_check,\n requires_data,\n requires_dependency,\n)\n\nFLUX_POINTS_FILES = [\n \"diff_flux_points.ecsv\",\n \"diff_flux_points.fits\",\n \"flux_points.ecsv\",\n \"flux_points.fits\",\n]\n\n\nclass LWTestModel(SpectralModel):\n @staticmethod\n def evaluate(x):\n return 1e4 * np.exp(-6 * x)\n\n def integral(self, xmin, xmax, **kwargs):\n return -1.0 / 6 * 1e4 * (np.exp(-6 * xmax) - np.exp(-6 * xmin))\n\n def inverse(self, y):\n return -1.0 / 6 * np.log(y * 1e-4)\n\n\nclass XSqrTestModel(SpectralModel):\n @staticmethod\n def evaluate(x):\n return x ** 2\n\n def integral(self, xmin, xmax, **kwargs):\n return 1.0 / 3 * (xmax ** 3 - xmin ** 2)\n\n def inverse(self, y):\n return np.sqrt(y)\n\n\nclass ExpTestModel(SpectralModel):\n @staticmethod\n def evaluate(x):\n return np.exp(x * u.Unit(\"1 / TeV\"))\n\n def integral(self, xmin, xmax, **kwargs):\n return np.exp(xmax * u.Unit(\"1 / TeV\")) - np.exp(xmin * u.Unit(\"1 / TeV\"))\n\n def inverse(self, y):\n return np.log(y * u.TeV) * u.TeV\n\n\ndef test_energy_ref_lafferty():\n \"\"\"\n Tests Lafferty & Wyatt x-point method.\n\n Using input function g(x) = 10^4 exp(-6x) against\n check values from paper Lafferty & Wyatt. Nucl. Instr. and Meth. in Phys.\n Res. A 355 (1995) 541-547, p. 542 Table 1\n \"\"\"\n # These are the results from the paper\n desired = np.array([0.048, 0.190, 0.428, 0.762])\n\n model = LWTestModel()\n energy_min = np.array([0.0, 0.1, 0.3, 0.6])\n energy_max = np.array([0.1, 0.3, 0.6, 1.0])\n actual = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)\n assert_allclose(actual, desired, atol=1e-3)\n\n\[email protected]\ndef test_dnde_from_flux():\n \"\"\"Tests y-value normalization adjustment method.\n \"\"\"\n table = Table()\n table[\"e_min\"] = np.array([10, 20, 30, 40])\n table[\"e_max\"] = np.array([20, 30, 40, 50])\n table[\"flux\"] = np.array([42, 52, 62, 72]) # 'True' integral flux in this test bin\n\n # Get values\n model = XSqrTestModel()\n table[\"e_ref\"] = FluxPoints._energy_ref_lafferty(model, table[\"e_min\"], table[\"e_max\"])\n dnde = FluxPoints.from_table(table, reference_model=model)\n\n # Set up test case comparison\n dnde_model = model(table[\"e_ref\"])\n\n # Test comparison result\n desired = model.integral(table[\"e_min\"], table[\"e_max\"])\n # Test output result\n actual = table[\"flux\"] * (dnde_model / dnde)\n # Compare\n assert_allclose(actual, desired, rtol=1e-6)\n\n\[email protected]\[email protected](\"method\", [\"table\", \"lafferty\", \"log_center\"])\ndef test_compute_flux_points_dnde_exp(method):\n \"\"\"\n Tests against analytical result or result from a powerlaw.\n \"\"\"\n model = ExpTestModel()\n\n energy_min = [1.0, 10.0] * u.TeV\n energy_max = [10.0, 100.0] * u.TeV\n\n table = Table()\n table.meta[\"SED_TYPE\"] = \"flux\"\n table[\"e_min\"] = energy_min\n table[\"e_max\"] = energy_max\n\n flux = model.integral(energy_min, energy_max)\n table[\"flux\"] = flux\n\n if method == \"log_center\":\n energy_ref = np.sqrt(energy_min * energy_max)\n elif method == \"table\":\n energy_ref = [2.0, 20.0] * u.TeV\n elif method == 
\"lafferty\":\n energy_ref = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)\n\n table[\"e_ref\"] = energy_ref\n\n result = FluxPoints.from_table(table, reference_model=model)\n\n # Test energy\n actual = result.energy_ref\n assert_quantity_allclose(actual, energy_ref, rtol=1e-8)\n\n # Test flux\n actual = result.dnde\n desired = model(energy_ref)\n assert_quantity_allclose(actual, desired, rtol=1e-8)\n\n\n@requires_data()\ndef test_fermi_to_dnde():\n from gammapy.catalog import SourceCatalog4FGL\n\n catalog_4fgl = SourceCatalog4FGL(\"$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz\")\n src = catalog_4fgl[\"FGES J1553.8-5325\"]\n fp = src.flux_points\n\n assert_allclose(\n fp.dnde.quantity[1, 0, 0],\n 4.567393e-10 * u.Unit(\"cm-2 s-1 MeV-1\"),\n rtol=1e-5,\n )\n\n\[email protected](params=FLUX_POINTS_FILES, scope=\"session\")\ndef flux_points(request):\n path = \"$GAMMAPY_DATA/tests/spectrum/flux_points/\" + request.param\n return FluxPoints.read(path)\n\n\[email protected](scope=\"session\")\ndef flux_points_likelihood():\n path = \"$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits\"\n return FluxPoints.read(path)\n\n\n@requires_data()\nclass TestFluxPoints:\n def test_info(self, flux_points):\n info = str(flux_points)\n assert \"geom\" in info\n assert \"axes\" in info\n assert \"ref. model\" in info\n assert \"quantities\" in info\n\n def test_energy_ref(self, flux_points):\n actual = flux_points.energy_ref\n desired = np.sqrt(flux_points.energy_min * flux_points.energy_max)\n assert_quantity_allclose(actual, desired)\n\n def test_energy_min(self, flux_points):\n actual = flux_points.energy_min\n desired = 299530.97 * u.MeV\n assert_quantity_allclose(actual.sum(), desired)\n\n def test_energy_max(self, flux_points):\n actual = flux_points.energy_max\n desired = 399430.975 * u.MeV\n assert_quantity_allclose(actual.sum(), desired)\n\n def test_write_fits(self, tmp_path, flux_points):\n flux_points.write(tmp_path / \"tmp.fits\", sed_type=flux_points.sed_type_init)\n actual = FluxPoints.read(tmp_path / \"tmp.fits\")\n assert str(flux_points) == str(actual)\n\n def test_write_ecsv(self, tmp_path, flux_points):\n flux_points.write(tmp_path / \"flux_points.ecsv\", sed_type=flux_points.sed_type_init)\n actual = FluxPoints.read(tmp_path / \"flux_points.ecsv\")\n assert str(flux_points) == str(actual)\n\n def test_quantity_access(self, flux_points_likelihood):\n assert flux_points_likelihood.sqrt_ts\n assert flux_points_likelihood.ts\n assert flux_points_likelihood.stat\n assert_allclose(flux_points_likelihood.n_sigma_ul, 2)\n assert flux_points_likelihood.sed_type_init == \"likelihood\"\n\n @requires_dependency(\"matplotlib\")\n def test_plot(self, flux_points):\n with mpl_plot_check():\n flux_points.plot()\n\n @requires_dependency(\"matplotlib\")\n def test_plot_likelihood(self, flux_points_likelihood):\n with mpl_plot_check():\n flux_points_likelihood.plot_ts_profiles()\n\n @requires_dependency(\"matplotlib\")\n def test_plot_likelihood_error(self, flux_points_likelihood):\n del flux_points_likelihood._data[\"stat_scan\"]\n with pytest.raises(AttributeError):\n flux_points_likelihood.plot_ts_profiles()\n\n\n@requires_data()\ndef test_compute_flux_points_dnde_fermi():\n \"\"\"\n Test compute_flux_points_dnde on fermi source.\n \"\"\"\n fermi_3fgl = SourceCatalog3FGL()\n source = fermi_3fgl[\"3FGL J0835.3-4510\"]\n flux_points = source.flux_points\n table = source.flux_points_table\n\n for column in [\"e2dnde\", \"e2dnde_errn\", \"e2dnde_errp\", \"e2dnde_ul\"]:\n actual = 
table[column].quantity\n desired = getattr(flux_points, column).quantity.squeeze()\n assert_quantity_allclose(actual[:-1], desired[:-1], rtol=0.05)\n\n@requires_data()\n@requires_dependency(\"matplotlib\")\ndef test_plot_fp_no_ul():\n path = make_path(\"$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits\")\n table = Table.read(path)\n table.remove_column('dnde_ul')\n fp = FluxPoints.from_table(table, sed_type='dnde')\n\n with mpl_plot_check():\n fp.plot()\n\n"
] | [
[
"numpy.sqrt",
"numpy.exp",
"numpy.log",
"numpy.testing.assert_allclose",
"numpy.array"
]
] |
MaxxWilson/ASE389Project | [
"13c3c72887e27fbed2eef63c1e27b4a185036a39"
] | [
"pnc/draco3_lb_pnc/draco3_lb_controller.py"
] | [
"import numpy as np\n\nfrom util import util\nfrom config.draco3_lb_config import PnCConfig, WBCConfig\nfrom pnc.wbc.ihwbc.ihwbc import IHWBC\nfrom pnc.wbc.ihwbc.joint_integrator import JointIntegrator\n\n\nclass Draco3LBController(object):\n def __init__(self, tci_container, robot):\n self._tci_container = tci_container\n self._robot = robot\n\n # Initialize WBC\n l_jp_idx, l_jd_idx, r_jp_idx, r_jd_idx = self._robot.get_q_dot_idx(\n ['l_knee_fe_jp', 'l_knee_fe_jd', 'r_knee_fe_jp', 'r_knee_fe_jd'])\n act_list = [False] * robot.n_floating + [True] * robot.n_a\n act_list[l_jd_idx] = False\n act_list[r_jd_idx] = False\n\n n_q_dot = len(act_list)\n n_active = np.count_nonzero(np.array(act_list))\n n_passive = n_q_dot - n_active - 6\n\n self._sa = np.zeros((n_active, n_q_dot))\n self._sv = np.zeros((n_passive, n_q_dot))\n j, k = 0, 0\n for i in range(n_q_dot):\n if i >= 6:\n if act_list[i]:\n self._sa[j, i] = 1.\n j += 1\n else:\n self._sv[k, i] = 1.\n k += 1\n self._sf = np.zeros((6, n_q_dot))\n self._sf[0:6, 0:6] = np.eye(6)\n\n self._ihwbc = IHWBC(self._sf, self._sa, self._sv, PnCConfig.SAVE_DATA)\n if WBCConfig.B_TRQ_LIMIT:\n self._ihwbc.trq_limit = np.dot(self._sa[:, 6:],\n self._robot.joint_trq_limit)\n self._ihwbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT\n self._ihwbc.lambda_rf = WBCConfig.LAMBDA_RF\n\n # Initialize Joint Integrator\n self._joint_integrator = JointIntegrator(robot.n_a,\n PnCConfig.CONTROLLER_DT)\n self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ\n self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ\n self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR\n self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit\n self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit\n\n self._b_first_visit = True\n\n def get_command(self):\n if self._b_first_visit:\n self.first_visit()\n\n # Dynamics properties\n mass_matrix = self._robot.get_mass_matrix()\n mass_matrix_inv = np.linalg.inv(mass_matrix)\n coriolis = self._robot.get_coriolis()\n gravity = self._robot.get_gravity()\n self._ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,\n gravity)\n # Task, Contact, and Internal Constraint Setup\n w_hierarchy_list = []\n for task in self._tci_container.task_list:\n task.update_jacobian()\n task.update_cmd()\n w_hierarchy_list.append(task.w_hierarchy)\n self._ihwbc.w_hierarchy = np.array(w_hierarchy_list)\n for contact in self._tci_container.contact_list:\n contact.update_contact()\n for internal_constraint in self._tci_container.internal_constraint_list:\n internal_constraint.update_internal_constraint()\n # WBC commands\n joint_trq_cmd, joint_acc_cmd, rf_cmd = self._ihwbc.solve(\n self._tci_container.task_list, self._tci_container.contact_list,\n self._tci_container.internal_constraint_list)\n joint_trq_cmd = np.dot(self._sa[:, 6:].transpose(), joint_trq_cmd)\n joint_acc_cmd = np.dot(self._sa[:, 6:].transpose(), joint_acc_cmd)\n # Double integration\n joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(\n joint_acc_cmd, self._robot.joint_velocities,\n self._robot.joint_positions)\n\n command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,\n joint_vel_cmd,\n joint_trq_cmd)\n return command\n\n def first_visit(self):\n joint_pos_ini = self._robot.joint_positions\n self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),\n joint_pos_ini)\n\n self._b_first_visit = False\n"
] | [
[
"numpy.eye",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.array",
"numpy.dot"
]
] |
Barry-lab/Publication_TanniDeCothiBarry2021 | [
"425bc0bd9a74b837d912820e9ea1539a111fcb1f"
] | [
"visual_change_analysis/main.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.stats import pearsonr\nfrom bin_data import bin_data\n\n# import pixel data \nright_z_pixel_change = np.load(\"right_z_pixel_change.npy\")\nleft_z_pixel_change = np.load(\"left_z_pixel_change.npy\")\nfront_z_pixel_change = np.load(\"front_z_pixel_change.npy\")\n\n# average pixel change across front, left & right fovs\npixel_change = np.vstack((left_z_pixel_change, front_z_pixel_change, right_z_pixel_change)).mean(axis=0)\n\n# import rate change data \ndat = pd.read_pickle(\"df_population_vector_change.p\")\n\n# Clean the data (sequential data points are 1cm apart along trajectory)\ndat = dat[dat.environment == 'D']\ndf = dat.filter(['animal', 'x_coord', 'y_coord', 'direction', 'timestamp'], axis=1)\ndat = dat[~df.isnull().any(axis=1)]\ngood_pixel_ids = np.array(np.diff(dat.x_coord)**2 + np.diff(dat.y_coord)**2 < 1.01, dtype=bool)\npixel_change = pixel_change[good_pixel_ids]\ngood_rate_ids = np.append(False, good_pixel_ids)\nturning_rate = np.abs(np.diff(dat['direction'])) % 360\nturning_rate = turning_rate[good_pixel_ids]\ndat = dat[good_rate_ids]\n\n# z-score data\ndat['rate change\\n(euclidean)'] = (dat['rate change\\n(euclidean)'] - np.mean(dat['rate change\\n(euclidean)']))/np.std(dat['rate change\\n(euclidean)'])\npixel_change = (pixel_change - np.mean(pixel_change))/np.std(pixel_change)\n\n# Plot Occupancy\noccupancy = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)])\nplt.imshow(occupancy.T, origin='upper', cmap=plt.get_cmap('jet'))\nplt.title('Occupancy')\nplt.show()\n\n# Plot pixel change across space\npixel_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change) / occupancy\nplt.imshow(pixel_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))\nplt.axis('off')\nplt.clim([-1.5,1.5])\nplt.title('Pixel Change Map')\nplt.show()\n\n# Plot firing rate change across space\nrate_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\\n(euclidean)']) / occupancy\nplt.imshow(rate_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))\nplt.axis('off')\nplt.clim([-1.5,1.5])\nplt.title('Rate Change Map')\nplt.show()\n\ncorr, _ = pearsonr(pixel_change, dat['rate change\\n(euclidean)'])\nprint('Rate Change vs Pixel Change Pearson r = %.3f' % corr)\n\n# Filter bits of trajectory by head direction\nnorth_ids = (np.degrees(dat.direction) % 360 >= 315) | (np.degrees(dat.direction) % 360 < 45)\nnorth_occupancy = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])\nsouth_ids = (np.degrees(dat.direction) % 360 >= 135) & (np.degrees(dat.direction) % 360 < 225)\nsouth_occupancy = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])\neast_ids = (np.degrees(dat.direction) % 360 >= 45) & (np.degrees(dat.direction) % 360 < 135)\neast_occupancy = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])\nwest_ids = (np.degrees(dat.direction) % 360 >= 225) & (np.degrees(dat.direction) % 360 < 315)\nwest_occupancy = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])\n\ncmap = plt.get_cmap('jet')\ncmap.set_bad('w',1.)\n\n# Calculate pixel and rate change maps by heading direction\nnorth_pix_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size 
= 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[north_ids]) / north_occupancy\nsouth_pix_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[south_ids]) / south_occupancy\neast_pix_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[east_ids]) / east_occupancy\nwest_pix_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[west_ids]) / west_occupancy\nnorth_rat_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\\n(euclidean)'][north_ids]) / north_occupancy\nsouth_rat_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\\n(euclidean)'][south_ids]) / south_occupancy\neast_rat_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\\n(euclidean)'][east_ids]) / east_occupancy\nwest_rat_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\\n(euclidean)'][west_ids]) / west_occupancy\n\nc_lo = -1.5\nc_hi = 1.5\n\n# Plot change maps filtered by direction\nplt.subplot(3,3,2)\nplt.title('Unfolded Pixel Change Map')\nplt.imshow(west_pix_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,4)\nplt.imshow(south_pix_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,5)\nplt.imshow(pixel_change_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,6)\nplt.imshow(north_pix_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,8)\nplt.imshow(east_pix_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.show()\n\nplt.subplot(3,3,2)\nplt.title('Unfolded Rate Change Map')\nplt.imshow(west_rat_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,4)\nplt.imshow(south_rat_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,5)\nplt.imshow(rate_change_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,6)\nplt.imshow(north_rat_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.subplot(3,3,8)\nplt.imshow(east_rat_map.T, origin='upper', cmap=cmap)\nplt.clim([c_lo,c_hi])\nplt.axis('off')\nplt.show()"
] | [
[
"numpy.vstack",
"numpy.load",
"pandas.read_pickle",
"numpy.degrees",
"scipy.stats.pearsonr",
"numpy.append",
"numpy.diff",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.imshow",
"numpy.std",
"numpy.mean"
]
] |
alik-git/mbrl-lib | [
"b364f8e64ca71ebd18147fe8cdbd3068b74e1f1e"
] | [
"mbrl/examples/main.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Iterable\nimport hydra\nimport numpy as np\nimport omegaconf\nimport torch\n\nimport mbrl.algorithms.mbpo as mbpo\nimport mbrl.algorithms.pets as pets\nimport mbrl.algorithms.planet as planet\nimport mbrl.algorithms.dreamer as dreamer #added April 2022 for project\nimport mbrl.util.env\n\nimport pandas as pd\nfrom collections import Iterable\n\nimport wandb\n\ndef flatten_config(cfg, curr_nested_key):\n \"\"\"The nested config file provided by Hydra cannot be parsed by wandb. This recursive function flattens the config file, separating the nested keys and their parents via an underscore. Allows for easier configuration using wandb.\n\n Args:\n cfg (Hydra config): The nested config file used by Hydra.\n curr_nested_key (str): The current parent key (used for recursive calls).\n\n Returns:\n (dict): A flatt configuration dictionary.\n \"\"\" \n \n flat_cfg = {}\n\n for curr_key in cfg.keys():\n\n # deal with missing values\n try:\n curr_item = cfg[curr_key]\n except Exception as e:\n curr_item = 'NA'\n\n # deal with lists\n if type(curr_item) == list or type(curr_item) == omegaconf.listconfig.ListConfig:\n for nested_idx, nested_item in enumerate(curr_item):\n list_nested_key = f\"{curr_nested_key}_{curr_key}_{nested_idx}\"\n flat_cfg[list_nested_key] = nested_item\n \n # check if item is also a config\n # recurse\n elif isinstance(curr_item, Iterable) and type(curr_item) != str:\n flat_cfg.update(flatten_config(curr_item, f\"{curr_nested_key}_{curr_key}\"))\n\n # otherwise just add to return dict\n else:\n flat_cfg[f\"{curr_nested_key}_{curr_key}\"] = curr_item\n\n return flat_cfg\n\[email protected](config_path=\"conf\", config_name=\"main\")\ndef run(cfg: omegaconf.DictConfig):\n env, term_fn, reward_fn = mbrl.util.env.EnvHandler.make_env(cfg)\n \n for config_item in cfg:\n wandb.config[config_item] = cfg[config_item]\n \n flat_cfg = flatten_config(cfg, \"\")\n for config_item in flat_cfg:\n wandb.config[config_item] = flat_cfg[config_item]\n \n np.random.seed(cfg.seed)\n torch.manual_seed(cfg.seed)\n if cfg.algorithm.name == \"pets\":\n return pets.train(env, term_fn, reward_fn, cfg)\n if cfg.algorithm.name == \"mbpo\":\n test_env, *_ = mbrl.util.env.EnvHandler.make_env(cfg)\n return mbpo.train(env, test_env, term_fn, cfg)\n if cfg.algorithm.name == \"planet\":\n return planet.train(env, cfg)\n if cfg.algorithm.name == \"dreamer\": #added for project\n return dreamer.train(env, cfg)\n\n\nif __name__ == \"__main__\":\n wandb.init(project=\"MBRL_Duckyt\", entity=\"mbrl_ducky\", monitor_gym=True)\n run()\n"
] | [
[
"torch.manual_seed",
"numpy.random.seed"
]
] |