Dataset schema (one record per repository source file):
  repo_name          string, length 6-130 (e.g. "MeRajat/skorch")
  hexsha             sequence of commit hashes
  file_path          sequence of file paths within the repository
  code               sequence of full file contents
  apis               sequence of fully-qualified API calls found in the file
  possible_versions  list of per-library candidate version maps
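The records below follow this schema. As a minimal, hedged sketch of how one record could be consumed (assuming the dump is stored as JSON Lines; the file name api_usage.jsonl and the storage format are assumptions, not stated in this dump):

import json

# Hypothetical reader for one record of this dump; JSON Lines storage and the
# file name are assumptions. Field names follow the schema above.
with open("api_usage.jsonl") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]                 # e.g. "ebbingcasa/cs224u"
        sha = record["hexsha"][0]                  # pinned commit hash
        path = record["file_path"][0]              # path of the source file
        source = record["code"][0]                 # full file contents as one string
        apis = record["apis"][0]                   # fully-qualified API calls
        versions = record["possible_versions"][0]  # {"numpy": [...], ...}
        print(repo, path, len(source), len(apis))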
ebbingcasa/cs224u
[ "d491b93ecb12a523bfbeb84230a5a3510493c448" ]
[ "test/test_utils.py" ]
[ "import numpy as np\nimport os\nimport pytest\nimport random\nimport utils\n\n__author__ = \"Christopher Potts\"\n__version__ = \"CS224u, Stanford, Spring 2021\"\n\n\nutils.fix_random_seeds()\n\n\[email protected](\"arg, expected\", [\n [\n np.array([0.0, 0.25, 0.75]),\n np.array([0.22721977, 0.29175596, 0.48102426])\n ]\n])\ndef test_softmax(arg, expected):\n result = utils.softmax(arg).round(8)\n expected = expected.round(8)\n assert np.array_equal(result, expected)\n\n\[email protected](\"arg, expected\", [\n [-1, 0],\n [np.array([-1.0, 1.0]), np.array([0.0, 0.0])]\n])\ndef test_d_tanh(arg, expected):\n assert np.array_equal(utils.d_tanh(arg), expected)\n\n\ndef test_randvec():\n x = utils.randvec(10)\n assert len(x) == 10\n\n\ndef test_randmatrix():\n X = utils.randmatrix(10, 20)\n assert X.shape == (10, 20)\n\n\ndef test_safe_macro_f1():\n y = [1, 1, 2, 2, 1]\n y_pred = [1, 2, 2, 1, 1]\n utils.safe_macro_f1(y, y_pred)\n\[email protected](\"arg, expected\", [\n [\n np.array([[1.0, 0.0], [0.0, 1.0]]),\n np.array([[0.0, 0.0], [0.0, 0.0]])\n ]\n])\ndef test_log_of_array_ignoring_zeros(arg, expected):\n result = utils.log_of_array_ignoring_zeros(arg)\n return np.array_equal(result, expected)\n\n\ndef test_glove2dict():\n src_filename = os.path.join(\"data\", \"glove.6B\", \"glove.6B.50d.txt\")\n data = utils.glove2dict(src_filename)\n assert len(data) == 400000\n\[email protected](\"X, n_words, mincount, expected\", [\n [\n [[\"a\", \"b\", \"c\"], [\"b\", \"c\", \"d\"]],\n None,\n 1,\n [\"$UNK\", \"a\", \"b\", \"c\", \"d\"]\n ],\n [\n [[\"a\", \"b\", \"c\"], [\"b\", \"c\", \"d\"]],\n 2,\n 1,\n [\"$UNK\", \"b\", \"c\"]\n ],\n [\n [],\n 2,\n 1,\n [\"$UNK\"]\n ],\n [\n [[\"a\", \"b\", \"b\"], [\"b\", \"c\", \"a\"]],\n None,\n 3,\n [\"$UNK\", \"b\"]\n ],\n [\n [[\"b\", \"b\", \"b\"], [\"b\", \"a\", \"a\", \"c\"]],\n 2,\n 3,\n [\"$UNK\", \"b\"]\n ],\n])\ndef test_get_vocab(X, n_words, mincount, expected):\n result = utils.get_vocab(X, n_words=n_words, mincount=mincount)\n assert result == expected\n\n\[email protected](\"lookup, vocab, required_tokens, expected_shape\", [\n [\n {\"a\": [1,2]}, [\"a\", \"b\"], [\"$UNK\"], (3,2)\n ],\n [\n {\"a\": [1,2], \"b\": [3,4]}, [\"b\"], [\"$UNK\"], (2,2)\n ]\n])\ndef test_create_pretrained_embedding(lookup, vocab, required_tokens, expected_shape):\n result, new_vocab = utils.create_pretrained_embedding(lookup, vocab, required_tokens)\n assert result.shape == expected_shape\n assert \"$UNK\" in new_vocab\n new_vocab.remove(\"$UNK\")\n assert vocab == new_vocab\n\n\[email protected](\"set_value\", [True, False])\ndef test_fix_random_seeds_system(set_value):\n params = dict(\n seed=42,\n set_system=set_value,\n set_tensorflow=False,\n set_torch=False,\n set_torch_cudnn=False)\n utils.fix_random_seeds(**params)\n x = np.random.random()\n utils.fix_random_seeds(**params)\n y = np.random.random()\n assert (x == y) == set_value\n\n\[email protected](\"set_value\", [True, False])\ndef test_fix_random_seeds_pytorch(set_value):\n import torch\n params = dict(\n seed=42,\n set_system=False,\n set_tensorflow=False,\n set_torch=set_value,\n set_torch_cudnn=set_value)\n utils.fix_random_seeds(**params)\n x = torch.rand(1)\n utils.fix_random_seeds(**params)\n y = torch.rand(1)\n assert (x == y) == set_value\n" ]
[ [ "numpy.array", "numpy.random.random", "torch.rand", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MeRajat/skorch
[ "6b013a1fa8b52cf781d54abbae515c755e1c6f9c" ]
[ "skorch/callbacks/regularization.py" ]
[ "\"\"\" Post-process regularization steps such as gradient normalizing. \"\"\"\n\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom skorch.callbacks import Callback\n\n\n__all__ = ['GradientNormClipping']\n\n\nclass GradientNormClipping(Callback):\n \"\"\"Clips gradient norm of a module's parameters.\n\n The norm is computed over all gradients together, as if they were\n concatenated into a single vector. Gradients are modified\n in-place.\n\n See ``torch.nn.utils.clip_grad_norm_`` for more information.\n\n Parameters\n ----------\n gradient_clip_value : float (default=None)\n If not None, clip the norm of all model parameter gradients to this\n value. The type of the norm is determined by the\n ``gradient_clip_norm_type`` parameter and defaults to L2.\n\n gradient_clip_norm_type : float (default=2)\n Norm to use when gradient clipping is active. The default is\n to use L2-norm. Can be 'inf' for infinity norm.\n\n \"\"\"\n def __init__(\n self,\n gradient_clip_value=None,\n gradient_clip_norm_type=2,\n ):\n self.gradient_clip_value = gradient_clip_value\n self.gradient_clip_norm_type = gradient_clip_norm_type\n\n def on_grad_computed(self, _, named_parameters, **kwargs):\n if self.gradient_clip_value is None:\n return\n\n clip_grad_norm_(\n (p for _, p in named_parameters),\n max_norm=self.gradient_clip_value,\n norm_type=self.gradient_clip_norm_type,\n )\n" ]
[ [ "torch.nn.utils.clip_grad_norm_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
giruenf/GRIPy-3
[ "ea0f5175bbdd966ca2df956f4667a806d035e532" ]
[ "classes/ui/propgrid.py" ]
[ "from collections import OrderedDict, Sequence\n\nimport wx\nimport wx.propgrid as pg\nimport matplotlib.colors as mcolors\n\nfrom classes.ui import UIManager\nfrom classes.ui import UIControllerObject\nfrom classes.ui import UIViewObject\nimport app.pubsub as pub\nfrom app.app_utils import MPL_COLORS, MPL_COLORMAPS\n\n\nclass GripyPgProperty(object):\n\n def __init__(self, obj_uid, obj_attr, getter_func=None, setter_func=None):\n self._obj_uid = obj_uid\n self._obj_attr = obj_attr\n self._getter_func = getter_func\n self._setter_func = setter_func\n\n def _get_object(self):\n app = wx.App.Get()\n Manager = app.get_manager_class(self._obj_uid[0])\n manager = Manager()\n obj = manager.get(self._obj_uid)\n return obj\n\n def _get_value(self):\n if self._getter_func:\n kwargs = {'obj_uid': self._obj_uid}\n return self._getter_func(self._obj_attr, **kwargs)\n obj = self._get_object()\n return obj[self._obj_attr]\n\n def _set_value(self, value):\n try:\n if self._setter_func:\n kwargs = {'obj_uid': self._obj_uid}\n return self._setter_func(self._obj_attr, value, **kwargs)\n obj = self._get_object()\n obj[self._obj_attr] = value\n return True\n except Exception as e:\n print('ERROR at GripyPgProperty._set_value:', e)\n # raise\n return False\n\n def ValueToString(self, *args):\n return str(self._get_value())\n\n def StringToValue(self, text, flag=0):\n variant = self._set_value(text)\n if self._set_value(text):\n return True, variant\n return False, variant\n\n\nclass StringProperty(pg.StringProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL):\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.StringProperty.__init__(self, label, name=obj_attr)\n\n\nclass IntProperty(pg.IntProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL):\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.IntProperty.__init__(self, label, name=obj_attr)\n\n\nclass FloatProperty(pg.FloatProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n getter_func=None, setter_func=None):\n GripyPgProperty.__init__(self, obj_uid, obj_attr,\n getter_func, setter_func)\n pg.FloatProperty.__init__(self, label, name=obj_attr)\n\n\nclass BoolProperty(pg.BoolProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL):\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.BoolProperty.__init__(self, label, name=obj_attr)\n #\n self.SetAttribute(\"UseCheckbox\", 1)\n self.SetAttribute(\"UseDClickCycling\", 1)\n # Setting m_value with GripyPgProperty value\n value = self._get_value()\n self.SetValue(value)\n\n def OnSetValue(self):\n # Called after m_value was setted...\n # print('\\nGripyPgProperty.OnSetValue')\n self._set_value(self.GetValue())\n\n\nclass EnumProperty(pg.EnumProperty, GripyPgProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None):\n\n if opt_labels is None:\n raise Exception('No options labels values found in: {} - model key: {}'. \\\n format(obj_uid, obj_attr)\n )\n\n try:\n\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.EnumProperty.__init__(self, label, obj_attr, opt_labels,\n values=list(range(len(opt_labels))), value=0)\n\n self._opt_labels = opt_labels\n self._opt_values = opt_values\n if self._opt_values is None:\n self._opt_values = opt_labels\n\n #\n val = self._get_value()\n # \n idx = self._get_index(val)\n self.SetValue(idx)\n\n except Exception as e:\n print('\\nDEU RUIM!!!! 
- {}\\n\\n\\n'.format(e))\n raise\n\n def _get_index(self, val):\n\n try:\n return self._opt_values.index(val)\n except ValueError:\n try:\n return [key.lower() for key in self._opt_values].index(val)\n except:\n try:\n return self._opt_labels.index(val)\n except:\n print()\n print('ERRO idx = self._opt_values.index(val)')\n print(self._opt_values)\n print(self._opt_labels)\n print(val, type(val))\n raise\n #\n\n def IntToValue(self, int_value, flag=0):\n \"\"\"Given a wx.Choice integer index, get its associated value \n (from opt_values) and set the object attribute with this value.\n \n Parameters\n ----------\n variant : not used\n Default parameter from wx.propgrid.EnumProperty.\n int_value : int\n A wx.Choice integer index. \n flag : not used\n Default parameter from wx.propgrid.EnumProperty.\n \n Returns\n -------\n ret_val : bool\n A value indicating operation was successful. \n \"\"\"\n\n opt_value = self._opt_values[int_value]\n ret_val = self._set_value(opt_value)\n return True, opt_value\n\n def ValueToString(self, value, flag=0):\n \"\"\"Get object property value and returns a string associated with it.\n This string will be selected on wx.Choice container.\n \n Parameters\n ----------\n value : not used\n Default parameter from wx.propgrid.EnumProperty.\n flag : not used\n Default parameter from wx.propgrid.EnumProperty.\n \n Returns\n -------\n ret_str : str\n A string that will be selected on wx.Choice container. \n \"\"\"\n\n val = self._get_value()\n idx = self._get_index(val)\n ret_str = str(self._opt_labels[idx])\n return ret_str\n\n def GetIndexForValue(self, value):\n \"\"\"Given a value, returns its associated index.\n \n Parameters\n ----------\n value : not used\n Default parameter from wx.propgrid.EnumProperty.\n\n Returns\n -------\n idx : int\n A integer index for given value. 
\n \"\"\"\n\n val = self._get_value()\n idx = self._get_index(val)\n return idx\n\n\nclass MPLColorsProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=list(MPL_COLORS.keys()))\n\n\nclass MPLColormapsProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=list(MPL_COLORMAPS))\n\n\nclass MPLScaleProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=[\"Linear\", \"Log\", \"Symlog\", \"Logit\"],\n opt_values=[\"linear\", \"log\", \"symlog\", \"logit\"]\n )\n\n\nclass MPLHAProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=[\"Center\", \"Right\", \"Left\"],\n opt_values=[\"center\", \"right\", \"left\"]\n )\n\n\nclass MPLVAProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=[\"Center\", \"Top\", \"Bottom\",\n \"Baseline\", \"Center baseline\"],\n opt_values=[\"center\", \"top\", \"bottom\",\n \"baseline\", \"center_baseline\"]\n )\n\n\nclass MPLFontWeightProperty(EnumProperty):\n\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n super().__init__(obj_uid, obj_attr, label=label,\n opt_labels=[\"Normal\", \"Regular\", \"Light\",\n \"Ultralight\", \"Book\", \"Medium\",\n \"Roman\", \"Semibold\", \"Demibold\",\n \"Demi\", \"Bold\", \"Heavy\", \"Extra bold\",\n \"Black\"],\n opt_values=[\"normal\", \"regular\", \"light\",\n \"ultralight\", \"book\", \"medium\",\n \"roman\", \"semibold\", \"demibold\",\n \"demi\", \"bold\", \"heavy\", \"extra bold\",\n \"black\"]\n )\n\n\nclass SystemColourProperty(pg.SystemColourProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.SystemColourProperty.__init__(self, label, name=obj_attr,\n value=pg.ColourPropertyValue())\n\n\nclass ColourProperty(pg.ColourProperty, GripyPgProperty):\n def __init__(self, obj_uid, obj_attr, label=pg.PG_LABEL,\n opt_labels=[], opt_values=None, values=None, value=0):\n\n GripyPgProperty.__init__(self, obj_uid, obj_attr)\n pg.ColourProperty.__init__(self, label, name=obj_attr)\n #\n color = self._get_value()\n #\n if mcolors.is_color_like(color):\n color = tuple([255 * c for c in mcolors.to_rgb(color)])\n #\n self.SetValue(color)\n\n def OnSetValue(self):\n # Called after m_value was setted...\n val = self.GetValue()\n color = None\n if isinstance(val, Sequence):\n color = tuple([c / 255 for c in val])\n color = mcolors.to_hex(color)\n elif isinstance(val, wx.Colour):\n color = val.GetAsString(wx.C2S_HTML_SYNTAX)\n\n self._set_value(color)\n\n def ValueToString(self, value, flag):\n ret_str = ''\n mpl_colors_values_list = list(MPL_COLORS.values())\n if wx.Colour(value) in mpl_colors_values_list:\n idx = mpl_colors_values_list.index(wx.Colour(value))\n ret_str = 
list(MPL_COLORS.keys())[idx]\n #\n mpl_colors_od = OrderedDict(mcolors.get_named_colors_mapping())\n mpl_colors_values_list = list(mpl_colors_od.values())\n #\n if not ret_str:\n value = tuple(c / 255 for c in value)\n if value in mpl_colors_values_list:\n idx = mpl_colors_values_list.index(value)\n ret_str = list(mpl_colors_od.keys())[idx]\n #\n if not ret_str:\n value = mcolors.to_hex(value)\n if value in mpl_colors_values_list:\n idx = mpl_colors_values_list.index(value)\n ret_str = list(mpl_colors_od.keys())[idx]\n else:\n ret_str = value\n # \n if ret_str == 'k':\n ret_str = 'Black'\n elif ret_str == 'w':\n ret_str = 'White'\n elif ret_str == 'b':\n ret_str = 'Blue'\n elif ret_str == 'g':\n ret_str = 'Green'\n elif ret_str == 'r':\n ret_str = 'Red'\n elif ret_str == 'c':\n ret_str = 'Cyan'\n elif ret_str == 'm':\n ret_str = 'Magenta'\n elif ret_str == 'w':\n ret_str = 'Yellow'\n elif ':' in ret_str:\n ret_str = ret_str.split(':')[-1]\n # \n ret_str = ret_str.capitalize()\n #\n return ret_str\n\n\ndef _get_pg_property(obj_uid, obj_attr, obj_attr_props):\n if obj_attr_props.get('label') is None:\n obj_attr_props['label'] = obj_attr\n\n getter_func = obj_attr_props.get('getter_func')\n setter_func = obj_attr_props.get('setter_func')\n #\n enable = obj_attr_props.get('enabled', True)\n prop = None\n #\n\n if obj_attr_props.get('pg_property') == 'IntProperty' or \\\n obj_attr_props.get('type') == int:\n prop = IntProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'FloatProperty' or \\\n obj_attr_props.get('type') == float:\n prop = FloatProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label'),\n getter_func=getter_func,\n setter_func=setter_func\n )\n\n elif obj_attr_props.get('pg_property') == 'EnumProperty':\n prop = EnumProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label'),\n opt_labels=obj_attr_props.get('options_labels'),\n opt_values=obj_attr_props.get('options_values')\n )\n\n elif obj_attr_props.get('pg_property') == 'MPLColorsProperty':\n prop = MPLColorsProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'MPLColormapsProperty':\n prop = MPLColormapsProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'MPLScaleProperty':\n prop = MPLScaleProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'MPLHAProperty':\n prop = MPLHAProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'MPLVAProperty':\n prop = MPLVAProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'SystemColourProperty':\n prop = SystemColourProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'ColourProperty':\n prop = ColourProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n\n elif obj_attr_props.get('pg_property') == 'BoolProperty' or \\\n obj_attr_props.get('type') == bool:\n prop = BoolProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif obj_attr_props.get('pg_property') == 'StringProperty' or \\\n obj_attr_props.get('type') == str:\n prop = StringProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n elif isinstance(obj_attr_props.get('type'), tuple) and \\\n obj_attr_props.get('type')[0] == tuple:\n prop = 
StringProperty(obj_uid, obj_attr,\n label=obj_attr_props.get('label')\n )\n\n if prop is not None:\n prop.Enable(enable)\n return prop\n else:\n raise Exception(' ERROR at _get_pg_property:', obj_uid, obj_attr, obj_attr_props)\n\n\n# BLUE_COLORS_SCALE = ['#030C54', '#022F8E', '#1C70C8', '#51A2D5']\n# BLUE_COLORS_SCALE = ['#022F8E', '#1C70C8', '#51A2D5']\nBLUE_COLORS_SCALE = ['#0A619A', '#406A97', '#1974D2']\n\n\nclass PropertyGridController(UIControllerObject):\n tid = 'property_grid_controller'\n\n _ATTRIBUTES = OrderedDict()\n _ATTRIBUTES['obj_uid'] = {\n 'default_value': None,\n 'type': 'uid'\n }\n\n def __init__(self, **state):\n super().__init__(**state)\n # self._properties = OrderedDict()\n # self._fake_properties = OrderedDict()\n self.subscribe(self.on_change_obj_uid, 'change.obj_uid')\n\n # def PostInit(self):\n\n # if self.obj_uid is not None:\n # self.on_change_obj_uid(self.obj_uid, None)\n\n def _get_object(self, obj_uid=None):\n if obj_uid is None:\n obj_uid = self.obj_uid\n app = wx.App.Get()\n Manager = app.get_manager_class(obj_uid[0])\n manager = Manager()\n obj = manager.get(obj_uid)\n return obj\n\n def _create_pg_categories(self, categories, start_with=None,\n blue_color_level=1):\n if start_with is None:\n start_with = self._root_category\n for name, data in categories.items():\n cat = pg.PropertyCategory(data['label'], name=name)\n # cat.SetBackgroundColour(wx.Colour(0, 128, blue_color))\n # self.view.SetCaptionBackgroundColour(wx.Colour(0, 128, blue_color))\n cell = cat.GetCell(0)\n cell.SetFgCol('white')\n # cell.SetBgCol(BLUE_COLORS_SCALE[blue_color_level])\n cell.SetBgCol(BLUE_COLORS_SCALE[2])\n self.view.AppendIn(start_with, cat)\n if data.get('children'):\n self._create_pg_categories(data['children'], cat,\n blue_color_level + 1)\n\n def on_change_obj_uid(self, new_value, old_value):\n # print('\\n\\non_change_obj_uid:', new_value, old_value)\n if old_value is not None:\n self.remove_properties(old_value)\n obj = self._get_object()\n title = obj.get_friendly_name()\n #\n self._root_category = pg.PropertyCategory(title, name='root')\n self.view.Append(self._root_category)\n #\n try:\n categories = obj._get_pg_categories()\n self._create_pg_categories(categories)\n except NotImplementedError:\n print('NotImplementedError: obj._get_pg_categories()')\n print(obj, obj.uid)\n categories = None\n #\n try:\n properties = obj._get_pg_properties()\n except NotImplementedError:\n print('NotImplementedError: obj._get_pg_properties()')\n print(obj, obj.uid)\n properties = obj._ATTRIBUTES\n # for key, key_props in od.items():\n # property_ = _get_pg_property(obj.uid, key, key_props)\n # self.view.Append(property_)\n\n #\n\n #\n for key, key_props in properties.items():\n try:\n property_ = _get_pg_property(obj.uid, key, key_props)\n\n if property_ is None:\n print('property_ is None:', key, key_props)\n\n if key_props.get('category'):\n category = self.view.GetProperty(key_props['category'])\n if category is None:\n raise Exception('Category not found:', key_props['category'])\n self.view.AppendIn(category, property_)\n else:\n self.view.Append(property_)\n #\n\n \"\"\"\n if key_props.get('getter_func') and \\\n key_props.get('listening'):\n for obj_uid, obj_key in key_props.get('listening'):\n print('\\nobj_uid:', obj_uid)\n print('obj_key:', obj_key)\n obj = self._get_object(obj_uid) \n obj.subscribe(self.refresh_property, \n 'change.' 
+ obj_key)\n #self._fake_properties[key] = property_\n else:\n \"\"\"\n # self._properties[key] = property_\n obj.subscribe(self.refresh_property, 'change.' + key)\n\n except Exception as e:\n print('\\nERRO loading properties:', obj, key, key_props, e)\n raise\n\n def remove_properties(self, obj_uid):\n # print('\\n\\n\\nRemoving ALL properties for:', obj_uid)\n UIM = UIManager()\n parent_controller_uid = UIM._getparentuid(self.uid)\n parent_controller = UIM.get(parent_controller_uid)\n # TODO: Retirar isso daqui...\n if parent_controller.view.splitter.IsSplit():\n parent_controller.view.splitter.Unsplit(self.view)\n #\n # if self._properties:\n obj = self._get_object(obj_uid)\n\n # print('self.view.GetPropertyValues():', self.view.GetPropertyValues())\n\n for key, value in self._properties.items():\n obj.unsubscribe(self.refresh_property, 'change.' + key)\n\n # self._properties.clear()\n self.view.Clear()\n\n # print('Removed properties for:', obj_uid, ' OK!')\n\n def refresh_property(self, new_value, old_value, topicObj=pub.AUTO_TOPIC):\n \"\"\"\n Refresh a property, when it is changed.\n \"\"\"\n key = topicObj.getName().split('.')[-1]\n prop = self.view.GetPropertyByName(key)\n # print('refresh_property:', new_value, old_value, key, topicObj, prop)\n # print('self.view.GetPropertyKeys():', self.view.GetPropertyValues().keys())\n self.view.RefreshProperty(prop)\n\n\nclass PropertyGridView(UIViewObject, pg.PropertyGrid):\n tid = 'property_grid_view'\n\n def __init__(self, controller_uid):\n try:\n UIViewObject.__init__(self, controller_uid)\n UIM = UIManager()\n parent_controller_uid = UIM._getparentuid(self._controller_uid)\n parent_controller = UIM.get(parent_controller_uid)\n wx_parent = parent_controller._get_wx_parent(self.tid)\n pg.PropertyGrid.__init__(self, wx_parent,\n style=pg.PG_SPLITTER_AUTO_CENTER # |\\\n # pg.PG_HIDE_MARGIN\n )\n self.SetMarginColour('white')\n self.SetCaptionBackgroundColour(BLUE_COLORS_SCALE[2])\n self.SetCaptionTextColour('white')\n\n\n except Exception as e:\n print('ERRO PropertyGridView.__init__:', e)\n" ]
[ [ "matplotlib.colors.get_named_colors_mapping", "matplotlib.colors.is_color_like", "matplotlib.colors.to_hex", "matplotlib.colors.to_rgb" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
junyoungkim22/sqlova
[ "b7525459dfc7033a071eead3d29b0849906b7974" ]
[ "sqlova/utils/utils_wikisql.py" ]
[ "# Copyright 2019-present NAVER Corp.\n# Apache License v2.0\n\n# Wonseok Hwang\n\nimport os, json\nimport random as rd\nfrom copy import deepcopy\n\nfrom matplotlib.pylab import *\n\nimport torch\nimport torchvision.datasets as dsets\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nfrom .utils import generate_perm_inv\nfrom .utils import json_default_type_checker\n\nfrom .wikisql_formatter import get_squad_style_ans\n\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Load data -----------------------------------------------------------------------------------------------\ndef load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False):\n # Get data\n train_data, train_table = load_wikisql_data(path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok, aug=aug)\n dev_path = os.path.join('bert_and_wikisql', 'wikisql')\n dev_data, dev_table = load_wikisql_data(dev_path, mode='dev', toy_model=toy_model, toy_size=toy_size, no_hs_tok=no_hs_tok)\n\n\n # Get word vector\n if no_w2i:\n w2i, wemb = None, None\n else:\n w2i, wemb = load_w2i_wemb(path_wikisql, bert)\n\n\n return train_data, train_table, dev_data, dev_table, w2i, wemb\n\n\ndef load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False):\n \"\"\" Load training sets\n \"\"\"\n if aug:\n mode = f\"aug.{mode}\"\n print('Augmented data is loaded!')\n\n path_sql = os.path.join(path_wikisql, mode+'_tok.jsonl')\n if no_hs_tok:\n if mode is 'train':\n path_table = os.path.join('bert_and_wikisql', 'wikisql', 'train.tables.jsonl')\n else:\n path_table = os.path.join(path_wikisql, mode + '.tables.jsonl')\n else:\n path_table = os.path.join(path_wikisql, mode+'_tok.tables.jsonl')\n\n data = []\n table = {}\n with open(path_sql) as f:\n for idx, line in enumerate(f):\n if toy_model and idx >= toy_size:\n break\n\n t1 = json.loads(line.strip())\n data.append(t1)\n\n with open(path_table) as f:\n for idx, line in enumerate(f):\n if toy_model and idx > toy_size:\n break\n\n t1 = json.loads(line.strip())\n table[t1['id']] = t1\n\n return data, table\n\n\ndef load_w2i_wemb(path_wikisql, bert=False):\n \"\"\" Load pre-made subset of TAPI.\n \"\"\"\n if bert:\n with open(os.path.join(path_wikisql, 'w2i_bert.json'), 'r') as f_w2i:\n w2i = json.load(f_w2i)\n wemb = load(os.path.join(path_wikisql, 'wemb_bert.npy'), )\n else:\n with open(os.path.join(path_wikisql, 'w2i.json'), 'r') as f_w2i:\n w2i = json.load(f_w2i)\n\n wemb = load(os.path.join(path_wikisql, 'wemb.npy'), )\n return w2i, wemb\n\ndef get_loader_wikisql(data_train, data_dev, bS, shuffle_train=True, shuffle_dev=False):\n train_loader = torch.utils.data.DataLoader(\n batch_size=bS,\n dataset=data_train,\n shuffle=shuffle_train,\n num_workers=4,\n collate_fn=lambda x: x # now dictionary values are not merged!\n )\n\n dev_loader = torch.utils.data.DataLoader(\n batch_size=bS,\n dataset=data_dev,\n shuffle=shuffle_dev,\n num_workers=4,\n collate_fn=lambda x: x # now dictionary values are not merged!\n )\n\n return train_loader, dev_loader\n\n\ndef get_fields_1(t1, tables, no_hs_t=False, no_sql_t=False):\n nlu1 = t1['question']\n nlu_t1 = t1['question_tok']\n tid1 = t1['table_id']\n sql_i1 = t1['sql']\n sql_q1 = t1['query']\n if no_sql_t:\n sql_t1 = None\n else:\n sql_t1 = t1['query_tok']\n\n tb1 = tables[tid1]\n if not no_hs_t:\n hs_t1 = tb1['header_tok']\n else:\n hs_t1 = []\n hs1 = tb1['header']\n\n return nlu1, nlu_t1, tid1, sql_i1, 
sql_q1, sql_t1, tb1, hs_t1, hs1\n\ndef get_fields(t1s, tables, no_hs_t=False, no_sql_t=False):\n\n nlu, nlu_t, tid, sql_i, sql_q, sql_t, tb, hs_t, hs = [], [], [], [], [], [], [], [], []\n for t1 in t1s:\n if no_hs_t:\n nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1 = get_fields_1(t1, tables, no_hs_t, no_sql_t)\n else:\n nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1 = get_fields_1(t1, tables, no_hs_t, no_sql_t)\n\n nlu.append(nlu1)\n nlu_t.append(nlu_t1)\n tid.append(tid1)\n sql_i.append(sql_i1)\n sql_q.append(sql_q1)\n sql_t.append(sql_t1)\n\n tb.append(tb1)\n hs_t.append(hs_t1)\n hs.append(hs1)\n\n return nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs\n\n\n# Embedding -------------------------------------------------------------------------\n\ndef word_to_idx1(words1, w2i, no_BE):\n w2i_l1 = []\n l1 = len(words1) # +2 because of <BEG>, <END>\n\n\n for w in words1:\n idx = w2i.get(w, 0)\n w2i_l1.append(idx)\n\n if not no_BE:\n l1 += 2\n w2i_l1 = [1] + w2i_l1 + [2]\n\n return w2i_l1, l1\n\n\ndef words_to_idx(words, w2i, no_BE=False):\n \"\"\"\n Input: [ ['I', 'am', 'hero'],\n ['You', 'are 'geneus'] ]\n output:\n\n w2i = [ B x max_seq_len, 1]\n wemb = [B x max_seq_len, dim]\n\n - Zero-padded when word is not available (teated as <UNK>)\n \"\"\"\n bS = len(words)\n l = torch.zeros(bS, dtype=torch.long).to(device) # length of the seq. of words.\n w2i_l_list = [] # shall be replaced to arr\n\n # wemb_NLq_batch = []\n\n for i, words1 in enumerate(words):\n\n w2i_l1, l1 = word_to_idx1(words1, w2i, no_BE)\n w2i_l_list.append(w2i_l1)\n l[i] = l1\n\n # Prepare tensor of wemb\n # overwrite w2i_l\n w2i_l = torch.zeros([bS, int(max(l))], dtype=torch.long).to(device)\n for b in range(bS):\n w2i_l[b, :l[b]] = torch.LongTensor(w2i_l_list[b]).to(device)\n\n return w2i_l, l\n\ndef hs_to_idx(hs_t, w2i, no_BE=False):\n \"\"\" Zero-padded when word is not available (teated as <UNK>)\n Treat each \"header tokens\" as if they are NL-utterance tokens.\n \"\"\"\n\n bS = len(hs_t) # now, B = B_NLq\n hpu_t = [] # header pseudo-utterance\n l_hs = []\n for hs_t1 in hs_t:\n hpu_t += hs_t1\n l_hs1 = len(hs_t1)\n l_hs.append(l_hs1)\n\n w2i_hpu, l_hpu = words_to_idx(hpu_t, w2i, no_BE=no_BE)\n return w2i_hpu, l_hpu, l_hs\n\n\n# Encoding ---------------------------------------------------------------------\n\ndef encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False):\n \"\"\" [batch_size, max token length, dim_emb]\n \"\"\"\n bS, mL, eS = wemb_l.shape\n\n\n # sort before packking\n l = array(l)\n perm_idx = argsort(-l)\n perm_idx_inv = generate_perm_inv(perm_idx)\n\n # pack sequence\n\n packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],\n l[perm_idx],\n batch_first=True)\n\n # Time to encode\n if hc0 is not None:\n hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])\n\n # ipdb.set_trace()\n packed_wemb_l = packed_wemb_l.float() # I don't know why..\n packed_wenc, hc_out = lstm(packed_wemb_l, hc0)\n hout, cout = hc_out\n\n # unpack\n wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)\n\n if last_only:\n # Take only final outputs for each columns.\n wenc = wenc[tuple(range(bS)), l[perm_idx] - 1] # [batch_size, dim_emb]\n wenc.unsqueeze_(1) # [batch_size, 1, dim_emb]\n\n wenc = wenc[perm_idx_inv]\n\n\n\n if return_hidden:\n # hout.shape = [number_of_directoin * num_of_layer, seq_len(=batch size), dim * number_of_direction ] w/ batch_first.. w/o batch_first? 
I need to see.\n hout = hout[:, perm_idx_inv].to(device)\n cout = cout[:, perm_idx_inv].to(device) # Is this correct operation?\n\n return wenc, hout, cout\n else:\n return wenc\n\n\ndef encode_hpu(lstm, wemb_hpu, l_hpu, l_hs):\n wenc_hpu, hout, cout = encode( lstm,\n wemb_hpu,\n l_hpu,\n return_hidden=True,\n hc0=None,\n last_only=True )\n\n wenc_hpu = wenc_hpu.squeeze(1)\n bS_hpu, mL_hpu, eS = wemb_hpu.shape\n hS = wenc_hpu.size(-1)\n\n wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS)\n wenc_hs = wenc_hs.to(device)\n\n # Re-pack according to batch.\n # ret = [B_NLq, max_len_headers_all, dim_lstm]\n st = 0\n for i, l_hs1 in enumerate(l_hs):\n wenc_hs[i, :l_hs1] = wenc_hpu[st:(st + l_hs1)]\n st += l_hs1\n\n return wenc_hs\n\n\n# Statistics -------------------------------------------------------------------------------------------------------------------\n\n\n\ndef get_wc1(conds):\n \"\"\"\n [ [wc, wo, wv],\n [wc, wo, wv], ...\n ]\n \"\"\"\n wc1 = []\n for cond in conds:\n wc1.append(cond[0])\n return wc1\n\n\ndef get_wo1(conds):\n \"\"\"\n [ [wc, wo, wv],\n [wc, wo, wv], ...\n ]\n \"\"\"\n wo1 = []\n for cond in conds:\n wo1.append(cond[1])\n return wo1\n\n\ndef get_wv1(conds):\n \"\"\"\n [ [wc, wo, wv],\n [wc, wo, wv], ...\n ]\n \"\"\"\n wv1 = []\n for cond in conds:\n wv1.append(cond[2])\n return wv1\n\n\ndef get_g(sql_i):\n \"\"\" for backward compatibility, separated with get_g\"\"\"\n g_sc = []\n g_sa = []\n g_wn = []\n g_wc = []\n g_wo = []\n g_wv = []\n for b, psql_i1 in enumerate(sql_i):\n g_sc.append( psql_i1[\"sel\"] )\n g_sa.append( psql_i1[\"agg\"])\n\n conds = psql_i1['conds']\n if not psql_i1[\"agg\"] < 0:\n g_wn.append( len( conds ) )\n g_wc.append( get_wc1(conds) )\n g_wo.append( get_wo1(conds) )\n g_wv.append( get_wv1(conds) )\n else:\n raise EnvironmentError\n return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv\n\ndef get_g_wvi_corenlp(t):\n g_wvi_corenlp = []\n for t1 in t:\n g_wvi_corenlp.append( t1['wvi_corenlp'] )\n return g_wvi_corenlp\n\n\ndef update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb):\n \"\"\" Follow same approach from SQLNet author's code.\n Used inside of generaet_w2i_wemb.\n \"\"\"\n\n # global idx_w2i, w2i, wemb # idx, word2vec, word to idx dictionary, list of embedding vec, n_total: total number of words\n if (word in wv) and (word not in w2i):\n idx_w2i += 1\n w2i[word] = idx_w2i\n wemb.append(wv[word])\n n_total += 1\n return idx_w2i, n_total\n\ndef make_w2i_wemb(args, path_save_w2i_wemb, wv, data_train, data_dev, data_test, table_train, table_dev, table_test):\n\n w2i = {'<UNK>': 0, '<BEG>': 1, '<END>': 2} # to use it when embeds NL query.\n idx_w2i = 2\n n_total = 3\n\n wemb = [np.zeros(300, dtype=np.float32) for _ in range(3)] # 128 is of TAPI vector.\n idx_w2i, n_total = generate_w2i_wemb(data_train, wv, idx_w2i, n_total, w2i, wemb)\n idx_w2i, n_total = generate_w2i_wemb_table(table_train, wv, idx_w2i, n_total, w2i, wemb)\n\n idx_w2i, n_total = generate_w2i_wemb(data_dev, wv, idx_w2i, n_total, w2i, wemb)\n idx_w2i, n_total = generate_w2i_wemb_table(table_dev, wv, idx_w2i, n_total, w2i, wemb)\n\n idx_w2i, n_total = generate_w2i_wemb(data_test, wv, idx_w2i, n_total, w2i, wemb)\n idx_w2i, n_total = generate_w2i_wemb_table(table_test, wv, idx_w2i, n_total, w2i, wemb)\n\n path_w2i = os.path.join(path_save_w2i_wemb, 'w2i.json')\n path_wemb = os.path.join(path_save_w2i_wemb, 'wemb.npy')\n\n wemb = np.stack(wemb, axis=0)\n\n with open(path_w2i, 'w') as f_w2i:\n json.dump(w2i, f_w2i)\n\n np.save(path_wemb, wemb)\n\n return w2i, wemb\n\ndef 
generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb):\n \"\"\" Generate subset of GloVe\n update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables.\n\n To do\n 1. What should we do with the numeric?\n \"\"\"\n # word_set from NL query\n for table_id, table_contents in tables.items():\n\n # NLq = t1['question']\n # word_tokens = NLq.rstrip().replace('?', '').split(' ')\n headers = table_contents['header_tok'] # [ ['state/terriotry'], ['current', 'slogan'], [],\n for header_tokens in headers:\n for token in header_tokens:\n idx_w2i, n_total = update_w2i_wemb(token, wv, idx_w2i, n_total, w2i, wemb)\n # WikiSQL generaets unbelivable query... using state/territory in the NLq. Unnatural.. but as is\n # when there is slash, unlike original SQLNet which treats them as single token, we use\n # both tokens. e.g. 'state/terriotry' -> 'state'\n # token_spl = token.split('/')\n # for token_spl1 in token_spl:\n # idx_w2i, n_total = update_w2i_wemb(token_spl1, wv, idx_w2i, n_total, w2i, wemb)\n\n return idx_w2i, n_total\ndef generate_w2i_wemb(train_data, wv, idx_w2i, n_total, w2i, wemb):\n \"\"\" Generate subset of GloVe\n update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables.\n\n To do\n 1. What should we do with the numeric?\n \"\"\"\n # word_set from NL query\n for i, t1 in enumerate(train_data):\n # NLq = t1['question']\n # word_tokens = NLq.rstrip().replace('?', '').split(' ')\n word_tokens = t1['question_tok']\n # Currently, TAPI does not use \"?\". So, it is removed.\n for word in word_tokens:\n idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)\n n_total += 1\n\n return idx_w2i, n_total\n\ndef generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb):\n \"\"\" Generate subset of TAPI from english-to-korean dict of table headers etc..\n update_w2i_wemb. It uses wv, w2i, wemb, idx_w2i as global variables.\n\n To do\n 1. What should we do with the numeric?\n Current version do not treat them specially. But this would be modified later so that we can use tags.\n \"\"\"\n # word_set from NL query\n for table_name, e2k_dict in e2k_dicts.items():\n word_tokens_list = list(e2k_dict.values())\n # Currently, TAPI does not use \"?\". 
So, it is removed.\n for word_tokens in word_tokens_list:\n for word in word_tokens:\n idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)\n n_total += 1\n\n return idx_w2i, n_total\n\n\n# BERT =================================================================================================================\ndef tokenize_nlu1(tokenizer, nlu1):\n nlu1_tok = tokenizer.tokenize(nlu1)\n return nlu1_tok\n\n\ndef tokenize_hds1(tokenizer, hds1):\n hds_all_tok = []\n for hds11 in hds1:\n sub_tok = tokenizer.tokenize(hds11)\n hds_all_tok.append(sub_tok)\n\ndef generate_inputs(tokenizer, nlu1_tok, hds1):\n tokens = []\n segment_ids = []\n\n tokens.append(\"[CLS]\")\n i_st_nlu = len(tokens) # to use it later\n\n segment_ids.append(0)\n for token in nlu1_tok:\n tokens.append(token)\n segment_ids.append(0)\n i_ed_nlu = len(tokens)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n i_hds = []\n # for doc\n for i, hds11 in enumerate(hds1):\n i_st_hd = len(tokens)\n sub_tok = tokenizer.tokenize(hds11)\n tokens += sub_tok\n i_ed_hd = len(tokens)\n i_hds.append((i_st_hd, i_ed_hd))\n segment_ids += [1] * len(sub_tok)\n if i < len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n elif i == len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n else:\n raise EnvironmentError\n\n i_nlu = (i_st_nlu, i_ed_nlu)\n\n return tokens, segment_ids, i_nlu, i_hds\n\ndef gen_l_hpu(i_hds):\n \"\"\"\n # Treat columns as if it is a batch of natural language utterance with batch-size = # of columns * # of batch_size\n i_hds = [(17, 18), (19, 21), (22, 23), (24, 25), (26, 29), (30, 34)])\n \"\"\"\n l_hpu = []\n for i_hds1 in i_hds:\n for i_hds11 in i_hds1:\n l_hpu.append(i_hds11[1] - i_hds11[0])\n\n return l_hpu\n\ndef get_bert_output_s2s(model_bert, tokenizer, nlu_t, hds, sql_vocab, max_seq_length):\n \"\"\"\n s2s version. Treat SQL-tokens as pseudo-headers\n sql_vocab = (\"sql select\", \"sql where\", \"sql and\", \"sql equal\", \"sql greater than\", \"sql less than\")\n\n e.g.)\n Q: What is the name of the player with score greater than 15?\n H: Name of the player, score\n Input: [CLS], what, is, ...,\n [SEP], name, of, the, player, [SEP], score,\n [SEP] sql, select, [SEP], sql, where, [SEP], sql, and, [SEP], ...\n\n Here, input is tokenized further by WordPiece (WP) tokenizer and fed into BERT.\n\n INPUT\n :param model_bert:\n :param tokenizer: WordPiece toknizer\n :param nlu: Question\n :param nlu_t: CoreNLP tokenized nlu.\n :param hds: Headers\n :param hs_t: None or 1st-level tokenized headers\n :param max_seq_length: max input token length\n\n OUTPUT\n tokens: BERT input tokens\n nlu_tt: WP-tokenized input natural language questions\n orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token\n tok_to_orig_index: inverse map.\n\n \"\"\"\n\n\n l_n = []\n l_hs = [] # The length of columns for each batch\n l_input = []\n input_ids = []\n tokens = []\n segment_ids = []\n input_mask = []\n\n i_nlu = [] # index to retreive the position of contextual vector later.\n i_hds = []\n i_sql_vocab = []\n\n doc_tokens = []\n nlu_tt = []\n\n t_to_tt_idx = []\n tt_to_t_idx = []\n for b, nlu_t1 in enumerate(nlu_t):\n\n hds1 = hds[b]\n l_hs.append(len(hds1))\n\n\n # 1. 
2nd tokenization using WordPiece\n tt_to_t_idx1 = [] # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).\n t_to_tt_idx1 = [] # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens.\n nlu_tt1 = [] # all_doc_tokens[ orig_to_tok_idx[i] ] returns first sub-token segement of i-th-1st-level-token\n for (i, token) in enumerate(nlu_t1):\n t_to_tt_idx1.append(\n len(nlu_tt1)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens.\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tt_to_t_idx1.append(i)\n nlu_tt1.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer\n nlu_tt.append(nlu_tt1)\n tt_to_t_idx.append(tt_to_t_idx1)\n t_to_tt_idx.append(t_to_tt_idx1)\n\n l_n.append(len(nlu_tt1))\n # hds1_all_tok = tokenize_hds1(tokenizer, hds1)\n\n\n\n # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]\n # 2. Generate BERT inputs & indices.\n # Combine hds1 and sql_vocab\n tokens1, segment_ids1, i_sql_vocab1, i_nlu1, i_hds1 = generate_inputs_s2s(tokenizer, nlu_tt1, hds1, sql_vocab)\n\n # i_hds1\n input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)\n\n # Input masks\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask1 = [1] * len(input_ids1)\n\n # 3. Zero-pad up to the sequence length.\n l_input.append( len(input_ids1) )\n while len(input_ids1) < max_seq_length:\n input_ids1.append(0)\n input_mask1.append(0)\n segment_ids1.append(0)\n\n assert len(input_ids1) == max_seq_length\n assert len(input_mask1) == max_seq_length\n assert len(segment_ids1) == max_seq_length\n\n input_ids.append(input_ids1)\n tokens.append(tokens1)\n segment_ids.append(segment_ids1)\n input_mask.append(input_mask1)\n\n i_nlu.append(i_nlu1)\n i_hds.append(i_hds1)\n i_sql_vocab.append(i_sql_vocab1)\n\n # Convert to tensor\n all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)\n all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)\n all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)\n\n # 4. Generate BERT output.\n all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)\n\n # 5. generate l_hpu from i_hds\n l_hpu = gen_l_hpu(i_hds)\n\n return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, i_sql_vocab, \\\n l_n, l_hpu, l_hs, l_input, \\\n nlu_tt, t_to_tt_idx, tt_to_t_idx\n\n\ndef get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):\n \"\"\"\n Here, input is toknized further by WordPiece (WP) tokenizer and fed into BERT.\n\n INPUT\n :param model_bert:\n :param tokenizer: WordPiece toknizer\n :param nlu: Question\n :param nlu_t: CoreNLP tokenized nlu.\n :param hds: Headers\n :param hs_t: None or 1st-level tokenized headers\n :param max_seq_length: max input token length\n\n OUTPUT\n tokens: BERT input tokens\n nlu_tt: WP-tokenized input natural language questions\n orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token\n tok_to_orig_index: inverse map.\n\n \"\"\"\n\n l_n = []\n l_hs = [] # The length of columns for each batch\n\n input_ids = []\n tokens = []\n segment_ids = []\n input_mask = []\n\n i_nlu = [] # index to retreive the position of contextual vector later.\n i_hds = []\n\n doc_tokens = []\n nlu_tt = []\n\n t_to_tt_idx = []\n tt_to_t_idx = []\n for b, nlu_t1 in enumerate(nlu_t):\n\n hds1 = hds[b]\n l_hs.append(len(hds1))\n\n\n # 1. 
2nd tokenization using WordPiece\n tt_to_t_idx1 = [] # number indicates where sub-token belongs to in 1st-level-tokens (here, CoreNLP).\n t_to_tt_idx1 = [] # orig_to_tok_idx[i] = start index of i-th-1st-level-token in all_tokens.\n nlu_tt1 = [] # all_doc_tokens[ orig_to_tok_idx[i] ] returns first sub-token segement of i-th-1st-level-token\n for (i, token) in enumerate(nlu_t1):\n t_to_tt_idx1.append(\n len(nlu_tt1)) # all_doc_tokens[ indicate the start position of original 'white-space' tokens.\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tt_to_t_idx1.append(i)\n nlu_tt1.append(sub_token) # all_doc_tokens are further tokenized using WordPiece tokenizer\n nlu_tt.append(nlu_tt1)\n tt_to_t_idx.append(tt_to_t_idx1)\n t_to_tt_idx.append(t_to_tt_idx1)\n\n l_n.append(len(nlu_tt1))\n # hds1_all_tok = tokenize_hds1(tokenizer, hds1)\n\n\n\n # [CLS] nlu [SEP] col1 [SEP] col2 [SEP] ...col-n [SEP]\n # 2. Generate BERT inputs & indices.\n tokens1, segment_ids1, i_nlu1, i_hds1 = generate_inputs(tokenizer, nlu_tt1, hds1)\n input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)\n\n # Input masks\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask1 = [1] * len(input_ids1)\n\n # 3. Zero-pad up to the sequence length.\n while len(input_ids1) < max_seq_length:\n input_ids1.append(0)\n input_mask1.append(0)\n segment_ids1.append(0)\n\n assert len(input_ids1) == max_seq_length\n assert len(input_mask1) == max_seq_length\n assert len(segment_ids1) == max_seq_length\n\n input_ids.append(input_ids1)\n tokens.append(tokens1)\n segment_ids.append(segment_ids1)\n input_mask.append(input_mask1)\n\n i_nlu.append(i_nlu1)\n i_hds.append(i_hds1)\n\n # Convert to tensor\n all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)\n all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)\n all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)\n\n # 4. Generate BERT output.\n all_encoder_layer, pooled_output = model_bert(all_input_ids, all_segment_ids, all_input_mask)\n\n # 5. 
generate l_hpu from i_hds\n l_hpu = gen_l_hpu(i_hds)\n\n return all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, \\\n l_n, l_hpu, l_hs, \\\n nlu_tt, t_to_tt_idx, tt_to_t_idx\n\n\n\ndef get_wemb_n(i_nlu, l_n, hS, num_hidden_layers, all_encoder_layer, num_out_layers_n):\n \"\"\"\n Get the representation of each tokens.\n \"\"\"\n bS = len(l_n)\n l_n_max = max(l_n)\n wemb_n = torch.zeros([bS, l_n_max, hS * num_out_layers_n]).to(device)\n for b in range(bS):\n # [B, max_len, dim]\n # Fill zero for non-exist part.\n l_n1 = l_n[b]\n i_nlu1 = i_nlu[b]\n for i_noln in range(num_out_layers_n):\n i_layer = num_hidden_layers - 1 - i_noln\n st = i_noln * hS\n ed = (i_noln + 1) * hS\n wemb_n[b, 0:(i_nlu1[1] - i_nlu1[0]), st:ed] = all_encoder_layer[i_layer][b, i_nlu1[0]:i_nlu1[1], :]\n return wemb_n\n #\n\n\ndef get_wemb_h(i_hds, l_hpu, l_hs, hS, num_hidden_layers, all_encoder_layer, num_out_layers_h):\n \"\"\"\n As if\n [ [table-1-col-1-tok1, t1-c1-t2, ...],\n [t1-c2-t1, t1-c2-t2, ...].\n ...\n [t2-c1-t1, ...,]\n ]\n \"\"\"\n bS = len(l_hs)\n l_hpu_max = max(l_hpu)\n num_of_all_hds = sum(l_hs)\n wemb_h = torch.zeros([num_of_all_hds, l_hpu_max, hS * num_out_layers_h]).to(device)\n b_pu = -1\n for b, i_hds1 in enumerate(i_hds):\n for b1, i_hds11 in enumerate(i_hds1):\n b_pu += 1\n for i_nolh in range(num_out_layers_h):\n i_layer = num_hidden_layers - 1 - i_nolh\n st = i_nolh * hS\n ed = (i_nolh + 1) * hS\n wemb_h[b_pu, 0:(i_hds11[1] - i_hds11[0]), st:ed] \\\n = all_encoder_layer[i_layer][b, i_hds11[0]:i_hds11[1],:]\n\n\n return wemb_h\n\n\n\ndef get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=1, num_out_layers_h=1):\n\n # get contextual output of all tokens from bert\n all_encoder_layer, pooled_output, tokens, i_nlu, i_hds,\\\n l_n, l_hpu, l_hs, \\\n nlu_tt, t_to_tt_idx, tt_to_t_idx = get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length)\n # all_encoder_layer: BERT outputs from all layers.\n # pooled_output: output of [CLS] vec.\n # tokens: BERT intput tokens\n # i_nlu: start and end indices of question in tokens\n # i_hds: start and end indices of headers\n\n\n # get the wemb\n wemb_n = get_wemb_n(i_nlu, l_n, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,\n num_out_layers_n)\n\n wemb_h = get_wemb_h(i_hds, l_hpu, l_hs, bert_config.hidden_size, bert_config.num_hidden_layers, all_encoder_layer,\n num_out_layers_h)\n\n return wemb_n, wemb_h, l_n, l_hpu, l_hs, \\\n nlu_tt, t_to_tt_idx, tt_to_t_idx\n\n\ndef gen_pnt_n(g_wvi, mL_w, mL_nt):\n \"\"\"\n Generate one-hot idx indicating vectors with their lenghts.\n\n :param g_wvi: e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]\n where_val idx in nlu_t. 
0 = <BEG>, -1 = <END>.\n :param mL_w: 4\n :param mL_nt: 200\n :return:\n \"\"\"\n bS = len(g_wvi)\n for g_wvi1 in g_wvi:\n for g_wvi11 in g_wvi1:\n l11 = len(g_wvi11)\n\n mL_g_wvi = max([max([0] + [len(tok) for tok in gwsi]) for gwsi in g_wvi]) - 1\n # zero because of '' case.\n # -1 because we already have <BEG>\n if mL_g_wvi < 1:\n mL_g_wvi = 1\n # NLq_token_pos = torch.zeros(bS, 5 - 1, mL_g_wvi, self.max_NLq_token_num)\n\n # l_g_wvi = torch.zeros(bS, 5 - 1)\n pnt_n = torch.zeros(bS, mL_w, mL_g_wvi, mL_nt).to(device) # one hot\n l_g_wvi = torch.zeros(bS, mL_w).to(device)\n\n for b, g_wvi1 in enumerate(g_wvi):\n i_wn = 0 # To prevent error from zero number of condition.\n for i_wn, g_wvi11 in enumerate(g_wvi1):\n # g_wvi11: [0, where_conds pos in NLq, end]\n g_wvi11_n1 = g_wvi11[:-1] # doesn't count <END> idx.\n l_g_wvi[b, i_wn] = len(g_wvi11_n1)\n for t, idx in enumerate(g_wvi11_n1):\n pnt_n[b, i_wn, t, idx] = 1\n\n # Pad\n if i_wn < (mL_w - 1): # maximum number of conidtions is 4\n pnt_n[b, i_wn + 1:, 0, 1] = 1 # # cannot understand... [<BEG>, <END>]??\n l_g_wvi[b, i_wn + 1:] = 1 # it means there is only <BEG>.\n\n\n return pnt_n, l_g_wvi\n\n\ndef pred_sc(s_sc):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # get g_num\n pr_sc = []\n for s_sc1 in s_sc:\n pr_sc.append(s_sc1.argmax().item())\n\n return pr_sc\n\ndef pred_sc_beam(s_sc, beam_size):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # get g_num\n pr_sc_beam = []\n\n\n for s_sc1 in s_sc:\n val, idxes = s_sc1.topk(k=beam_size)\n pr_sc_beam.append(idxes.tolist())\n\n return pr_sc_beam\n\ndef pred_sa(s_sa):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # get g_num\n pr_sa = []\n for s_sa1 in s_sa:\n pr_sa.append(s_sa1.argmax().item())\n\n return pr_sa\n\n\ndef pred_wn(s_wn):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # get g_num\n pr_wn = []\n for s_wn1 in s_wn:\n pr_wn.append(s_wn1.argmax().item())\n # print(pr_wn, s_wn1)\n # if s_wn1.argmax().item() == 3:\n # input('')\n\n return pr_wn\n\ndef pred_wc_old(sql_i, s_wc):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # get g_num\n pr_wc = []\n for b, sql_i1 in enumerate(sql_i):\n wn = len(sql_i1['conds'])\n s_wc1 = s_wc[b]\n\n pr_wc1 = argsort(-s_wc1.data.cpu().numpy())[:wn]\n pr_wc1.sort()\n\n pr_wc.append(list(pr_wc1))\n return pr_wc\n\ndef pred_wc(wn, s_wc):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n ! Returned index is sorted!\n \"\"\"\n # get g_num\n pr_wc = []\n for b, wn1 in enumerate(wn):\n s_wc1 = s_wc[b]\n\n pr_wc1 = argsort(-s_wc1.data.cpu().numpy())[:wn1]\n pr_wc1.sort()\n\n pr_wc.append(list(pr_wc1))\n return pr_wc\n\ndef pred_wc_sorted_by_prob(s_wc):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n ! 
Returned index is sorted by prob.\n All colume-indexes are returned here.\n \"\"\"\n # get g_num\n bS = len(s_wc)\n pr_wc = []\n\n for b in range(bS):\n s_wc1 = s_wc[b]\n pr_wc1 = argsort(-s_wc1.data.cpu().numpy())\n pr_wc.append(list(pr_wc1))\n return pr_wc\n\n\ndef pred_wo(wn, s_wo):\n \"\"\"\n return: [ pr_wc1_i, pr_wc2_i, ...]\n \"\"\"\n # s_wo = [B, 4, n_op]\n pr_wo_a = s_wo.argmax(dim=2) # [B, 4]\n # get g_num\n pr_wo = []\n for b, pr_wo_a1 in enumerate(pr_wo_a):\n wn1 = wn[b]\n pr_wo.append(list(pr_wo_a1.data.cpu().numpy()[:wn1]))\n\n return pr_wo\n\n\ndef pred_wvi_se(wn, s_wv):\n \"\"\"\n s_wv: [B, 4, mL, 2]\n - predict best st-idx & ed-idx\n \"\"\"\n\n s_wv_st, s_wv_ed = s_wv.split(1, dim=3) # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1]\n\n s_wv_st = s_wv_st.squeeze(3) # [B, 4, mL, 1] -> [B, 4, mL]\n s_wv_ed = s_wv_ed.squeeze(3)\n\n pr_wvi_st_idx = s_wv_st.argmax(dim=2) # [B, 4, mL] -> [B, 4, 1]\n pr_wvi_ed_idx = s_wv_ed.argmax(dim=2)\n\n pr_wvi = []\n for b, wn1 in enumerate(wn):\n pr_wvi1 = []\n for i_wn in range(wn1):\n pr_wvi_st_idx11 = pr_wvi_st_idx[b][i_wn]\n pr_wvi_ed_idx11 = pr_wvi_ed_idx[b][i_wn]\n pr_wvi1.append([pr_wvi_st_idx11.item(), pr_wvi_ed_idx11.item()])\n pr_wvi.append(pr_wvi1)\n\n return pr_wvi\n\ndef pred_wvi_se_beam(max_wn, s_wv, beam_size):\n \"\"\"\n s_wv: [B, 4, mL, 2]\n - predict best st-idx & ed-idx\n\n\n output:\n pr_wvi_beam = [B, max_wn, n_pairs, 2]. 2 means [st, ed].\n prob_wvi_beam = [B, max_wn, n_pairs]\n \"\"\"\n bS = s_wv.shape[0]\n\n s_wv_st, s_wv_ed = s_wv.split(1, dim=3) # [B, 4, mL, 2] -> [B, 4, mL, 1], [B, 4, mL, 1]\n\n s_wv_st = s_wv_st.squeeze(3) # [B, 4, mL, 1] -> [B, 4, mL]\n s_wv_ed = s_wv_ed.squeeze(3)\n\n prob_wv_st = F.softmax(s_wv_st, dim=-1).detach().to('cpu').numpy()\n prob_wv_ed = F.softmax(s_wv_ed, dim=-1).detach().to('cpu').numpy()\n\n k_logit = int(ceil(sqrt(beam_size)))\n n_pairs = k_logit**2\n assert n_pairs >= beam_size\n values_st, idxs_st = s_wv_st.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit]\n values_ed, idxs_ed = s_wv_ed.topk(k_logit) # [B, 4, mL] -> [B, 4, k_logit]\n\n # idxs = [B, k_logit, 2]\n # Generate all possible combination of st, ed indices & prob\n pr_wvi_beam = [] # [B, max_wn, k_logit**2 [st, ed] paris]\n prob_wvi_beam = zeros([bS, max_wn, n_pairs])\n for b in range(bS):\n pr_wvi_beam1 = []\n\n idxs_st1 = idxs_st[b]\n idxs_ed1 = idxs_ed[b]\n for i_wn in range(max_wn):\n idxs_st11 = idxs_st1[i_wn]\n idxs_ed11 = idxs_ed1[i_wn]\n\n pr_wvi_beam11 = []\n pair_idx = -1\n for i_k in range(k_logit):\n for j_k in range(k_logit):\n pair_idx += 1\n st = idxs_st11[i_k].item()\n ed = idxs_ed11[j_k].item()\n pr_wvi_beam11.append([st, ed])\n\n p1 = prob_wv_st[b, i_wn, st]\n p2 = prob_wv_ed[b, i_wn, ed]\n prob_wvi_beam[b, i_wn, pair_idx] = p1*p2\n pr_wvi_beam1.append(pr_wvi_beam11)\n pr_wvi_beam.append(pr_wvi_beam1)\n\n\n # prob\n\n return pr_wvi_beam, prob_wvi_beam\n\ndef is_whitespace_g_wvi(c):\n # if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n if c == \" \":\n return True\n return False\n\ndef convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu_wp_t, wp_to_wh_index, nlu):\n \"\"\"\n - Convert to the string in whilte-space-separated tokens\n - Add-hoc addition.\n \"\"\"\n pr_wv_str_wp = [] # word-piece version\n pr_wv_str = []\n for b, pr_wvi1 in enumerate(pr_wvi):\n pr_wv_str_wp1 = []\n pr_wv_str1 = []\n wp_to_wh_index1 = wp_to_wh_index[b]\n nlu_wp_t1 = nlu_wp_t[b]\n nlu_t1 = nlu_t[b]\n\n for i_wn, pr_wvi11 in enumerate(pr_wvi1):\n st_idx, ed_idx = pr_wvi11\n\n # Ad-hoc modification of 
ed_idx to deal with wp-tokenization effect.\n # e.g.) to convert \"butler cc (\" ->\"butler cc (ks)\" (dev set 1st question).\n pr_wv_str_wp11 = nlu_wp_t1[st_idx:ed_idx+1]\n pr_wv_str_wp1.append(pr_wv_str_wp11)\n\n st_wh_idx = wp_to_wh_index1[st_idx]\n ed_wh_idx = wp_to_wh_index1[ed_idx]\n pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx+1]\n\n pr_wv_str1.append(pr_wv_str11)\n\n pr_wv_str_wp.append(pr_wv_str_wp1)\n pr_wv_str.append(pr_wv_str1)\n\n return pr_wv_str, pr_wv_str_wp\n\n\n\n\ndef pred_sw_se(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv):\n pr_sc = pred_sc(s_sc)\n pr_sa = pred_sa(s_sa)\n pr_wn = pred_wn(s_wn)\n pr_wc = pred_wc(pr_wn, s_wc)\n pr_wo = pred_wo(pr_wn, s_wo)\n pr_wvi = pred_wvi_se(pr_wn, s_wv)\n\n return pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi\n\n\n\n\n\ndef merge_wv_t1_eng(where_str_tokens, NLq):\n \"\"\"\n Almost copied of SQLNet.\n The main purpose is pad blank line while combining tokens.\n \"\"\"\n nlq = NLq.lower()\n where_str_tokens = [tok.lower() for tok in where_str_tokens]\n alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789$'\n special = {'-LRB-': '(',\n '-RRB-': ')',\n '-LSB-': '[',\n '-RSB-': ']',\n '``': '\"',\n '\\'\\'': '\"',\n }\n # '--': '\\u2013'} # this generate error for test 5661 case.\n ret = ''\n double_quote_appear = 0\n for raw_w_token in where_str_tokens:\n # if '' (empty string) of None, continue\n if not raw_w_token:\n continue\n\n # Change the special characters\n w_token = special.get(raw_w_token, raw_w_token) # maybe necessary for some case?\n\n # check the double quote\n if w_token == '\"':\n double_quote_appear = 1 - double_quote_appear\n\n # Check whether ret is empty. ret is selected where condition.\n if len(ret) == 0:\n pass\n # Check blank character.\n elif len(ret) > 0 and ret + ' ' + w_token in nlq:\n # Pad ' ' if ret + ' ' is part of nlq.\n ret = ret + ' '\n\n elif len(ret) > 0 and ret + w_token in nlq:\n pass # already in good form. Later, ret + w_token will performed.\n\n # Below for unnatural question I guess. Is it likely to appear?\n elif w_token == '\"':\n if double_quote_appear:\n ret = ret + ' ' # pad blank line between next token when \" because in this case, it is of closing apperas\n # for the case of opening, no blank line.\n\n elif w_token[0] not in alphabet:\n pass # non alphabet one does not pad blank line.\n\n # when previous character is the special case.\n elif (ret[-1] not in ['(', '/', '\\u2013', '#', '$', '&']) and (ret[-1] != '\"' or not double_quote_appear):\n ret = ret + ' '\n ret = ret + w_token\n\n return ret.strip()\n\n\n\ndef find_sql_where_op(gt_sql_tokens_part):\n \"\"\"\n gt_sql_tokens_part: Between 'WHERE' and 'AND'(if exists).\n \"\"\"\n # sql_where_op = ['=', 'EQL', '<', 'LT', '>', 'GT']\n sql_where_op = ['EQL','LT','GT'] # wv sometimes contains =, < or >.\n\n\n for sql_where_op in sql_where_op:\n if sql_where_op in gt_sql_tokens_part:\n found_sql_where_op = sql_where_op\n break\n\n return found_sql_where_op\n\n\ndef find_sub_list(sl, l):\n # from stack overflow.\n results = []\n sll = len(sl)\n for ind in (i for i, e in enumerate(l) if e == sl[0]):\n if l[ind:ind + sll] == sl:\n results.append((ind, ind + sll - 1))\n\n return results\n\ndef get_g_wvi_bert(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):\n \"\"\"\n Generate SQuAD style start and end index of wv in nlu. 
Index is for of after WordPiece tokenization.\n\n Assumption: where_str always presents in the nlu.\n \"\"\"\n g_wvi = []\n for b, sql_i1 in enumerate(sql_i):\n nlu1 = nlu[b]\n nlu_t1 = nlu_t[b]\n nlu_wp_t1 = nlu_wp_t[b]\n sql_t1 = sql_t[b]\n wh_to_wp_index1 = wh_to_wp_index[b]\n\n st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1)\n g_wvi1 = []\n while st < len(sql_t1):\n if 'AND' not in sql_t1[st:]:\n ed = len(sql_t1)\n else:\n ed = sql_t1[st:].index('AND') + st\n sql_wop = find_sql_where_op(sql_t1[st:ed]) # sql where operator\n st_wop = st + sql_t1[st:ed].index(sql_wop)\n\n wv_str11_t = sql_t1[st_wop + 1:ed]\n results = find_sub_list(wv_str11_t, nlu_t1)\n st_idx, ed_idx = results[0]\n\n st_wp_idx = wh_to_wp_index1[st_idx]\n ed_wp_idx = wh_to_wp_index1[ed_idx]\n\n\n g_wvi11 = [st_wp_idx, ed_wp_idx]\n g_wvi1.append(g_wvi11)\n st = ed + 1\n g_wvi.append(g_wvi1)\n\n return g_wvi\n\n\ndef get_g_wvi_bert_from_g_wvi_corenlp(wh_to_wp_index, g_wvi_corenlp):\n \"\"\"\n Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization.\n\n Assumption: where_str always presents in the nlu.\n \"\"\"\n g_wvi = []\n for b, g_wvi_corenlp1 in enumerate(g_wvi_corenlp):\n wh_to_wp_index1 = wh_to_wp_index[b]\n g_wvi1 = []\n for i_wn, g_wvi_corenlp11 in enumerate(g_wvi_corenlp1):\n\n st_idx, ed_idx = g_wvi_corenlp11\n\n st_wp_idx = wh_to_wp_index1[st_idx]\n ed_wp_idx = wh_to_wp_index1[ed_idx]\n\n g_wvi11 = [st_wp_idx, ed_wp_idx]\n g_wvi1.append(g_wvi11)\n\n g_wvi.append(g_wvi1)\n\n return g_wvi\n\n\ndef get_g_wvi_bert_from_sql_i(nlu, nlu_t, wh_to_wp_index, sql_i, sql_t, tokenizer, nlu_wp_t):\n \"\"\"\n Generate SQuAD style start and end index of wv in nlu. Index is for of after WordPiece tokenization.\n\n Assumption: where_str always presents in the nlu.\n \"\"\"\n g_wvi = []\n for b, sql_i1 in enumerate(sql_i):\n nlu1 = nlu[b]\n nlu_t1 = nlu_t[b]\n nlu_wp_t1 = nlu_wp_t[b]\n sql_t1 = sql_t[b]\n wh_to_wp_index1 = wh_to_wp_index[b]\n\n st = sql_t1.index('WHERE') + 1 if 'WHERE' in sql_t1 else len(sql_t1)\n g_wvi1 = []\n while st < len(sql_t1):\n if 'AND' not in sql_t1[st:]:\n ed = len(sql_t1)\n else:\n ed = sql_t1[st:].index('AND') + st\n sql_wop = find_sql_where_op(sql_t1[st:ed]) # sql where operator\n st_wop = st + sql_t1[st:ed].index(sql_wop)\n\n wv_str11_t = sql_t1[st_wop + 1:ed]\n results = find_sub_list(wv_str11_t, nlu_t1)\n st_idx, ed_idx = results[0]\n\n st_wp_idx = wh_to_wp_index1[st_idx]\n ed_wp_idx = wh_to_wp_index1[ed_idx]\n\n\n g_wvi11 = [st_wp_idx, ed_wp_idx]\n g_wvi1.append(g_wvi11)\n st = ed + 1\n g_wvi.append(g_wvi1)\n\n return g_wvi\n\ndef get_cnt_sc(g_sc, pr_sc):\n cnt = 0\n for b, g_sc1 in enumerate(g_sc):\n pr_sc1 = pr_sc[b]\n if pr_sc1 == g_sc1:\n cnt += 1\n\n return cnt\n\ndef get_cnt_sc_list(g_sc, pr_sc):\n cnt_list = []\n for b, g_sc1 in enumerate(g_sc):\n pr_sc1 = pr_sc[b]\n if pr_sc1 == g_sc1:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\ndef get_cnt_sa(g_sa, pr_sa):\n cnt = 0\n for b, g_sa1 in enumerate(g_sa):\n pr_sa1 = pr_sa[b]\n if pr_sa1 == g_sa1:\n cnt += 1\n\n return cnt\n\n\ndef get_cnt_wn(g_wn, pr_wn):\n cnt = 0\n for b, g_wn1 in enumerate(g_wn):\n pr_wn1 = pr_wn[b]\n if pr_wn1 == g_wn1:\n cnt += 1\n\n return cnt\n\ndef get_cnt_wc(g_wc, pr_wc):\n cnt = 0\n for b, g_wc1 in enumerate(g_wc):\n\n pr_wc1 = pr_wc[b]\n pr_wn1 = len(pr_wc1)\n g_wn1 = len(g_wc1)\n\n if pr_wn1 != g_wn1:\n continue\n else:\n wc1 = array(g_wc1)\n wc1.sort()\n\n if array_equal(pr_wc1, wc1):\n cnt += 1\n\n return cnt\n\ndef 
get_cnt_wc_list(g_wc, pr_wc):\n cnt_list= []\n for b, g_wc1 in enumerate(g_wc):\n\n pr_wc1 = pr_wc[b]\n pr_wn1 = len(pr_wc1)\n g_wn1 = len(g_wc1)\n\n if pr_wn1 != g_wn1:\n cnt_list.append(0)\n continue\n else:\n wc1 = array(g_wc1)\n wc1.sort()\n\n if array_equal(pr_wc1, wc1):\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\n\ndef get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):\n \"\"\" pr's are all sorted as pr_wc are sorted in increasing order (in column idx)\n However, g's are not sorted.\n\n Sort g's in increasing order (in column idx)\n \"\"\"\n cnt = 0\n for b, g_wo1 in enumerate(g_wo):\n g_wc1 = g_wc[b]\n pr_wc1 = pr_wc[b]\n pr_wo1 = pr_wo[b]\n pr_wn1 = len(pr_wo1)\n g_wn1 = g_wn[b]\n\n if g_wn1 != pr_wn1:\n continue\n else:\n # Sort based on wc sequence.\n if mode == 'test':\n idx = argsort(array(g_wc1))\n\n g_wo1_s = array(g_wo1)[idx]\n g_wo1_s = list(g_wo1_s)\n elif mode == 'train':\n # due to teacher forcing, no need to sort.\n g_wo1_s = g_wo1\n else:\n raise ValueError\n\n if type(pr_wo1) != list:\n raise TypeError\n if g_wo1_s == pr_wo1:\n cnt += 1\n return cnt\n\ndef get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode):\n \"\"\" pr's are all sorted as pr_wc are sorted in increasing order (in column idx)\n However, g's are not sorted.\n\n Sort g's in increasing order (in column idx)\n \"\"\"\n cnt_list=[]\n for b, g_wo1 in enumerate(g_wo):\n g_wc1 = g_wc[b]\n pr_wc1 = pr_wc[b]\n pr_wo1 = pr_wo[b]\n pr_wn1 = len(pr_wo1)\n g_wn1 = g_wn[b]\n\n if g_wn1 != pr_wn1:\n cnt_list.append(0)\n continue\n else:\n # Sort based wc sequence.\n if mode == 'test':\n idx = argsort(array(g_wc1))\n\n g_wo1_s = array(g_wo1)[idx]\n g_wo1_s = list(g_wo1_s)\n elif mode == 'train':\n # due to tearch forcing, no need to sort.\n g_wo1_s = g_wo1\n else:\n raise ValueError\n\n if type(pr_wo1) != list:\n raise TypeError\n if g_wo1_s == pr_wo1:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n return cnt_list\n\n\ndef get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode):\n \"\"\" usalbe only when g_wc was used to find pr_wv\n\n g_wvi\n \"\"\"\n cnt = 0\n for b, g_wvi1 in enumerate(g_wvi):\n pr_wvi1 = pr_wvi[b]\n g_wc1 = g_wc[b]\n pr_wn1 = len(pr_wvi1)\n g_wn1 = g_wn[b]\n\n # Now sorting.\n # Sort based wc sequence.\n if mode == 'test':\n idx1 = argsort(array(g_wc1))\n elif mode == 'train':\n idx1 = list( range( g_wn1) )\n else:\n raise ValueError\n\n if g_wn1 != pr_wn1:\n continue\n else:\n flag = True\n for i_wn, idx11 in enumerate(idx1):\n g_wvi11 = g_wvi1[idx11]\n pr_wvi11 = pr_wvi1[i_wn]\n if g_wvi11 != pr_wvi11:\n flag = False\n # print(g_wv1, g_wv11)\n # print(pr_wv1, pr_wv11)\n # input('')\n break\n if flag:\n cnt += 1\n\n return cnt\n\n\ndef get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode):\n \"\"\" usalbe only when g_wc was used to find pr_wv\n \"\"\"\n cnt_list =[]\n for b, g_wvi1 in enumerate(g_wvi):\n g_wc1 = g_wc[b]\n pr_wvi1 = pr_wvi[b]\n pr_wn1 = len(pr_wvi1)\n g_wn1 = g_wn[b]\n\n # Now sorting.\n # Sort based wc sequence.\n if mode == 'test':\n idx1 = argsort(array(g_wc1))\n elif mode == 'train':\n idx1 = list( range( g_wn1) )\n else:\n raise ValueError\n\n if g_wn1 != pr_wn1:\n cnt_list.append(0)\n continue\n else:\n flag = True\n for i_wn, idx11 in enumerate(idx1):\n g_wvi11 = g_wvi1[idx11]\n pr_wvi11 = pr_wvi1[i_wn]\n if g_wvi11 != pr_wvi11:\n flag = False\n # print(g_wv1, g_wv11)\n # print(pr_wv1, pr_wv11)\n # input('')\n break\n if flag:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\n\ndef get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, 
mode):\n \"\"\" usalbe only when g_wc was used to find pr_wv\n \"\"\"\n cnt_list =[]\n for b, g_wc1 in enumerate(g_wc):\n pr_wn1 = len(pr_sql_i[b][\"conds\"])\n g_wn1 = g_wn[b]\n\n # Now sorting.\n # Sort based wc sequence.\n if mode == 'test':\n idx1 = argsort(array(g_wc1))\n elif mode == 'train':\n idx1 = list( range( g_wn1) )\n else:\n raise ValueError\n\n if g_wn1 != pr_wn1:\n cnt_list.append(0)\n continue\n else:\n flag = True\n for i_wn, idx11 in enumerate(idx1):\n g_wvi_str11 = str(g_sql_i[b][\"conds\"][idx11][2]).lower()\n pr_wvi_str11 = str(pr_sql_i[b][\"conds\"][i_wn][2]).lower()\n # print(g_wvi_str11)\n # print(pr_wvi_str11)\n # print(g_wvi_str11==pr_wvi_str11)\n if g_wvi_str11 != pr_wvi_str11:\n flag = False\n # print(g_wv1, g_wv11)\n # print(pr_wv1, pr_wv11)\n # input('')\n break\n if flag:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\ndef get_cnt_sw(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, mode):\n \"\"\" usalbe only when g_wc was used to find pr_wv\n \"\"\"\n cnt_sc = get_cnt_sc(g_sc, pr_sc)\n cnt_sa = get_cnt_sa(g_sa, pr_sa)\n cnt_wn = get_cnt_wn(g_wn, pr_wn)\n cnt_wc = get_cnt_wc(g_wc, pr_wc)\n cnt_wo = get_cnt_wo(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)\n cnt_wv = get_cnt_wv(g_wn, g_wc, g_wvi, pr_wvi, mode)\n\n return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wv\n\ndef get_cnt_sw_list(g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi,\n pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,\n g_sql_i, pr_sql_i,\n mode):\n \"\"\" usalbe only when g_wc was used to find pr_wv\n \"\"\"\n cnt_sc = get_cnt_sc_list(g_sc, pr_sc)\n cnt_sa = get_cnt_sc_list(g_sa, pr_sa)\n cnt_wn = get_cnt_sc_list(g_wn, pr_wn)\n cnt_wc = get_cnt_wc_list(g_wc, pr_wc)\n cnt_wo = get_cnt_wo_list(g_wn, g_wc, g_wo, pr_wc, pr_wo, mode)\n if pr_wvi:\n cnt_wvi = get_cnt_wvi_list(g_wn, g_wc, g_wvi, pr_wvi, mode)\n else:\n cnt_wvi = [0]*len(cnt_sc)\n cnt_wv = get_cnt_wv_list(g_wn, g_wc, g_sql_i, pr_sql_i, mode) # compare using wv-str which presented in original data.\n\n\n return cnt_sc, cnt_sa, cnt_wn, cnt_wc, cnt_wo, cnt_wvi, cnt_wv\n\n\ndef get_cnt_lx_list(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1):\n # all cnt are list here.\n cnt_list = []\n cnt_lx = 0\n for csc, csa, cwn, cwc, cwo, cwv in zip(cnt_sc1, cnt_sa1, cnt_wn1, cnt_wc1, cnt_wo1, cnt_wv1):\n if csc and csa and cwn and cwc and cwo and cwv:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\n\ndef get_cnt_x_list(engine, tb, g_sc, g_sa, g_sql_i, pr_sc, pr_sa, pr_sql_i):\n cnt_x1_list = []\n g_ans = []\n pr_ans = []\n for b in range(len(g_sc)):\n g_ans1 = engine.execute(tb[b]['id'], g_sc[b], g_sa[b], g_sql_i[b]['conds'])\n # print(f'cnt: {cnt}')\n # print(f\"pr_sql_i: {pr_sql_i[b]['conds']}\")\n try:\n pr_ans1 = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], pr_sql_i[b]['conds'])\n\n if bool(pr_ans1): # not empty due to lack of the data from incorretly generated sql\n if g_ans1 == pr_ans1:\n cnt_x1 = 1\n else:\n cnt_x1 = 0\n else:\n cnt_x1 = 0\n except:\n # type error etc... 
Execution-guided decoding may be used here.\n pr_ans1 = None\n cnt_x1 = 0\n cnt_x1_list.append(cnt_x1)\n g_ans.append(g_ans1)\n pr_ans.append(pr_ans1)\n\n return cnt_x1_list, g_ans, pr_ans\n\ndef get_mean_grad(named_parameters):\n \"\"\"\n Get list of mean, std of grad of each parameters\n Code based on web searched result..\n \"\"\"\n mu_list = []\n sig_list = []\n for name, param in named_parameters:\n if param.requires_grad: # and (\"bias\" not in name) :\n # bias makes std = nan as it is of single parameters\n magnitude = param.grad.abs()\n mu_list.append(magnitude.mean())\n if len(magnitude) == 1:\n # why nan for single param? Anyway to avoid that..\n sig_list.append(torch.tensor(0))\n else:\n sig_list.append(magnitude.std())\n\n # if \"svp_se\"\n\n return mu_list, sig_list\n\n\ndef generate_sql_i(pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu):\n pr_sql_i = []\n for b, nlu1 in enumerate(nlu):\n conds = []\n for i_wn in range(pr_wn[b]):\n conds1 = []\n conds1.append(pr_wc[b][i_wn])\n conds1.append(pr_wo[b][i_wn])\n merged_wv11 = merge_wv_t1_eng(pr_wv_str[b][i_wn], nlu[b])\n conds1.append(merged_wv11)\n conds.append(conds1)\n\n pr_sql_i1 = {'agg': pr_sa[b], 'sel': pr_sc[b], 'conds': conds}\n pr_sql_i.append(pr_sql_i1)\n return pr_sql_i\n\n\ndef save_for_evaluation(path_save, results, dset_name, ):\n path_save_file = os.path.join(path_save, f'results_{dset_name}.jsonl')\n with open(path_save_file, 'w', encoding='utf-8') as f:\n for i, r1 in enumerate(results):\n json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker)\n json_str += '\\n'\n\n f.writelines(json_str)\n\ndef save_for_evaluation_aux(path_save, results, dset_name, ):\n path_save_file = os.path.join(path_save, f'results_aux_{dset_name}.jsonl')\n with open(path_save_file, 'w', encoding='utf-8') as f:\n for i, r1 in enumerate(results):\n json_str = json.dumps(r1, ensure_ascii=False, default=json_default_type_checker)\n json_str += '\\n'\n\n f.writelines(json_str)\n\n\ndef check_sc_sa_pairs(tb, pr_sc, pr_sa, ):\n \"\"\"\n Check whether pr_sc, pr_sa are allowed pairs or not.\n agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']\n\n \"\"\"\n bS = len(pr_sc)\n check = [False] * bS\n for b, pr_sc1 in enumerate(pr_sc):\n pr_sa1 = pr_sa[b]\n hd_types1 = tb[b]['types']\n hd_types11 = hd_types1[pr_sc1]\n if hd_types11 == 'text':\n if pr_sa1 == 0 or pr_sa1 == 3: # ''\n check[b] = True\n else:\n check[b] = False\n\n elif hd_types11 == 'real':\n check[b] = True\n else:\n raise Exception(\"New TYPE!!\")\n\n return check\n\n\ndef remap_sc_idx(idxs, pr_sc_beam):\n for b, idxs1 in enumerate(idxs):\n for i_beam, idxs11 in enumerate(idxs1):\n sc_beam_idx = idxs[b][i_beam][0]\n sc_idx = pr_sc_beam[b][sc_beam_idx]\n idxs[b][i_beam][0] = sc_idx\n\n return idxs\n\n\ndef sort_and_generate_pr_w(pr_sql_i):\n pr_wc = []\n pr_wo = []\n pr_wv = []\n for b, pr_sql_i1 in enumerate(pr_sql_i):\n conds1 = pr_sql_i1[\"conds\"]\n pr_wc1 = []\n pr_wo1 = []\n pr_wv1 = []\n\n # Generate\n for i_wn, conds11 in enumerate(conds1):\n pr_wc1.append( conds11[0])\n pr_wo1.append( conds11[1])\n pr_wv1.append( conds11[2])\n\n # sort based on pr_wc1\n idx = argsort(pr_wc1)\n pr_wc1 = array(pr_wc1)[idx].tolist()\n pr_wo1 = array(pr_wo1)[idx].tolist()\n pr_wv1 = array(pr_wv1)[idx].tolist()\n\n conds1_sorted = []\n for i, idx1 in enumerate(idx):\n conds1_sorted.append( conds1[idx1] )\n\n\n pr_wc.append(pr_wc1)\n pr_wo.append(pr_wo1)\n pr_wv.append(pr_wv1)\n\n pr_sql_i1['conds'] = conds1_sorted\n\n return pr_wc, pr_wo, pr_wv, pr_sql_i\n\ndef 
generate_sql_q(sql_i, tb):\n sql_q = []\n for b, sql_i1 in enumerate(sql_i):\n tb1 = tb[b]\n sql_q1 = generate_sql_q1(sql_i1, tb1)\n sql_q.append(sql_q1)\n\n return sql_q\n\ndef generate_sql_q1(sql_i1, tb1):\n \"\"\"\n sql = {'sel': 5, 'agg': 4, 'conds': [[3, 0, '59']]}\n agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']\n cond_ops = ['=', '>', '<', 'OP']\n\n Temporal as it can show only one-time conditioned case.\n sql_query: real sql_query\n sql_plus_query: More redable sql_query\n\n \"PLUS\" indicates, it deals with the some of db specific facts like PCODE <-> NAME\n \"\"\"\n agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']\n cond_ops = ['=', '>', '<', 'OP']\n\n headers = tb1[\"header\"]\n # select_header = headers[sql['sel']].lower()\n # try:\n # select_table = tb1[\"name\"]\n # except:\n # print(f\"No table name while headers are {headers}\")\n select_table = tb1[\"id\"]\n\n select_agg = agg_ops[sql_i1['agg']]\n select_header = headers[sql_i1['sel']]\n sql_query_part1 = f'SELECT {select_agg}({select_header}) '\n\n\n where_num = len(sql_i1['conds'])\n if where_num == 0:\n sql_query_part2 = f'FROM {select_table}'\n # sql_plus_query_part2 = f'FROM {select_table}'\n\n else:\n sql_query_part2 = f'FROM {select_table} WHERE'\n # sql_plus_query_part2 = f'FROM {select_table_refined} WHERE'\n # ----------------------------------------------------------------------------------------------------------\n for i in range(where_num):\n # check 'OR'\n # number_of_sub_conds = len(sql['conds'][i])\n where_header_idx, where_op_idx, where_str = sql_i1['conds'][i]\n where_header = headers[where_header_idx]\n where_op = cond_ops[where_op_idx]\n if i > 0:\n sql_query_part2 += ' AND'\n # sql_plus_query_part2 += ' AND'\n\n sql_query_part2 += f\" {where_header} {where_op} {where_str}\"\n\n sql_query = sql_query_part1 + sql_query_part2\n # sql_plus_query = sql_plus_query_part1 + sql_plus_query_part2\n\n return sql_query\n\n\ndef get_pnt_idx1(col_pool_type, st_ed):\n st, ed = st_ed\n if col_pool_type == 'start_tok':\n pnt_idx1 = st\n elif col_pool_type == 'end_tok':\n pnt_idx1 = ed\n elif col_pool_type == 'avg':\n pnt_idx1 = arange(st, ed, 1)\n return pnt_idx1\n\n\ndef gen_g_pnt_idx(g_wvi, sql_i, i_hds, i_sql_vocab, col_pool_type):\n \"\"\"\n sql_vocab = (\n 0.. \"sql none\", \"sql max\", \"sql min\", \"sql count\", \"sql sum\", \"sql average\", ..5\n 6.. \"sql select\", \"sql where\", \"sql and\", .. 8\n 9.. \"sql equal\", \"sql greater than\", \"sql less than\", .. 11\n 12.. \"sql start\", \"sql end\" .. 
13\n )\n \"\"\"\n g_pnt_idxs = []\n\n\n\n for b, sql_i1 in enumerate(sql_i):\n i_sql_vocab1 = i_sql_vocab[b]\n i_hds1 = i_hds[b]\n g_pnt_idxs1 = []\n\n # start token\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-2])\n g_pnt_idxs1.append(pnt_idx1)\n\n # select token\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[6])\n g_pnt_idxs1.append(pnt_idx1)\n\n # select agg\n idx_agg = sql_i1[\"agg\"]\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_agg])\n g_pnt_idxs1.append(pnt_idx1)\n\n # select column\n idx_sc = sql_i1[\"sel\"]\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_sc])\n g_pnt_idxs1.append(pnt_idx1)\n\n conds = sql_i1[\"conds\"]\n wn = len(conds)\n if wn <= 0:\n pass\n else:\n # select where\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[7])\n g_pnt_idxs1.append(pnt_idx1)\n\n for i_wn, conds1 in enumerate(conds):\n # where column\n idx_wc = conds1[0]\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_hds1[idx_wc])\n g_pnt_idxs1.append(pnt_idx1)\n\n # where op\n idx_wo = conds1[1]\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[idx_wo + 9])\n g_pnt_idxs1.append(pnt_idx1)\n\n # where val\n st, ed = g_wvi[b][i_wn]\n end_pos_of_sql_vocab = i_sql_vocab1[-1][-1]\n g_pnt_idxs1.append(st + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector\n g_pnt_idxs1.append(ed + 1 + end_pos_of_sql_vocab) # due to inital [CLS] token in BERT-input vector\n\n # and token\n if i_wn < wn - 1:\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[8])\n g_pnt_idxs1.append(pnt_idx1)\n\n # end token\n pnt_idx1 = get_pnt_idx1(col_pool_type, i_sql_vocab1[-1])\n g_pnt_idxs1.append(pnt_idx1)\n\n g_pnt_idxs.append(g_pnt_idxs1)\n\n return g_pnt_idxs\n\n\ndef pred_pnt_idxs(score, pnt_start_tok, pnt_end_tok):\n pr_pnt_idxs = []\n for b, score1 in enumerate(score):\n # score1 = [T, max_seq_length]\n pr_pnt_idxs1 = [pnt_start_tok]\n for t, score11 in enumerate(score1):\n pnt = score11.argmax().item()\n pr_pnt_idxs1.append(pnt)\n\n if pnt == pnt_end_tok:\n break\n pr_pnt_idxs.append(pr_pnt_idxs1)\n\n return pr_pnt_idxs\n\n\ndef generate_sql_q_s2s(pnt_idxs, tokens, tb):\n sql_q = []\n for b, pnt_idxs1 in enumerate(pnt_idxs):\n tb1 = tb[b]\n sql_q1 = generate_sql_q1_s2s(pnt_idxs1, tokens[b], tb1)\n sql_q.append(sql_q1)\n\n return sql_q\n\n\ndef generate_sql_q1_s2s(pnt_idxs1, tokens1, tb1):\n \"\"\"\n agg_ops = ['', 'max', 'min', 'count', 'sum', 'avg']\n cond_ops = ['=', '>', '<', 'OP']\n\n Temporal as it can show only one-time conditioned case.\n sql_query: real sql_query\n sql_plus_query: More redable sql_query\n\n \"PLUS\" indicates, it deals with the some of db specific facts like PCODE <-> NAME\n \"\"\"\n sql_query = \"\"\n for t, pnt_idxs11 in enumerate(pnt_idxs1):\n tok = tokens1[pnt_idxs11]\n sql_query += tok\n if t < len(pnt_idxs1)-1:\n sql_query += \" \"\n\n\n return sql_query\n\n\n# Generate sql_i from pnt_idxs\ndef find_where_pnt_belong(pnt, vg):\n idx_sub = -1\n for i, st_ed in enumerate(vg):\n st, ed = st_ed\n if pnt < ed and pnt >= st:\n idx_sub = i\n\n return idx_sub\n\n\ndef gen_pnt_i_from_pnt(pnt, i_sql_vocab1, i_nlu1, i_hds1):\n # Find where it belong\n vg_list = [i_sql_vocab1, [i_nlu1], i_hds1] # as i_nlu has only single st and ed\n i_vg = -1\n i_vg_sub = -1\n for i, vg in enumerate(vg_list):\n idx_sub = find_where_pnt_belong(pnt, vg)\n if idx_sub > -1:\n i_vg = i\n i_vg_sub = idx_sub\n break\n return i_vg, i_vg_sub\n\n\ndef gen_i_vg_from_pnt_idxs(pnt_idxs, i_sql_vocab, i_nlu, i_hds):\n i_vg_list = []\n i_vg_sub_list = []\n for b, pnt_idxs1 in 
enumerate(pnt_idxs):\n # if properly generated,\n sql_q1_list = []\n i_vg_list1 = [] # index of (sql_vocab, nlu, hds)\n i_vg_sub_list1 = [] # index inside of each vocab group\n\n for t, pnt in enumerate(pnt_idxs1):\n i_vg, i_vg_sub = gen_pnt_i_from_pnt(pnt, i_sql_vocab[b], i_nlu[b], i_hds[b])\n i_vg_list1.append(i_vg)\n i_vg_sub_list1.append(i_vg_sub)\n\n # sql_q1 = sql_q1.join(' ')\n # sql_q.append(sql_q1)\n i_vg_list.append(i_vg_list1)\n i_vg_sub_list.append(i_vg_sub_list1)\n return i_vg_list, i_vg_sub_list\n\n\ndef gen_sql_q_from_i_vg(tokens, nlu, nlu_t, hds, tt_to_t_idx, pnt_start_tok, pnt_end_tok, pnt_idxs, i_vg_list, i_vg_sub_list):\n \"\"\"\n (\n \"none\", \"max\", \"min\", \"count\", \"sum\", \"average\",\n \"select\", \"where\", \"and\",\n \"equal\", \"greater than\", \"less than\",\n \"start\", \"end\"\n ),\n \"\"\"\n sql_q = []\n sql_i = []\n for b, nlu_t1 in enumerate(nlu_t):\n sql_q1_list = []\n sql_i1 = {}\n tt_to_t_idx1 = tt_to_t_idx[b]\n nlu_st_observed = False\n agg_observed = False\n wc_obs = False\n wo_obs = False\n conds = []\n\n for t, i_vg in enumerate(i_vg_list[b]):\n i_vg_sub = i_vg_sub_list[b][t]\n pnt = pnt_idxs[b][t]\n if i_vg == 0:\n # sql_vocab\n if pnt == pnt_start_tok or pnt == pnt_end_tok:\n pass\n else:\n tok = tokens[b][pnt]\n if tok in [\"none\", \"max\", \"min\", \"count\", \"sum\", \"average\"]:\n agg_observed = True\n if tok == \"none\":\n pass\n sql_i1[\"agg\"] = [\"none\", \"max\", \"min\", \"count\", \"sum\", \"average\"].index(tok)\n else:\n if tok in [\"greater\", \"less\", \"equal\"]:\n if tok == 'greater':\n tok = '>'\n elif tok == 'less':\n tok = '<'\n elif tok == 'equal':\n tok = '='\n\n # gen conds1\n if wc_obs:\n conds1.append( ['=','>','<'].index(tok) )\n wo_obs = True\n\n sql_q1_list.append(tok)\n\n elif i_vg == 1:\n # nlu case\n if not nlu_st_observed:\n idx_nlu_st = pnt\n nlu_st_observed = True\n else:\n # now to wrap up\n idx_nlu_ed = pnt\n st_wh_idx = tt_to_t_idx1[idx_nlu_st - pnt_end_tok - 2]\n ed_wh_idx = tt_to_t_idx1[idx_nlu_ed - pnt_end_tok - 2]\n pr_wv_str11 = nlu_t1[st_wh_idx:ed_wh_idx + 1]\n merged_wv11 = merge_wv_t1_eng(pr_wv_str11, nlu[b])\n sql_q1_list.append(merged_wv11)\n nlu_st_observed = False\n\n if wc_obs and wo_obs:\n conds1.append(merged_wv11)\n conds.append(conds1)\n\n wc_obs = False\n wo_obs = False\n\n\n elif i_vg == 2:\n # headers\n tok = hds[b][i_vg_sub]\n if agg_observed:\n sql_q1_list.append(f\"({tok})\")\n sql_i1[\"sel\"] = i_vg_sub\n agg_observed = False\n else:\n wc_obs = True\n conds1 = [i_vg_sub]\n\n sql_q1_list.append(tok)\n\n # insert table name between.\n sql_i1[\"conds\"] = conds\n sql_i.append(sql_i1)\n sql_q1 = ' '.join(sql_q1_list)\n sql_q.append(sql_q1)\n\n return sql_q, sql_i\n\n\ndef get_cnt_lx_list_s2s(g_pnt_idxs, pr_pnt_idxs):\n # all cnt are list here.\n cnt_list = []\n for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):\n pr_pnt_idxs1 = pr_pnt_idxs[b]\n\n if g_pnt_idxs1 == pr_pnt_idxs1:\n cnt_list.append(1)\n else:\n cnt_list.append(0)\n\n return cnt_list\n\n\ndef get_wemb_h_FT_Scalar_1(i_hds, l_hs, hS, all_encoder_layer, col_pool_type='start_tok'):\n \"\"\"\n As if\n [ [table-1-col-1-tok1, t1-c1-t2, ...],\n [t1-c2-t1, t1-c2-t2, ...].\n ...\n [t2-c1-t1, ...,]\n ]\n\n # i_hds = [ [ Batch 1 ] [ Batch 2 ] ]\n # [Batch 1] = [ (col1_st_idx, col1_ed_idx), (col2_st_idx, col2_ed_idx), ...]\n # i_hds = [[(11, 14), (15, 19), (20, 21), (22, 24), (25, 27), (28, 29)],\n # [(16, 19), (20, 24), (25, 26), (27, 29), (30, 32), (33, 34)]]\n\n pool_type = 'start_tok', 'end_tok', 'avg'\n\n \"\"\"\n bS = len(l_hs)\n 
l_hs_max = max(l_hs)\n wemb_h = torch.zeros([bS, l_hs_max, hS]).to(device)\n for b, i_hds1 in enumerate(i_hds):\n for i_hd, st_ed_pair in enumerate(i_hds1):\n st, ed = st_ed_pair\n if col_pool_type == 'start_tok':\n vec = all_encoder_layer[-1][b, st,:]\n elif col_pool_type == 'end_tok':\n vec = all_encoder_layer[-1][b, ed, :]\n elif col_pool_type == 'avg':\n vecs = all_encoder_layer[-1][b, st:ed,:]\n vec = vecs.mean(dim=1, keepdim=True)\n else:\n raise ValueError\n wemb_h[b, i_hd, :] = vec\n\n return wemb_h\n\n\ndef cal_prob(s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, pr_sc, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi):\n \"\"\"\n\n :param s_sc: [B, l_h]\n :param s_sa: [B, l_a] # 16\n :param s_wn: [B, 5]\n :param s_wc: [B, l_h]\n :param s_wo: [B, 4, l_o] #\n :param s_wv: [B, 4, 22]\n :return:\n \"\"\"\n # First get selected index\n\n #\n\n # Predict prob\n p_sc = cal_prob_sc(s_sc, pr_sc)\n p_sa = cal_prob_sa(s_sa, pr_sa)\n p_wn = cal_prob_wn(s_wn, pr_wn)\n p_wc = cal_prob_wc(s_wc, pr_wc)\n p_wo = cal_prob_wo(s_wo, pr_wo)\n p_wvi = cal_prob_wvi_se(s_wv, pr_wvi)\n\n # calculate select-clause probability\n p_select = cal_prob_select(p_sc, p_sa)\n\n # calculate where-clause probability\n p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)\n\n # calculate total probability\n p_tot = cal_prob_tot(p_select, p_where)\n\n return p_tot, p_select, p_where, p_sc, p_sa, p_wn, p_wc, p_wo, p_wvi\n\ndef cal_prob_tot(p_select, p_where):\n p_tot = []\n for b, p_select1 in enumerate(p_select):\n p_where1 = p_where[b]\n p_tot.append( p_select1 * p_where1 )\n\n return p_tot\n\ndef cal_prob_select(p_sc, p_sa):\n p_select = []\n for b, p_sc1 in enumerate(p_sc):\n p1 = 1.0\n p1 *= p_sc1\n p1 *= p_sa[b]\n\n p_select.append(p1)\n return p_select\n\ndef cal_prob_where(p_wn, p_wc, p_wo, p_wvi):\n p_where = []\n for b, p_wn1 in enumerate(p_wn):\n p1 = 1.0\n p1 *= p_wn1\n p_wc1 = p_wc[b]\n\n for i_wn, p_wc11 in enumerate(p_wc1):\n p_wo11 = p_wo[b][i_wn]\n p_wv11_st, p_wv11_ed = p_wvi[b][i_wn]\n\n p1 *= p_wc11\n p1 *= p_wo11\n p1 *= p_wv11_st\n p1 *= p_wv11_ed\n\n p_where.append(p1)\n\n return p_where\n\n\ndef cal_prob_sc(s_sc, pr_sc):\n ps = F.softmax(s_sc, dim=1)\n p = []\n for b, ps1 in enumerate(ps):\n pr_sc1 = pr_sc[b]\n p1 = ps1[pr_sc1]\n p.append(p1.item())\n\n return p\n\ndef cal_prob_sa(s_sa, pr_sa):\n ps = F.softmax(s_sa, dim=1)\n p = []\n for b, ps1 in enumerate(ps):\n pr_sa1 = pr_sa[b]\n p1 = ps1[pr_sa1]\n p.append(p1.item())\n\n return p\n\ndef cal_prob_wn(s_wn, pr_wn):\n ps = F.softmax(s_wn, dim=1)\n p = []\n for b, ps1 in enumerate(ps):\n pr_wn1 = pr_wn[b]\n p1 = ps1[pr_wn1]\n p.append(p1.item())\n\n return p\n\ndef cal_prob_wc(s_wc, pr_wc):\n ps = torch.sigmoid(s_wc)\n ps_out = []\n for b, pr_wc1 in enumerate(pr_wc):\n ps1 = array(ps[b].cpu())\n ps_out1 = ps1[pr_wc1]\n ps_out.append(list(ps_out1))\n\n return ps_out\n\ndef cal_prob_wo(s_wo, pr_wo):\n # assume there is always at least single condition.\n ps = F.softmax(s_wo, dim=2)\n ps_out = []\n\n\n for b, pr_wo1 in enumerate(pr_wo):\n ps_out1 = []\n for n, pr_wo11 in enumerate(pr_wo1):\n ps11 = ps[b][n]\n ps_out1.append( ps11[pr_wo11].item() )\n\n\n ps_out.append(ps_out1)\n\n return ps_out\n\n\ndef cal_prob_wvi_se(s_wv, pr_wvi):\n prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()\n p_wv = []\n for b, pr_wvi1 in enumerate(pr_wvi):\n p_wv1 = []\n for i_wn, pr_wvi11 in enumerate(pr_wvi1):\n st, ed = pr_wvi11\n p_st = prob_wv[b, i_wn, st, 0]\n p_ed = prob_wv[b, i_wn, ed, 1]\n p_wv1.append([p_st, p_ed])\n p_wv.append(p_wv1)\n\n return p_wv\n\ndef 
generate_inputs_s2s(tokenizer, nlu1_tt, hds1, sql_vocab1):\n \"\"\"\n [CLS] sql_vocab [SEP] question [SEP] headers\n To make sql_vocab in a fixed position.\n \"\"\"\n\n tokens = []\n segment_ids = []\n\n tokens.append(\"[CLS]\")\n\n\n # sql_vocab\n i_sql_vocab = []\n # for doc\n for i, sql_vocab11 in enumerate(sql_vocab1):\n i_st_sql = len(tokens)\n sub_tok = tokenizer.tokenize(sql_vocab11)\n tokens += sub_tok\n i_ed_sql = len(tokens)\n i_sql_vocab.append((i_st_sql, i_ed_sql))\n segment_ids += [1] * len(sub_tok)\n if i < len(sql_vocab1) - 1:\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n elif i == len(sql_vocab1) - 1:\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n else:\n raise EnvironmentError\n\n\n # question\n i_st_nlu = len(tokens) # to use it later\n\n segment_ids.append(0)\n for token in nlu1_tt:\n tokens.append(token)\n segment_ids.append(0)\n i_ed_nlu = len(tokens)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n i_nlu = (i_st_nlu, i_ed_nlu)\n\n\n # headers\n i_hds = []\n # for doc\n for i, hds11 in enumerate(hds1):\n i_st_hd = len(tokens)\n sub_tok = tokenizer.tokenize(hds11)\n tokens += sub_tok\n i_ed_hd = len(tokens)\n i_hds.append((i_st_hd, i_ed_hd))\n segment_ids += [1] * len(sub_tok)\n if i < len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n elif i == len(hds1)-1:\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n else:\n raise EnvironmentError\n\n\n return tokens, segment_ids, i_sql_vocab, i_nlu, i_hds\n\n\ndef sort_pr_wc(pr_wc, g_wc):\n \"\"\"\n Input: list\n pr_wc = [B, n_conds]\n g_wc = [B, n_conds]\n\n\n Return: list\n pr_wc_sorted = [B, n_conds]\n \"\"\"\n pr_wc_sorted = []\n for b, pr_wc1 in enumerate(pr_wc):\n g_wc1 = g_wc[b]\n pr_wc1_sorted = []\n\n if set(g_wc1) == set(pr_wc1):\n pr_wc1_sorted = deepcopy(g_wc1)\n else:\n # no sorting when g_wc1 and pr_wc1 are different.\n pr_wc1_sorted = deepcopy(pr_wc1)\n\n pr_wc_sorted.append(pr_wc1_sorted)\n return pr_wc_sorted\n\n" ]
[ [ "torch.sigmoid", "torch.nn.functional.softmax", "torch.LongTensor", "torch.zeros", "torch.utils.data.DataLoader", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "torch.nn.utils.rnn.pad_packed_sequence", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hi-zhengcheng/examples
[ "6c3c9b62e202923d1d87d03c51b1e79279bdae4b" ]
[ "tensorflow_examples/lite/model_maker/demo/image_classification_demo_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nfrom unittest.mock import patch\n\nimport tensorflow as tf\n\nfrom tensorflow_examples.lite.model_maker.core import test_util\nfrom tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader\nfrom tensorflow_examples.lite.model_maker.demo import image_classification_demo\n\n\ndef get_cache_dir():\n return os.path.join(test_util.get_test_data_path('demo'), 'testdata')\n\n\nfrom_folder_fn = ImageClassifierDataLoader.from_folder\n\n\ndef patch_data_loader():\n \"\"\"Patch to train partial dataset rather than all of them.\"\"\"\n\n def side_effect(*args, **kwargs):\n tf.compat.v1.logging.info('Train on partial dataset')\n data_loader = from_folder_fn(*args, **kwargs)\n if data_loader.size > 10: # Trim dataset to at most 10.\n data_loader.size = 10\n data_loader.dataset = data_loader.dataset.take(data_loader.size)\n return data_loader\n\n return patch.object(\n ImageClassifierDataLoader, 'from_folder', side_effect=side_effect)\n\n\nclass ImageClassificationDemoTest(tf.test.TestCase):\n\n def test_image_classification_demo(self):\n with patch_data_loader():\n with tempfile.TemporaryDirectory() as temp_dir:\n # Use cached training data if exists.\n data_dir = image_classification_demo.download_demo_data(\n cache_dir=get_cache_dir(),\n file_hash='6f87fb78e9cc9ab41eff2015b380011d')\n\n tflite_filename = os.path.join(temp_dir, 'model.tflite')\n label_filename = os.path.join(temp_dir, 'labels.txt')\n image_classification_demo.run(\n data_dir,\n temp_dir,\n spec='efficientnet_lite0',\n epochs=1,\n batch_size=1)\n\n self.assertTrue(tf.io.gfile.exists(tflite_filename))\n self.assertGreater(os.path.getsize(tflite_filename), 0)\n\n self.assertTrue(tf.io.gfile.exists(label_filename))\n self.assertGreater(os.path.getsize(label_filename), 0)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.io.gfile.exists", "tensorflow.compat.v1.logging.info", "tensorflow.test.main" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zuoqin/restapi_admin
[ "89f6f6709f1c006e67497e2484cfcc03f695c534" ]
[ "AddressAPI.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport psycopg2\nimport psycopg2.extras\nimport pandas as pd\nimport random\nimport apilib\nfrom flask import Flask, jsonify, request\nfrom flasgger import Swagger\nfrom flask_jwt_extended import (create_access_token,\n create_refresh_token, jwt_required, jwt_refresh_token_required,\n get_jwt_identity, get_raw_jwt)\nfrom datetime import datetime, timedelta\napp = Flask(__name__)\nSwagger(app)\n\n\nfrom flask_jwt_extended import JWTManager\napp.config['JWT_SECRET_KEY'] = 'jwt-secret-string'\n#app.config['JWT_EXPIRATION_DELTA'] = timedelta(days=10)\napp.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=10)\njwt = JWTManager(app)\nconn_string = \"host='37.18.75.197' port=5432 dbname='omnia' user='postgres' password='Qwerty123'\"\n\nfrom flask_restful import reqparse, request\nfrom flask_cors import CORS\n\n\n\nCORS(app)\n\[email protected]('/api/getparams', methods=['GET'])\n@jwt_required\ndef get_params_by_address():\n \"\"\"\n This is FinCase RealEstate API\n Вызовите этот метод и передайте адрес в качестве параметра\n ---\n tags:\n - Финкейс Жилая Недвижимость API\n parameters:\n - in: header\n name: Authorization\n type: string\n required: true\n - name: address\n in: query\n type: string\n required: true\n description: Адрес объекта жилой недвижимости в любом формате\n - name: analogscount\n in: query\n type: number\n required: true\n default: 10\n description: Количество возвращаемых аналогов\n - name: totalsquare\n in: query\n type: number\n required: false\n default: 100\n description: Площадь объекта (по умолчанию 100 если отсутствует в росреестре)\n definitions:\n Analog:\n type: object\n properties:\n id:\n type: integer\n description: Идентификатор объекта\n default: 10\n housetype:\n type: string\n description: Материал здания\n default: 'панельный'\n price:\n type: number\n description: Цена объекта\n default: 2000.99\n totalsquare:\n type: number\n description: Общая площадь объета\n default: 100.0\n leavingsquare:\n type: number\n description: Жилая площадь объета\n default: 100.0\n kitchensquare:\n type: number\n description: Площадь кухни объета\n default: 100.0\n city:\n type: string\n description: Город\n default: 'Москва'\n repair:\n type: string\n description: Тип ремонта\n default: 'косметический'\n lat:\n type: number\n description: Широта расположения объекта\n default: 55.89\n lon:\n type: number\n description: Долгота расположения объекта\n default: 37.89\n buildingyear:\n type: integer\n description: Год постройки\n default: 2000\n ceilingheight:\n type: number\n description: Высота потолков\n default: 2.71\n floor:\n type: integer\n description: Этаж расположения\n default: 2\n storeys:\n type: integer\n description: Количество этажей в здании\n default: 9\n fulladdress:\n type: string\n description: Адрес объекта\n default: 'Москва'\n responses:\n 500:\n description: Ошибка, адрес некорректный\n 200:\n description: Наиболее полный набор параметров и оценка стоимости объекта\n schema:\n id: object_params\n properties:\n analogs:\n type: array\n description: Аналогичные объекты искомому\n items:\n $ref: '#/definitions/Analog'\n buildingyear:\n type: integer\n description: Год постройки объекта\n default: 2000\n ceilingheight:\n type: number\n description: Высота потолков\n default: 2.70\n city:\n type: string\n description: Город нахождения объекта\n default: 'Москва'\n housetype:\n type: string\n description: Материал здания\n default: 'панельный'\n latitude:\n type: number\n description: Географическая широта 
объекта\n default: 55.751244\n longitude:\n type: number\n description: Географическая долгота объекта\n default: 37.618423\n storey:\n type: integer\n description: Этаж расположения объекта\n default: 7\n storeysnum:\n type: integer\n description: Количество этажей в здании\n default: 9\n totalsquare:\n type: number\n description: Общая площадь объета\n default: 100.0\n cadCost:\n type: number\n description: Кадастровая стоимость объекта\n default: 1000000.00\n kadastrovy_nomer:\n type: string\n description: Кадастровый номер объекта\n default: ''\n priceDivergency:\n type: number\n description: Отклонение кадастровой стоимости от рыночной в процентах\n default: 3.09\n price:\n type: number\n description: Текущая цена объекта\n default: 1000000.00\n pricePerMetr:\n type: number\n description: Стоимость квадратного метра объекта\n default: 1000000.00\n houseAvrgPrice:\n type: number\n description: Средняя цена квадратного метра в доме\n default: 1000000.00\n regionAvrgPrice:\n type: number\n description: Средняя цена квадратного метра в районе\n default: 1000000.00\n cityAvrgPrice:\n type: number\n description: Средняя цена квадратного метра в городе\n default: 1000000.00\n\n \"\"\"\n\n conn = psycopg2.connect(conn_string)\n cur = conn.cursor()\n current_user = get_jwt_identity()\n sql = 'select id from accounts_customuser where username=\\'' + current_user + '\\''\n df = pd.read_sql(sql, con=conn)\n\n address = request.args.get('address', default = '*', type = str)\n analogscount = request.args.get('analogscount', default = 10, type = int)\n totalsquare = request.args.get('totalsquare', default = 100, type = float)\n\n sql = 'insert into user_actions (user_id, action, time, comment) values(' +\\\n str(df['id'][0]) + ', 1, current_timestamp' + ', \\'' +\\\n '/api/getparams address=' + address + '\\')'\n cur.execute(sql)\n conn.commit()\n cur.close()\n result = apilib.getparams(address, analogscount, totalsquare)\n return jsonify(\n result\n )\n\[email protected]('/api/login', methods=['POST'])\ndef get_token():\n \"\"\"\n This is FinCase RealEstate API\n Вызовите этот метод и передайте имя пользователя и пароль в качестве параметра\n ---\n tags:\n - Финкейс Жилая Недвижимость API\n parameters:\n - in: body\n name: user\n schema:\n type: object\n required:\n - username\n properties:\n username:\n type: string\n password:\n type: string\n responses:\n 500:\n description: Ошибка, адрес некорректный\n 200:\n description: Токен для доступа к ресурсам\n schema:\n id: login_params\n properties:\n token:\n type: string\n description: Код досутпа\n default: ''\n \"\"\"\n import os\n\n # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks\n # if running multiple sites in the same mod_wsgi process. 
To fix this, use\n # mod_wsgi daemon mode with each site in its own daemon process, or use\n # os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"django_custom_user_example.settings\"\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"customuser.settings\")\n\n import django\n django.setup()\n from django.contrib.auth import authenticate\n\n parser = reqparse.RequestParser()\n parser.add_argument('username', help='This field cannot be blank', required=True)\n parser.add_argument('password', help='This field cannot be blank', required=True)\n data = parser.parse_args()\n\n user = authenticate(username=data.username, password=data.password)\n\n if user is not None:\n access_token = create_access_token(identity=data['username'])\n else:\n access_token = ''\n result = {'token': access_token}\n return jsonify(\n result\n )\n\[email protected]('/api/getzkh', methods=['GET'])\n@jwt_required\ndef get_zkh_by_address():\n \"\"\"\n This is FinCase RealEstate API\n Вызовите этот метод и передайте адрес в качестве параметра\n ---\n tags:\n - Финкейс Жилая Недвижимость API\n parameters:\n - in: header\n name: Authorization\n type: string\n required: true\n - name: address\n in: query\n type: string\n required: true\n description: Адрес объекта жилой недвижимости в любом формате\n responses:\n 500:\n description: Ошибка, адрес некорректный\n 200:\n description: Наиболее полный набор параметров БТИ объекта\n schema:\n id: zkh_params\n properties:\n buildingyear:\n type: integer\n description: Год постройки объекта\n default: 2000\n region:\n type: string\n description: Регион нахождения объекта\n default: 'Москва'\n foundation:\n type: string\n description: Основание здания\n default: 'Ленточный'\n house:\n type: string\n description: Тип дома\n default: 'Многоквартирный дом'\n project:\n type: string\n description: Проект или серия дома\n default: 'Индивидуальный проект'\n housetype:\n type: string\n description: Материал здания\n default: 'панельный'\n storeysnum:\n type: integer\n description: Количество этажей в здании\n default: 9\n \"\"\"\n\n address = request.args.get('address', default = '*', type = str)\n fulladdress = apilib.getdadata(address)['fulladdress']\n flatpos = fulladdress.find(\", кв\")\n if flatpos >= 0:\n address = fulladdress[:flatpos]\n else:\n address = fulladdress\n\n result = apilib.getzkh(address)\n return jsonify(\n result\n )\n\n\napp.run(debug=True, host='127.0.0.1', port=3005)\n" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bopopescu/Social-Lite
[ "ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf" ]
[ "google-cloud-sdk/lib/third_party/ml_sdk/cloud/ml/prediction/frameworks/tf_prediction_lib.py" ]
[ "# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for running predictions for TF framework.\n\nNote that we avoid importing tensorflow and tensorflow.contrib at the top.\nThis is because this module gets loaded for other frameworks as well,\nand loading xgboost after tensorflow.contrib causes an error.\nMore context: b/71906188#comment20.\n\"\"\"\nimport base64\nimport collections\nimport logging\nimport os\n\nfrom .. import prediction_utils\nfrom .._interfaces import PredictionClient\nimport numpy as np\nfrom ..prediction_utils import PredictionError\nimport six\n\nimport tensorflow as tf\n\n# pylint: disable=g-import-not-at-top\nif tf.__version__.startswith(\"2.\"):\n import tensorflow.compat.v1 as tf\n from tensorflow import dtypes\n from tensorflow import compat\n SERVING = tf.saved_model.SERVING\n DEFAULT_SERVING_SIGNATURE_DEF_KEY = (\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n tf.disable_v2_behavior()\nelse:\n # tf.dtypes and tf.compat weren't added until later versions of TF.\n # These imports and constants work for all TF 1.X.\n from tensorflow.python.util import compat # pylint: disable=g-direct-tensorflow-import\n from tensorflow.python.framework import dtypes # pylint: disable=g-direct-tensorflow-import\n SERVING = tf.saved_model.tag_constants.SERVING\n DEFAULT_SERVING_SIGNATURE_DEF_KEY = (\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n\n # Force Tensorflow contrib to load in order to provide access to all the\n # libraries in contrib to batch prediction (also, when using SESSION_RUN\n # instead of MODEL_SERVER for online prediction, which we no longer do).\n # However, contrib is no longer a part of TensorFlow 2.0, so check for its\n # existence first.\n try:\n import tensorflow.contrib # pylint: disable=unused-import\n # TF 1.15 introduced lazy loading for tensorflow.contrib, but doing\n # a dir forces it to load.\n dir(tensorflow.contrib)\n except: # pylint: disable=bare-except\n pass\n# pylint: enable=g-import-not-at-top\n\n# --------------------------\n# prediction.frameworks.tf_prediction_lib\n# --------------------------\n_CUSTOM_OP_DIRECTORY_NAME = \"assets.extra\"\n_CUSTOM_OP_SUFFIX = \"*.so\"\n_CUSTOM_OP_LOCAL_DIR = \"/tmp/custom_ops/\"\n\n\ndef columnarize(instances):\n \"\"\"Columnarize inputs.\n\n Each line in the input is a dictionary of input names to the value\n for that input (a single instance). For each input \"column\", this method\n appends each of the input values to a list. The result is a dict mapping\n input names to a batch of input data. 
This can be directly used as the\n feed dict during prediction.\n\n For example,\n\n instances = [{\"a\": [1.0, 2.0], \"b\": \"a\"},\n {\"a\": [3.0, 4.0], \"b\": \"c\"},\n {\"a\": [5.0, 6.0], \"b\": \"e\"},]\n batch = prediction_server_lib.columnarize(instances)\n assert batch == {\"a\": [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],\n \"b\": [\"a\", \"c\", \"e\"]}\n\n Arguments:\n instances: (list of dict) where the dictionaries map input names\n to the values for those inputs.\n\n Returns:\n A dictionary mapping input names to values, as described above.\n \"\"\"\n columns = collections.defaultdict(list)\n for instance in instances:\n for k, v in six.iteritems(instance):\n columns[k].append(v)\n return columns\n\n\ndef rowify(columns):\n \"\"\"Converts columnar input to row data.\n\n Consider the following code:\n\n columns = {\"prediction\": np.array([1, # 1st instance\n 0, # 2nd\n 1]), # 3rd\n \"scores\": np.array([[0.1, 0.9], # 1st instance\n [0.7, 0.3], # 2nd\n [0.4, 0.6]])} # 3rd\n\n Then rowify will return the equivalent of:\n\n [{\"prediction\": 1, \"scores\": [0.1, 0.9]},\n {\"prediction\": 0, \"scores\": [0.7, 0.3]},\n {\"prediction\": 1, \"scores\": [0.4, 0.6]}]\n\n (each row is yielded; no list is actually created).\n\n Arguments:\n columns: (dict) mapping names to numpy arrays, where the arrays\n contain a batch of data.\n\n Raises:\n PredictionError: if the outer dimension of each input isn't identical\n for each of element.\n\n Yields:\n A map with a single instance, as described above. Note: instances\n is not a numpy array.\n \"\"\"\n sizes_set = {e.shape[0] for e in six.itervalues(columns)}\n\n # All the elements in the length array should be identical. Otherwise,\n # raise an exception.\n if len(sizes_set) != 1:\n sizes_dict = {name: e.shape[0] for name, e in six.iteritems(columns)}\n raise PredictionError(\n PredictionError.INVALID_OUTPUTS,\n \"Bad output from running tensorflow session: outputs had differing \"\n \"sizes in the batch (outer) dimension. See the outputs and their \"\n \"size: %s. Check your model for bugs that effect the size of the \"\n \"outputs.\" % sizes_dict)\n # Pick an arbitrary value in the map to get its size.\n num_instances = len(next(six.itervalues(columns)))\n for row in six.moves.xrange(num_instances):\n yield {\n name: output[row, ...].tolist()\n for name, output in six.iteritems(columns)\n }\n\n\ndef canonicalize_single_tensor_input(instances, tensor_name):\n \"\"\"Canonicalize single input tensor instances into list of dicts.\n\n Instances that are single input tensors may or may not be provided with their\n tensor name. The following are both valid instances:\n 1) instances = [{\"x\": \"a\"}, {\"x\": \"b\"}, {\"x\": \"c\"}]\n 2) instances = [\"a\", \"b\", \"c\"]\n This function canonicalizes the input instances to be of type 1).\n\n Arguments:\n instances: single input tensor instances as supplied by the user to the\n predict method.\n tensor_name: the expected name of the single input tensor.\n\n Raises:\n PredictionError: if the wrong tensor name is supplied to instances.\n\n Returns:\n A list of dicts. 
Where each dict is a single instance, mapping the\n tensor_name to the value (as supplied by the original instances).\n \"\"\"\n\n # Input is a single string tensor, the tensor name might or might not\n # be given.\n # There are 3 cases (assuming the tensor name is \"t\", tensor = \"abc\"):\n # 1) {\"t\": \"abc\"}\n # 2) \"abc\"\n # 3) {\"y\": ...} --> wrong tensor name is given.\n def parse_single_tensor(x, tensor_name):\n if not isinstance(x, dict):\n # case (2)\n return {tensor_name: x}\n elif len(x) == 1 and tensor_name == list(x.keys())[0]:\n # case (1)\n return x\n else:\n raise PredictionError(PredictionError.INVALID_INPUTS,\n \"Expected tensor name: %s, got tensor name: %s.\" %\n (tensor_name, list(x.keys())))\n\n if not isinstance(instances, list):\n instances = [instances]\n instances = [parse_single_tensor(x, tensor_name) for x in instances]\n return instances\n\n\n# TODO(b/34686738): when we no longer load the model to get the signature\n# consider making this a named constructor on SessionClient.\ndef load_tf_model(model_path,\n tags=(SERVING,),\n config=None):\n \"\"\"Loads the model at the specified path.\n\n Args:\n model_path: the path to either session_bundle or SavedModel\n tags: the tags that determines the model to load.\n config: tf.ConfigProto containing session configuration options.\n\n Returns:\n A pair of (Session, map<string, SignatureDef>) objects.\n\n Raises:\n PredictionError: if the model could not be loaded.\n \"\"\"\n _load_tf_custom_op(model_path)\n if tf.saved_model.loader.maybe_saved_model_directory(model_path):\n try:\n logging.info(\"Importing tensorflow.contrib in load_tf_model\")\n\n if tf.__version__.startswith(\"1.0\"):\n session = tf.Session(target=\"\", graph=None, config=config)\n else:\n session = tf.Session(target=\"\", graph=tf.Graph(), config=config)\n meta_graph = tf.saved_model.loader.load(\n session, tags=list(tags), export_dir=model_path)\n except Exception as e: # pylint: disable=broad-except\n msg = (\"Failed to load the model due to bad model data. 
\"\n \"tags: %s\" % (list(tags),))\n logging.exception(msg)\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,\n \"%s\\n%s\" % (msg, str(e)))\n else:\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,\n \"Cloud ML only supports TF 1.0 or above and models \"\n \"saved in SavedModel format.\")\n\n if session is None:\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,\n \"Failed to create session when loading the model\")\n\n if not meta_graph.signature_def:\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,\n \"MetaGraph must have at least one signature_def.\")\n\n # Remove invalid signatures from the signature map.\n invalid_signatures = []\n for signature_name in meta_graph.signature_def:\n try:\n signature = meta_graph.signature_def[signature_name]\n _update_dtypes(session.graph, signature.inputs)\n _update_dtypes(session.graph, signature.outputs)\n except ValueError as e:\n logging.warn(\"Error updating signature %s: %s\", signature_name, str(e))\n invalid_signatures.append(signature_name)\n for signature_name in invalid_signatures:\n del meta_graph.signature_def[signature_name]\n\n return session, meta_graph.signature_def\n\n\ndef _update_dtypes(graph, interface):\n \"\"\"Adds dtype to TensorInfos in interface if necessary.\n\n If already present, validates TensorInfo matches values in the graph.\n TensorInfo is updated in place.\n\n Args:\n graph: the TensorFlow graph; used to lookup datatypes of tensors.\n interface: map from alias to TensorInfo object.\n\n Raises:\n ValueError: if the data type in the TensorInfo does not match the type\n found in graph.\n \"\"\"\n for alias, info in six.iteritems(interface):\n # Postpone conversion to enum for better error messages.\n dtype = graph.get_tensor_by_name(info.name).dtype\n if not info.dtype:\n info.dtype = dtype.as_datatype_enum\n elif info.dtype != dtype.as_datatype_enum:\n raise ValueError(\"Specified data types do not match for alias %s. \"\n \"Graph has %d while TensorInfo reports %d.\" %\n (alias, dtype, info.dtype))\n\n\n# (TODO:b/68775232): Move this to a Tensorflow specific library.\nclass TensorFlowClient(PredictionClient):\n \"\"\"A client for Prediction that uses Session.run.\"\"\"\n\n def __init__(self, signature_map, *args, **kwargs):\n self._signature_map = signature_map\n super(TensorFlowClient, self).__init__(*args, **kwargs)\n\n @property\n def signature_map(self):\n return self._signature_map\n\n def get_signature(self, signature_name=None):\n \"\"\"Gets tensorflow signature for the given signature_name.\n\n Args:\n signature_name: string The signature name to use to choose the signature\n from the signature map.\n\n Returns:\n a pair of signature_name and signature. The first element is the\n signature name in string that is actually used. The second one is the\n signature.\n\n Raises:\n PredictionError: when the signature is not found with the given signature\n name or when there are more than one signatures in the signature map.\n \"\"\"\n # The way to find signature is:\n # 1) if signature_name is specified, try to find it in the signature_map. If\n # not found, raise an exception.\n # 2) if signature_name is not specified, check if signature_map only\n # contains one entry. 
If so, return the only signature.\n # 3) Otherwise, use the default signature_name and do 1).\n if not signature_name and len(self.signature_map) == 1:\n return (list(self.signature_map.keys())[0],\n list(self.signature_map.values())[0])\n\n key = (signature_name or DEFAULT_SERVING_SIGNATURE_DEF_KEY)\n if key in self.signature_map:\n return key, self.signature_map[key]\n else:\n raise PredictionError(\n PredictionError.INVALID_INPUTS,\n \"No signature found for signature key %s.\" % signature_name)\n\n\nclass SessionClient(TensorFlowClient):\n \"\"\"A client for Prediction that uses Session.run.\"\"\"\n\n def __init__(self, session, signature_map):\n self._session = session\n super(SessionClient, self).__init__(signature_map)\n\n def predict(self, inputs, stats=None,\n signature_name=None, **unused_kwargs):\n \"\"\"Produces predictions for the given inputs.\n\n Args:\n inputs: a dict mapping input names to values\n stats: Stats object for recording timing information.\n signature_name: name of SignatureDef to use in this prediction\n **unused_kwargs: placeholder, pre/postprocess may have additional args\n\n Returns:\n A dict mapping output names to output values, similar to the input\n dict.\n \"\"\"\n stats = stats or prediction_utils.Stats()\n stats[prediction_utils.ENGINE] = \"SessionRun\"\n stats[\n prediction_utils.FRAMEWORK] = prediction_utils.TENSORFLOW_FRAMEWORK_NAME\n\n with stats.time(prediction_utils.UNALIAS_TIME):\n _, signature = self.get_signature(signature_name)\n fetches = [output.name for output in signature.outputs.values()]\n try:\n unaliased = {\n signature.inputs[key].name: val\n for key, val in six.iteritems(inputs)\n }\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Input mismatch.\")\n raise PredictionError(PredictionError.INVALID_INPUTS,\n \"Input mismatch: \" + str(e))\n\n with stats.time(prediction_utils.SESSION_RUN_TIME):\n try:\n # TODO(b/33849399): measure the actual session.run() time, even in the\n # case of ModelServer.\n outputs = self._session.run(fetches=fetches, feed_dict=unaliased)\n except Exception as e: # pylint: disable=broad=except\n logging.exception(\"Exception running the graph.\")\n raise PredictionError(PredictionError.FAILED_TO_RUN_MODEL,\n \"Exception during running the graph: \" + str(e))\n\n with stats.time(prediction_utils.ALIAS_TIME):\n return dict(zip(six.iterkeys(signature.outputs), outputs))\n\n\nclass TensorFlowModel(prediction_utils.BaseModel):\n \"\"\"The default implementation of the Model interface that uses TensorFlow.\n\n This implementation optionally performs preprocessing and postprocessing\n using the provided functions. These functions accept a single instance\n as input and produce a corresponding output to send to the prediction\n client.\n \"\"\"\n\n def _get_columns(self, instances, stats, signature):\n \"\"\"Columnarize the instances, appending input_name, if necessary.\n\n Instances are the same instances passed to the predict() method. 
Since\n models with a single input can accept the raw input without the name,\n we create a dict here with that name.\n\n This list of instances is then converted into a column-oriented format:\n The result is a dictionary mapping input name to a list of values for just\n that input (one entry per row in the original instances list).\n\n Args:\n instances: the list of instances as provided to the predict() method.\n stats: Stats object for recording timing information.\n signature: SignatureDef for the current request.\n\n Returns:\n A dictionary mapping input names to their values.\n\n Raises:\n PredictionError: if an error occurs during prediction.\n \"\"\"\n with stats.time(prediction_utils.COLUMNARIZE_TIME):\n columns = columnarize(instances)\n for k, v in six.iteritems(columns):\n if k not in signature.inputs.keys():\n raise PredictionError(\n PredictionError.INVALID_INPUTS,\n \"Unexpected tensor name: %s\" % k)\n # Detect whether or not the user omits an input in one or more inputs.\n # TODO(b/34686738): perform this check in columnarize?\n if isinstance(v, list) and len(v) != len(instances):\n raise PredictionError(\n PredictionError.INVALID_INPUTS,\n \"Input %s was missing in at least one input instance.\" % k)\n return columns\n\n # TODO(b/34686738): can this be removed?\n def is_single_input(self, signature):\n \"\"\"Returns True if the graph only has one input tensor.\"\"\"\n return len(signature.inputs) == 1\n\n # TODO(b/34686738): can this be removed?\n def is_single_string_input(self, signature):\n \"\"\"Returns True if the graph only has one string input tensor.\"\"\"\n if self.is_single_input(signature):\n dtype = list(signature.inputs.values())[0].dtype\n return dtype == dtypes.string.as_datatype_enum\n return False\n\n def get_signature(self, signature_name=None):\n return self._client.get_signature(signature_name)\n\n def preprocess(self, instances, stats=None, signature_name=None, **kwargs):\n _, signature = self.get_signature(signature_name)\n preprocessed = self._canonicalize_input(instances, signature)\n return self._get_columns(preprocessed, stats, signature)\n\n def _canonicalize_input(self, instances, signature):\n \"\"\"Preprocess single-input instances to be dicts if they aren't already.\"\"\"\n # The instances should be already (b64-) decoded here.\n if not self.is_single_input(signature):\n return instances\n\n tensor_name = list(signature.inputs.keys())[0]\n return canonicalize_single_tensor_input(instances, tensor_name)\n\n def postprocess(self, predicted_output, original_input=None, stats=None,\n signature_name=None, **kwargs):\n \"\"\"Performs the necessary transformations on the prediction results.\n\n The transformations include rowifying the predicted results, and also\n making sure that each input/output is a dict mapping input/output alias to\n the value for that input/output.\n\n Args:\n predicted_output: list of instances returned by the predict() method on\n preprocessed instances.\n original_input: List of instances, before any pre-processing was applied.\n stats: Stats object for recording timing information.\n signature_name: the signature name to find out the signature.\n **kwargs: Additional keyword arguments for postprocessing\n\n Returns:\n A list which is a dict mapping output alias to the output.\n \"\"\"\n _, signature = self.get_signature(signature_name)\n with stats.time(prediction_utils.ROWIFY_TIME):\n # When returned element only contains one result (batch size == 1),\n # tensorflow's session.run() will return a scalar directly 
instead of a\n # list. So we need to listify that scalar.\n # TODO(b/34686738): verify this behavior is correct.\n def listify(value):\n if not hasattr(value, \"shape\"):\n return np.asarray([value], dtype=np.object)\n elif not value.shape:\n # TODO(b/34686738): pretty sure this is a bug that only exists because\n # samples like iris have a bug where they use tf.squeeze which removes\n # the batch dimension. The samples should be fixed.\n return np.expand_dims(value, axis=0)\n else:\n return value\n\n postprocessed_outputs = {\n alias: listify(val)\n for alias, val in six.iteritems(predicted_output)\n }\n postprocessed_outputs = rowify(postprocessed_outputs)\n\n postprocessed_outputs = list(postprocessed_outputs)\n with stats.time(prediction_utils.ENCODE_TIME):\n try:\n postprocessed_outputs = encode_base64(\n postprocessed_outputs, signature.outputs)\n except PredictionError as e:\n logging.exception(\"Encode base64 failed.\")\n raise PredictionError(PredictionError.INVALID_OUTPUTS,\n \"Prediction failed during encoding instances: {0}\"\n .format(e.error_detail))\n except ValueError as e:\n logging.exception(\"Encode base64 failed.\")\n raise PredictionError(PredictionError.INVALID_OUTPUTS,\n \"Prediction failed during encoding instances: {0}\"\n .format(e))\n except Exception as e: # pylint: disable=broad-except\n logging.exception(\"Encode base64 failed.\")\n raise PredictionError(PredictionError.INVALID_OUTPUTS,\n \"Prediction failed during encoding instances\")\n return postprocessed_outputs\n\n @classmethod\n def from_client(cls, client, unused_model_path, **unused_kwargs):\n \"\"\"Creates a TensorFlowModel from a SessionClient and model data files.\"\"\"\n return cls(client)\n\n @property\n def signature_map(self):\n return self._client.signature_map\n\n\ndef create_tf_session_client(model_dir,\n tags=(SERVING,),\n config=None):\n\n return SessionClient(*load_tf_model(model_dir, tags, config))\n\n\ndef encode_base64(instances, outputs_map):\n \"\"\"Encodes binary data in a JSON-friendly way.\"\"\"\n if not isinstance(instances, list):\n raise ValueError(\"only lists allowed in output; got %s\" %\n (type(instances),))\n\n if not instances:\n return instances\n first_value = instances[0]\n if not isinstance(first_value, dict):\n if len(outputs_map) != 1:\n raise ValueError(\"The first instance was a string, but there is \"\n \"more than one output tensor, so a dict was expected.\")\n # Only string tensors whose name ends in _bytes need encoding.\n tensor_name, tensor_info = list(outputs_map.items())[0]\n tensor_type = tensor_info.dtype\n if tensor_type == dtypes.string:\n instances = _encode_str_tensor(instances, tensor_name)\n return instances\n\n encoded_data = []\n for instance in instances:\n encoded_instance = {}\n for tensor_name, tensor_info in six.iteritems(outputs_map):\n tensor_type = tensor_info.dtype\n tensor_data = instance[tensor_name]\n if tensor_type == dtypes.string:\n tensor_data = _encode_str_tensor(tensor_data, tensor_name)\n encoded_instance[tensor_name] = tensor_data\n encoded_data.append(encoded_instance)\n return encoded_data\n\n\ndef _encode_str_tensor(data, tensor_name):\n \"\"\"Encodes tensor data of type string.\n\n Data is a bytes object in python 3 and a string in python 2. 
Base64-encode the data\n if the tensor name ends in '_bytes', otherwise convert the data to a string.\n\n Args:\n data: Data of the tensor, type bytes in python 3, string in python 2.\n tensor_name: The corresponding name of the tensor.\n\n Returns:\n JSON-friendly encoded version of the data.\n \"\"\"\n if isinstance(data, list):\n return [_encode_str_tensor(val, tensor_name) for val in data]\n if tensor_name.endswith(\"_bytes\"):\n return {\"b64\": compat.as_text(base64.b64encode(data))}\n else:\n return compat.as_text(data)\n\n\ndef _load_tf_custom_op(model_path):\n \"\"\"Loads a custom TF OP (in .so format) from /assets.extra directory.\"\"\"\n assets_dir = os.path.join(model_path, _CUSTOM_OP_DIRECTORY_NAME)\n if tf.gfile.IsDirectory(assets_dir):\n custom_ops_pattern = os.path.join(assets_dir, _CUSTOM_OP_SUFFIX)\n for custom_op_path_original in tf.gfile.Glob(custom_ops_pattern):\n logging.info(\"Found custom op file: %s\", custom_op_path_original)\n if custom_op_path_original.startswith(\"gs://\"):\n if not os.path.isdir(_CUSTOM_OP_LOCAL_DIR):\n os.makedirs(_CUSTOM_OP_LOCAL_DIR)\n custom_op_path_local = os.path.join(\n _CUSTOM_OP_LOCAL_DIR, os.path.basename(custom_op_path_original))\n logging.info(\"Copying custom op from: %s to: %s\",\n custom_op_path_original, custom_op_path_local)\n tf.gfile.Copy(custom_op_path_original, custom_op_path_local, True)\n else:\n custom_op_path_local = custom_op_path_original\n try:\n logging.info(\"Loading custom op: %s\", custom_op_path_local)\n logging.info(\"TF Version: %s\", tf.__version__)\n tf.load_op_library(custom_op_path_local)\n except RuntimeError as e:\n logging.exception(\n \"Failed to load custom op: %s with error: %s. Prediction \"\n \"will likely fail due to missing operations.\", custom_op_path_local,\n e)\n" ]
[ [ "numpy.expand_dims", "tensorflow.compat.v1.saved_model.loader.maybe_saved_model_directory", "tensorflow.compat.v1.disable_v2_behavior", "numpy.asarray", "tensorflow.python.util.compat.as_text", "tensorflow.compat.v1.gfile.Copy", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.gfile.Glob", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.gfile.IsDirectory", "tensorflow.compat.v1.__version__.startswith", "tensorflow.compat.v1.load_op_library" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AiPBAND/OmiTrans
[ "8e5d9198a1ee422eb805e5ead068c1a2523aeed5" ]
[ "models/c_gan_model.py" ]
[ "import torch\nfrom .basic_model import BasicModel\nfrom . import networks\nfrom . import losses\nimport torch.nn as nn\n\n\nclass CGanModel(BasicModel):\n \"\"\"\n This class implements the conditional GAN model, for learning a mapping from input omics data type to output omics\n data type given paired data.\n \"\"\"\n\n @staticmethod\n def modify_commandline_parameters(parser, is_train=True):\n # changing the default values of parameters to match the conditional GAN model\n parser.set_defaults(netG='fcg_sep', netD='fcd_sep')\n parser.add_argument('--latent_dim', type=int, default=256,\n help='the dimensionality of the latent space')\n # changing the default values of parameters to match the conditional GAN model\n if is_train:\n parser.add_argument('--lambda_dist', type=float, default=100.0, help='weight for the dist loss')\n return parser\n\n def __init__(self, param):\n \"\"\"\n Initialize the conditional GAN class.\n \"\"\"\n BasicModel.__init__(self, param)\n # specify the training losses you want to print out.\n self.loss_names = ['D_GAN', 'G_GAN', 'G_dist']\n # specify the models you want to save to the disk.\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test phase, only load G\n self.model_names = ['G']\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(param.input_chan_num, param.output_chan_num, param.netG, param.A_dim, param.B_dim,\n param.gen_filter_num, param.conv_k_size, param.norm_type, param.init_type,\n param.init_gain, self.gpu_ids, param.leaky_slope, param.dropout_p, param.latent_dim)\n\n # define a discriminator if it is the training phase, if it's the testing phase the discriminator is not necessary\n if self.isTrain:\n self.netD = networks.define_D(param.input_chan_num, param.output_chan_num, param.dis_filter_num, param.netD,\n param.A_dim, param.B_dim, param.layer_num_D, param.norm_type, param.init_type,\n param.init_gain, self.gpu_ids, param.leaky_slope, param.dropout_p)\n\n # define loss functions: G = L_GAN + λ L_dist\n # The GAN part of the loss function, this return a loss function nn.Module not a value\n # self.device was defined in BaseModel\n self.lossFuncGAN = losses.GANLossObj(param.GAN_mode).to(self.device)\n self.lossFuncDist = losses.get_dist_loss(param.dist_loss)\n\n # Set optimizer for both generator and discriminator\n # generator and discriminator actually can set to different initial learning rate\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=param.lr_G, betas=(param.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=param.lr_D, betas=(param.beta1, 0.999))\n # optimizer list was already defined in BaseModel\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n self.loss_D_fake = []\n self.loss_D_real = []\n self.loss_D_GAN = []\n self.loss_G_GAN = []\n self.loss_G_dist = []\n self.loss_G = []\n\n if self.param.zo_norm:\n self.sigmoid = nn.Sigmoid()\n\n def set_input(self, input_dict):\n \"\"\"\n Unpack input data from the output dictionary of the dataloader\n\n Parameters:\n input_dict (dict): include the data tensor and its index.\n \"\"\"\n self.real_A_tensor = input_dict['A_tensor'].to(self.device)\n if self.param.ch_separate:\n self.real_B_tensor = []\n for ch in range(0, 23):\n self.real_B_tensor.append(input_dict['B_tensor'][ch].to(self.device))\n else:\n self.real_B_tensor = input_dict['B_tensor'].to(self.device)\n self.data_index = input_dict['index']\n\n def forward(self):\n # Default B -> A\n 
self.fake_A_tensor = self.netG(self.real_B_tensor) # A' = G(B)\n if self.param.zo_norm:\n self.fake_A_tensor = self.sigmoid(self.fake_A_tensor)\n self.fake_A_tensor = (self.param.target_max - self.param.target_min) * self.fake_A_tensor + self.param.target_min\n # Calculate metrics of the fake tensor\n self.calculate_current_metrics()\n\n def backward_G(self):\n \"\"\"Calculate GAN and dist loss for the generator\"\"\"\n # G(B) should fake the discriminator to treat it as real omics data\n # The different part compared with the backward_D is that we don't need the detach fake tensor here\n pred_fake = self.netD(self.fake_A_tensor, self.real_B_tensor) # The prediction vector get from the discriminator for the fake omics data\n self.loss_G_GAN = self.lossFuncGAN(pred_fake, True) # The boolean variable will be extend to a vector as the same size of pred_fake\n\n # G(B) should be as close as A, we use the distance loss\n if self.param.zo_norm and self.param.dist_loss == 'BCE':\n self.loss_G_dist = self.lossFuncDist((self.fake_A_tensor - self.param.target_min)/(self.param.target_max - self.param.target_min), (self.real_A_tensor - self.param.target_min)/(self.param.target_max - self.param.target_min))\n else:\n self.loss_G_dist = self.lossFuncDist(self.fake_A_tensor, self.real_A_tensor)\n\n # Combine the loss and calculate gradients\n # G = L_GAN + λ L_dist\n # The parameter lambda_dist was introduced in this class\n self.loss_G = self.loss_G_GAN + self.param.lambda_dist * self.loss_G_dist\n self.loss_G.backward()\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake\n # Stop backprop to the generator by detaching fake_A\n # Conditional GAN was applied so both the input and output of generator were fed to discriminator\n pred_fake = self.netD(self.fake_A_tensor.detach(), self.real_B_tensor) # the prediction vector get from the discriminator for the fake omics data\n self.loss_D_fake = self.lossFuncGAN(pred_fake, False) # The boolean variable will be extend to a vector as the same size of pred_fake\n\n # Real\n pred_real = self.netD(self.real_A_tensor, self.real_B_tensor)\n self.loss_D_real = self.lossFuncGAN(pred_real, True)\n\n # Combine the loss and calculate gradients\n self.loss_D_GAN = (self.loss_D_fake + self.loss_D_real) / 2\n self.loss_D_GAN.backward()\n\n def update(self):\n self.forward() # Get the fake omics data: G(B)\n\n # Update parameters of the discriminator\n # the method <set_requires_grad> is defined in BaseModel\n self.set_requires_grad(self.netD, True) # Enable backprop for D\n self.optimizer_D.zero_grad() # Set D's gradients to zero\n self.backward_D() # Calculate gradients for D\n self.optimizer_D.step() # Update D's weights\n\n # Update parameters of the generator\n self.set_requires_grad(self.netD, False) # Stop backprop for D when optimizing G\n self.optimizer_G.zero_grad() # Set G's gradients to zero\n self.backward_G() # Calculate gradients for G\n self.optimizer_G.step() # Update G's weights\n\n" ]
[ [ "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
numahha/wmopo
[ "1557dab2e8168c1f2e53ffbc435b4000680f1d28" ]
[ "pendulum_experiments/model.py" ]
[ "import torch\nimport torch.nn.functional as F\n\nclass DynamicsModel(torch.nn.Module): # transition function\n def __init__(self, D_in, D_out, hidden_unit_num):\n\n print(\"[DynamicsModel] H =\",hidden_unit_num)\n super(DynamicsModel, self).__init__()\n\n # zero hidden layer\n #self.l1 = torch.nn.Linear(D_in, D_out, bias=False)\n\n # one hidden layer\n self.l1 = torch.nn.Linear(D_in, hidden_unit_num)\n self.l2 = torch.nn.Linear(hidden_unit_num, D_out) # , bias=False\n\n self.logvar = torch.nn.Parameter(torch.zeros(D_out), requires_grad=True)\n\n\n # two hidden layer\n #self.l1 = torch.nn.Linear(D_in, hidden_unit_num)\n #self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)\n #self.l3 = torch.nn.Linear(hidden_unit_num, D_out)\n\n def forward(self, X):\n\n mu = self.l2(torch.tanh(self.l1(X)))\n return self.l2(torch.tanh(self.l1(X))), self.logvar*torch.ones_like(mu)\n #return self.l2(F.relu(self.l1(X)))\n\n #return self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X)))))\n #return self.l3(F.relu(self.l2(F.relu(self.l1(X)))))\n\n\n\nclass RatioModel(torch.nn.Module): # density ratio\n def __init__(self, D_in, hidden_unit_num):\n super().__init__()\n print(\"[RatioModel] H =\",hidden_unit_num)\n\n #self.l1 = torch.nn.Linear(D_in, hidden_unit_num)\n #self.l2 = torch.nn.Linear(hidden_unit_num, 1) # output dimension is always 1.\n\n self.l1 = torch.nn.Linear(D_in, hidden_unit_num)\n self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)\n self.l3 = torch.nn.Linear(hidden_unit_num, 1)\n\n def forward(self, X):\n #return F.softplus(self.l2(torch.tanh(self.l1(X))))\n return F.softplus(self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X))))))\n\n\n\nclass GradLSDModel(torch.nn.Module): # gradient of log-stationary distribution\n def __init__(self, D_in, D_out):\n super().__init__()\n self.l1 = torch.nn.Linear(D_in, D_out)\n\n def forward(self, X):\n return self.l1(X)\n\n\nclass NLLModel(torch.nn.Module): # nll\n def __init__(self, D_in, hidden_unit_num):\n super().__init__()\n\n print(\"[NLLModel] H =\", hidden_unit_num)\n self.l1 = torch.nn.Linear(D_in, hidden_unit_num)\n #self.l2 = torch.nn.Linear(hidden_unit_num, 1) # , bias=False\n\n self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num) \n self.l3 = torch.nn.Linear(hidden_unit_num, 1) \n\n\n def forward(self, X):\n\n #return self.l2(torch.tanh(self.l1(X)))\n return self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X)))))\n\n\n" ]
[ [ "torch.nn.Linear", "torch.ones_like", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WavesLi/NSC_2to3
[ "3292ac804f8bebce722c0c9fcb2b41ff4f298e2d" ]
[ "NSC+LA/src/LSTMModel.py" ]
[ "#-*- coding: UTF-8 -*- \nfrom EmbLayer import EmbLayer\nfrom LSTMLayer import LSTMLayer\nfrom HiddenLayer import HiddenLayer\nfrom PoolLayer import *\nfrom SentenceSortLayer import *\nimport theano\nimport theano.tensor as T\nimport numpy\nimport random\nimport sys\nfrom Update import AdaUpdates\n\nclass LSTMModel(object):\n def __init__(self, n_voc, trainset, testset, dataname, classes, prefix):\n if prefix != None:\n prefix += '/'\n self.trainset = trainset\n self.testset = testset\n\n docs = T.imatrix()\n label = T.ivector()\n wordmask = T.fmatrix()\n sentencemask = T.fmatrix()\n maxsentencenum = T.iscalar()\n isTrain = T.iscalar()\n\n rng = numpy.random\n\n layers = []\n layers.append(EmbLayer(rng, docs, n_voc, 200, 'emblayer', dataname, prefix))\n layers.append(LSTMLayer(rng, layers[-1].output, wordmask, 200, 200, 'wordlstmlayer', prefix))\n layers.append(SimpleAttentionLayer(rng, layers[-1].output, wordmask,200, 200, 'wordattentionlayer', prefix))\n layers.append(SentenceSortLayer(layers[-1].output,maxsentencenum,prefix))\n layers.append(LSTMLayer(rng, layers[-1].output, sentencemask, 200, 200, 'sentencelstmlayer', prefix))\n layers.append(SimpleAttentionLayer(rng, layers[-1].output, sentencemask,200, 200, 'sentenceattentionlayer', prefix))\n layers.append(HiddenLayer(rng, layers[-1].output, 200, 200, 'fulllayer', prefix))\n layers.append(HiddenLayer(rng, layers[-1].output, 200, int(classes), 'softmaxlayer', prefix, activation=T.nnet.softmax))\n self.layers = layers\n \n cost = -T.mean(T.log(layers[-1].output)[T.arange(label.shape[0]), label], acc_dtype='float32')\n correct = T.sum(T.eq(T.argmax(layers[-1].output, axis=1), label), acc_dtype='int32')\n err = T.argmax(layers[-1].output, axis=1) - label\n mse = T.sum(err * err)\n \n params = []\n for layer in layers:\n params += layer.params\n L2_rate = numpy.float32(1e-5)\n for param in params[1:]:\n cost += T.sum(L2_rate * (param * param), acc_dtype='float32')\n gparams = [T.grad(cost, param) for param in params]\n\n updates = AdaUpdates(params, gparams, 0.95, 1e-6)\n\n self.train_model = theano.function(\n inputs=[docs, label,wordmask,sentencemask,maxsentencenum],\n outputs=cost,\n updates=updates,\n )\n\n self.test_model = theano.function(\n inputs=[docs, label,wordmask,sentencemask,maxsentencenum],\n outputs=[correct, mse],\n )\n\n def train(self, iters):\n lst = numpy.random.randint(self.trainset.epoch, size = iters)\n n = 0\n for i in lst:\n n += 1\n out = self.train_model(self.trainset.docs[i], self.trainset.label[i],self.trainset.wordmask[i],self.trainset.sentencemask[i],self.trainset.maxsentencenum[i])\n print(n, 'cost:',out)\n \n def test(self):\n cor = 0\n tot = 0\n mis = 0\n for i in range(self.testset.epoch):\n tmp = self.test_model(self.testset.docs[i], self.testset.label[i],self.testset.wordmask[i],self.testset.sentencemask[i],self.testset.maxsentencenum[i])\n cor += tmp[0]\n mis += tmp[1]\n tot += len(self.testset.label[i])\n print('Accuracy:',float(cor)/float(tot),'RMSE:',numpy.sqrt(float(mis)/float(tot)))\n return cor, mis, tot\n\n\n def save(self, prefix):\n prefix += '/'\n for layer in self.layers:\n layer.save(prefix)\n" ]
[ [ "numpy.float32", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
khushhallchandra/Deep-Learning-Project
[ "cf832f96d55e268b3eaba62a25ee6b04c22389e0" ]
[ "ExchangeRate/method1b.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas\nimport math\nimport datetime\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\ndata_folder = 'data/'\ncurrency = 'pound'\nlength = 1\ndata = np.genfromtxt(data_folder+currency+'.csv', delimiter=',')[:365,1]\ndef get_data(currency,length):\n\tdata = np.genfromtxt(data_folder+currency+'.csv', delimiter=',')[:365,1]\n\tX,Y = [],[]\n\tfor i in range(len(data)-length):\n\t\tX.append(data[i:i+length])\n\t\tY.append(data[i+length])\n\treturn np.array(X),np.array(Y)\n\ndef convert_binary(X):\n\tnew_X = np.zeros(X.shape[0]-1)\n\tfor i in range(X.shape[0]-1):\n\t\tnew_X[i] = (X[i+1]-X[i])>0\n\treturn new_X\n\n\ndef build_model(length):\n\tmodel = Sequential()\n#\tmodel.add(Dense(5, input_dim=length))\n\tmodel.add(LSTM(20,input_dim=length))\n\tmodel.add(Dense(1))\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\treturn model\n\n\ndatasetX, datasetY = get_data(currency,length)\ntrain_size = int(datasetX.shape[0] * 0.7)\ntest_size = datasetX.shape[0] - train_size\n\ntrainX, testX = datasetX[0:train_size,:], datasetX[train_size:len(datasetX),:]\ntrainY, testY = datasetY[0:train_size], datasetY[train_size:len(datasetX)]\ntrainX = np.reshape(trainX,(trainX.shape[0],1,trainX.shape[1]))\ntestX = np.reshape(testX,(testX.shape[0],1,testX.shape[1]))\n\nmodel = build_model(length)\nmodel.fit(trainX, trainY, nb_epoch=50, batch_size=1)\n\n# make predictions\ntrainout = model.predict(trainX)\ntestout = model.predict(testX)\n\n# calculate Binary error\n\nbinTestout = convert_binary(testout)\nbinTestTruth = convert_binary(testY)\ntestScore = sum(binTestout==binTestTruth)*1.0/binTestout.shape[0]\n\nbinTrainout = convert_binary(trainout)\nbinTrainTruth = convert_binary(trainY)\ntrainScore = sum(binTrainout==binTrainTruth)*1.0/binTrainout.shape[0]\n\n#trainPredict = scaler.inverse_transform(trainPredict)\n#trainY = scaler.inverse_transform([trainY])\n#testPredict = scaler.inverse_transform(testPredict)\n#testY = scaler.inverse_transform([testY])\n\ntrainScorer = math.sqrt(mean_squared_error(binTrainout, binTrainTruth))\ntestScorer = math.sqrt(mean_squared_error(binTestout, binTestTruth))\n\nprint('Train Score: %.2f Binary error' % (trainScore))\nprint('Test Score: %.2f Binary error' % (testScore))\nprint('Train Score: %.2f RMSE error' % (trainScorer))\nprint('Test Score: %.2f RMSE error' % (testScorer))\n\nplt.subplot(1,2,1)\nplt.plot(testout,'r')\nplt.subplot(1,2,1)\nplt.plot(list(data[train_size:train_size+len(testout)]),'b')\n\nplt.subplot(1,2,2)\nplt.plot(trainout,'r')\nplt.subplot(1,2,2)\nplt.plot(list(data[:train_size]),'b')\nplt.show()\n\n" ]
[ [ "numpy.reshape", "numpy.genfromtxt", "matplotlib.pyplot.plot", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.subplot", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
megrao/BioSPPy
[ "52340610f850f382082136cd645496e22fbdbae5" ]
[ "biosppy/signals/bcg.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nbiosppy.signals.bcg\n-------------------\n\nThis module provides methods to process Ballistocardiographic (BCG) signals.\nImplemented code assumes a single-channel head-to-foot like BCG signal.\n\n:author: Guillaume Cathelain\n\n\"\"\"\n\n# Imports\n# compat\nfrom __future__ import absolute_import, division, print_function\nfrom six.moves import range, zip\n\n# 3rd party\nimport numpy as np\nimport scipy.signal as ss\nimport scipy.ndimage as si\nimport scipy.cluster.hierarchy as sch\nimport scipy.spatial.distance as ssd\nimport scipy.fftpack as sf\nimport scipy.optimize as so\nfrom cv2 import matchTemplate,TM_CCORR_NORMED,TM_CCORR\nfrom plotly.offline import plot\nfrom plotly.graph_objs import Scatter\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\nfrom itertools import groupby\n# local\nfrom . import tools as st\nfrom .. import plotting, utils\nfrom . import ecg\n\ndef bcg(signal=None, sampling_rate=1000., show=True):\n \"\"\"Process a raw BCG signal and extract relevant signal features using\n default parameters.\n\n Parameters\n ----------\n signal : array\n Raw BCG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n show : bool, optional\n If True, show a summary plot.\n\n Returns\n -------\n ts : array\n Signal time axis reference (seconds).\n filtered : array\n Filtered BCG signal.\n rpeaks : array\n R-peak location indices.\n templates_ts : array\n Templates time axis reference (seconds).\n templates : array\n Extracted heartbeat templates.\n heart_rate_ts : array\n Heart rate time axis reference (seconds).\n heart_rate : array\n Instantaneous heart rate (bpm).\n\n \"\"\"\n\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n\n sampling_rate = float(sampling_rate)\n\n # segment\n gpeaks,hpeaks,ipeaks,jpeaks,filtered = bsegpp_segmenter(signal=signal,\n sampling_rate=sampling_rate)\n\n # extract templates\n templates, jpeaks = extract_heartbeats(signal=filtered,\n peaks=jpeaks,\n sampling_rate=sampling_rate,\n before=0.4,\n after=0.4)\n\n # compute heart rate\n hr_idx, hr = st.get_heart_rate(beats=jpeaks,\n sampling_rate=sampling_rate,\n smooth=True,\n size=3)\n\n # get time vectors\n length = len(signal)\n T = (length - 1) / sampling_rate\n ts = np.linspace(0, T, length, endpoint=True)\n ts_hr = ts[hr_idx]\n ts_tmpl = np.linspace(-0.4, 0.4, templates.shape[1], endpoint=False)\n\n # plot\n if show:\n plotting.plot_bcg(ts=ts,\n raw=signal,\n filtered=filtered,\n jpeaks=jpeaks,\n templates_ts=ts_tmpl,\n templates=templates,\n heart_rate_ts=ts_hr,\n heart_rate=hr,\n path=None,\n show=True)\n\n # output\n args = (ts, filtered, gpeaks,hpeaks,ipeaks,jpeaks, ts_tmpl, templates,\n ts_hr, hr)\n names = ('ts', 'filtered', 'gpeaks', 'hpeaks', 'ipeaks', 'jpeaks',\n 'templates_ts', 'templates','heart_rate_ts', 'heart_rate')\n\n return utils.ReturnTuple(args, names)\n\n\ndef bsegpp_segmenter(signal=None, sampling_rate=1000., thresholds= [0.05,5],\n R=0.1, t1=0.6, H=0.2, I=0.3, J=0.4):\n \"\"\"BSEG++ BCG cycle extraction algorithm.\n\n Follows the approach by Akhbardeh et al. [Akhb07]_.\n It was adapted to our BCG device, which measures higher G-peaks in the [1,2]\n Hz frequency band. 
Thus G-peaks are here synchronization points and H, I, J\n peaks are searched time ranges depending on G-peaks positions.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n thresholds : array\n Lower and upper amplitude threshold values for local maxima of the\n absolute coarse signal.\n R : float\n Range of local extrema search for final synchronization points (seconds).\n Empirically 0.1<R<0.5.\n t1 : float\n Minimum delay between final synchronization points (seconds).\n Empirically 0.4<t1<0.6\n H : float\n Maximum delay between G and H waves (seconds).\n I : float\n Maximum delay between G and I waves (seconds).\n J : float\n Maximum delay between G and J waves (seconds).\n Returns\n -------\n gpeaks : array\n G-peak location indices.\n hpeaks : array\n H-peak location indices.\n ipeaks : array\n I-peak location indices.\n jpeaks : array\n J-peak location indices.\n filtered : array\n Bandpassed signal in the [2,20] Hz frequency range.\n\n References\n ----------\n .. [Akhb07] A. Akhbardeh, B. Kaminska, K. Tavakolian, \"BSeg++: A modified\n Blind Segmentation Method for Ballistocardiogram Cycle Extraction\",\n Proceedings of the 29th Annual International Conference of the IEEE EMBS,\n 2007\n\n \"\"\"\n\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # 1) normalization\n signal_normed = signal - np.mean(signal)\n signal_normed /= max(abs(signal_normed))\n signal_normed *= 5\n\n # 2) filtering\n filtered, _, _ = st.filter_signal(signal=signal,\n ftype='butter',\n band='bandpass',\n order=4,\n frequency=[2, 20],\n sampling_rate=sampling_rate)\n\n # 3) extract coarse bcg\n coarse, _, _ = st.filter_signal(signal=signal_normed,\n ftype='butter',\n band='highpass',\n order=6,\n frequency=1,\n sampling_rate=sampling_rate)\n coarse, _, _ = st.filter_signal(signal=coarse,\n ftype='butter',\n band='lowpass',\n order=6,\n frequency=2,\n sampling_rate=sampling_rate)\n coarse = abs(coarse)\n\n # synchronization points\n # a) local maxima of absolute coarse BCG with distance constraint\n cntr,properties = ss.find_peaks(coarse,\n height=thresholds,\n threshold=None,\n distance=int(t1*sampling_rate))\n\n # b) final synchronization points\n p, = correct_peaks(signal=-filtered,\n peaks=cntr,\n sampling_rate=sampling_rate,\n tol=R)\n\n # define G waves\n gpeaks = p\n # search for H waves\n hpeaks, = search_peaks(signal=filtered,\n peaks=gpeaks,\n sampling_rate=sampling_rate,\n before = 0,\n after = H)\n # search for I waves\n ipeaks, = search_peaks(signal=-filtered,\n peaks=gpeaks,\n sampling_rate=sampling_rate,\n before = -H,\n after = I)\n # search for J waves\n jpeaks, = search_peaks(signal=filtered,\n peaks=gpeaks,\n sampling_rate=sampling_rate,\n before = 0,\n after = J)\n\n return utils.ReturnTuple((gpeaks,hpeaks,ipeaks,jpeaks,filtered),\n ('gpeaks','hpeaks','ipeaks','jpeaks','filtered'))\n\n\ndef template_matching(signal=None,filtered=None,peaks=None,sampling_rate=1000.,\n threshold = 0.5,R=0.1,show=True):\n \"\"\"Manual template matching algorithm.\n\n Follows the approach by Shin et al. 
[Shin08]_.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n filtered : array\n Input filtered BCG signal, bandpassed to the [2,20] Hz frequency range.\n peaks : array\n J-peaks labels.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n threshold : float\n Minimal correlation value for local maxima.\n R : float\n Range of local extrema search for final synchronization points (seconds).\n Empirically 0.1<R<0.5.\n\n Returns\n -------\n template : array\n Template model.\n peaks : array\n J-peaks location indices.\n\n References\n ----------\n .. [Shin08] J. H. Shin, B. H. Choi, Y. G. Lim, D. U. Jeong, K. S. Park,\n \"Automatic Ballistocardiogram (BCG) Beat Detection Using a Template Matching\n Approach\", Proceedings of the 30th Annual International Conference of the\n IEEE EMBS, 2008\n\n \"\"\"\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input unfiltered signal.\")\n if filtered is None:\n raise TypeError(\"Please specify an input filtered signal.\")\n if peaks is None:\n raise TypeError(\"Please specify peaks indices in the input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n filtered = np.array(filtered)\n sampling_rate = float(sampling_rate)\n\n #template modelling\n templates, peaks = extract_heartbeats(signal=filtered, peaks=peaks,\n sampling_rate=1000., before=0.4, after=0.4)\n for n,tmpl in enumerate(templates):\n tmpl -= np.mean(tmpl)\n tmpl /= max(abs(tmpl))\n templates[n] = tmpl\n template = np.mean(templates,axis=0)\n\n #template_matching\n corr = matchTemplate(filtered.astype('float32'),template.astype('float32'),TM_CCORR_NORMED)\n corr = corr.flatten()\n cntr,properties = ss.find_peaks(corr,height=threshold)\n cntr += int(len(template)/2)\n peaks, = correct_peaks(signal=filtered,\n peaks=cntr,\n sampling_rate=sampling_rate,\n tol=R)\n # plot\n if show:\n # extract templates\n templates, peaks = extract_heartbeats(signal=filtered,\n peaks=peaks,\n sampling_rate=sampling_rate,\n before=0.4,\n after=0.4)\n # compute heart rate\n hr_idx, hr = st.get_heart_rate(beats=peaks,\n sampling_rate=sampling_rate,\n smooth=True,\n size=3)\n # get time vectors\n length = len(signal)\n T = (length - 1) / sampling_rate\n ts = np.linspace(0, T, length, endpoint=True)\n ts_hr = ts[hr_idx]\n ts_tmpl = np.linspace(-0.4, 0.4, templates.shape[1], endpoint=False)\n\n plotting.plot_bcg(ts=ts,\n raw=signal,\n filtered=filtered,\n jpeaks=peaks,\n templates_ts=ts_tmpl,\n templates=templates,\n heart_rate_ts=ts_hr,\n heart_rate=hr,\n path=None,\n show=True)\n\n return utils.ReturnTuple((template,peaks),('template','peaks'))\n\ndef adaptive_heartbeat_modelling(signal=None,sampling_rate=1000.,initial_length=0.6,residual_threshold = 0.35, show=True):\n\n \"\"\"Adaptive Heartbeat Modelling.\n\n Follows the approach by Paalasmaa et al. [Paal14]_. Only suitable here for 15s-long BCG.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n initial_length : float, optional\n Initial length of the template.\n residual_threshold :\n Threshold for heartbeat intervals selection.\n\n Returns\n -------\n template : array\n Heartbeat model.\n peaks : array\n Heartbeats location indices.\n\n References\n ----------\n .. [Paal14] J. Paalasmaa, H. Toivonen, M. 
Partinen,\n \"Adaptive heartbeat modeling for beat-to-beat heart rate measurement in\n ballistocardiograms\", IEEE journal of biomedical and health informatics, 2015\n\n \"\"\"\n # check inputs\n if signal is None:\n raise TypeError(\"Please specify an input signal.\")\n\n # ensure numpy\n signal = np.array(signal)\n sampling_rate = float(sampling_rate)\n\n #preprocessing\n signal -= np.mean(signal)\n filtered, _, _ = st.filter_signal(signal=signal,\n ftype='butter',\n band='lowpass',\n order=2,\n frequency=10,\n sampling_rate=sampling_rate)\n gaussian_filter_std = 0.1\n filtered -= si.gaussian_filter(filtered,gaussian_filter_std*sampling_rate)\n\n #D. Initial estimation of the heartbeat model\n #clustering\n filtered_grad = np.gradient(filtered)\n windows_center_p,_ = ss.find_peaks(filtered_grad)\n windows_center_n,_ = ss.find_peaks(-filtered_grad)\n windows_center = np.sort(np.concatenate((windows_center_p,windows_center_n)))\n windows, windows_center = extract_heartbeats(signal=filtered,\n peaks=windows_center,\n sampling_rate=sampling_rate,\n before=initial_length/2,\n after=initial_length/2)\n\n #clustering\n dist_matrix = ssd.pdist(windows)\n n = len(windows)\n linkage_matrix = sch.linkage(dist_matrix,method='complete')\n densest_4_cluster_indices, = np.where(linkage_matrix[:,3]==4)\n densest_4_cluster_index = densest_4_cluster_indices[0]\n leader_node = densest_4_cluster_index + n\n max_inconsistent_value = linkage_matrix[densest_4_cluster_index,2]\n flat_clusters = sch.fcluster(linkage_matrix,max_inconsistent_value,criterion='distance')\n L,M = sch.leaders(linkage_matrix,flat_clusters)\n leaves, = np.where(flat_clusters == M[L == leader_node])\n\n windows, windows_center = extract_heartbeats(signal=filtered,\n peaks=windows_center[leaves],\n sampling_rate=sampling_rate,\n before=1.25,\n after=1.25)\n\n mu = np.mean(windows,axis=0)\n\n\n hvs_result = modified_heart_valve_signal(signal = signal, sampling_rate=sampling_rate)\n hvs = hvs_result['hvs']\n hvs_minima,_ = ss.find_peaks(-hvs)\n half_lengths = []\n for center in windows_center:\n half_lengths.append(min(center - hvs_minima[hvs_minima<center]))\n half_lengths.append(min(hvs_minima[hvs_minima>center] - center))\n\n half_len = min(half_lengths)\n mu = mu[int(len(mu)/2)-half_len:int(len(mu)/2)+half_len]\n mu_center = int(len(mu)/2)\n\n #E/ Detecting heartbeat position candidates\n peaks=[]\n ta=[]\n tb=[]\n\n for iter in range(2):\n peaks=[]\n ta =[]\n tb=[]\n half_len = int(initial_length*sampling_rate/2)\n if (half_len > mu_center)|(half_len > len(mu)-mu_center):\n raise ValueError('Template is too short or badly centered')\n mu_corr = mu[mu_center-half_len:mu_center+half_len]\n corr = matchTemplate(filtered.astype('float32'),mu_corr.astype('float32'),TM_CCORR_NORMED)\n corr = corr.flatten()\n candidates_pos,_ = ss.find_peaks(corr)\n corr_delay = -mu_center+half_len\n\n #F/Detecting beat-to-beat intervals\n half_len = int(1*sampling_rate)\n if half_len > len(mu)-mu_center:\n mu2 = np.append(mu,np.zeros(2*half_len-len(mu)))\n else:\n mu2 = mu[:int(2*sampling_rate)]\n\n candidates_pos += corr_delay\n candidates_pos = candidates_pos[candidates_pos>=0]\n\n #1) Initialize ta to the first candidate position\n ta_cand = candidates_pos[0]\n while ta_cand < candidates_pos[-1]:\n try:\n if ta_cand+int(2*sampling_rate)>len(filtered):\n raise Exception\n sa = filtered[ta_cand:ta_cand+int(2*sampling_rate)]\n za = so.least_squares(lambda z: np.mean(np.power(sa-z*mu2,2)),1).x[0]\n xa = za*mu2\n #2) Find candidates for tb\n tb_candidates = 
candidates_pos[np.logical_and(ta_cand+int(0.4*sampling_rate)<candidates_pos,candidates_pos<ta_cand+int(2*sampling_rate))]\n #3) find best tb or find another ta -> step 2)\n for tb_cand in tb_candidates:\n if tb_cand+int(2*sampling_rate)>len(filtered):\n raise Exception\n sb = filtered[tb_cand:tb_cand+int(2*sampling_rate)]\n zb = so.least_squares(lambda z: np.mean(np.power(sb-z*mu2,2)),1).x[0]\n xb = zb*mu2\n xa_tmp = np.concatenate((xa,np.zeros(max([0,2*(tb_cand-ta_cand)-int(2*sampling_rate)]))))\n xb_tmp = np.concatenate((np.zeros(tb_cand-ta_cand),xb))\n x = xa_tmp[:2*(tb_cand-ta_cand)]+xb_tmp[:2*(tb_cand-ta_cand)]\n s = filtered[ta_cand:ta_cand+2*(tb_cand-ta_cand)]\n eps = s - x\n\n if (np.mean(np.power(eps,2)) < residual_threshold*np.mean(np.power(s,2))) & (max([za,zb])<2*min([za,zb])):\n ta.append(ta_cand)\n tb.append(tb_cand)\n peak_a = ta_cand+mu_center\n peak_b = tb_cand+mu_center\n if peak_a not in peaks:\n peaks.append(peak_a)\n peaks.append(peak_b)\n ta_cand = tb_cand\n break\n else:\n continue\n\n if ta_cand != tb_cand:\n ta_candidates = candidates_pos[np.logical_and(candidates_pos>ta_cand,candidates_pos<ta_cand+int(2*sampling_rate))]\n ta_cand = ta_candidates[np.argmax(corr[ta_candidates-corr_delay])]\n except Exception:\n break\n beats = dict(peaks=np.array(peaks),ta =np.array(ta),tb=np.array(tb))\n\n #G. re-estimation of the model with detected beat to beat intervals\n template_extraction = long_template_extraction(signal = filtered, beats = beats, mu_center = mu_center,sampling_rate=1000.)\n try:\n mu = template_extraction['long_template']\n mu_center_new = template_extraction['long_template_center']\n mu = mu[mu_center_new-mu_center:]\n except KeyError:\n mu = template_extraction['short_template']\n peaks = beats['peaks']\n print('iteration no ',iter,': ',len(peaks),' beats detected')\n\n #H. Accounting for abrupt changes of the heartbeat shape\n # to complete, with four different instances of the beat-to-beat detection method\n\n #I. Post-preprocessing\n # slightly different in our case : we added a smoother rather than the non linear filter explained in the paper\n\n if show:\n # extract templates\n templates, peaks = extract_heartbeats(signal=filtered,\n peaks=peaks,\n sampling_rate=sampling_rate,\n before=0.6,\n after=0.2)\n # compute heart rate\n hr_idx, hr = st.get_heart_rate(beats=peaks,\n sampling_rate=sampling_rate,\n smooth=True,\n size=3)\n # get time vectors\n length = len(signal)\n T = (length - 1) / sampling_rate\n ts = np.linspace(0, T, length, endpoint=True)\n ts_hr = ts[hr_idx]\n ts_tmpl = np.linspace(-0.4, 0.4, templates.shape[1], endpoint=False)\n\n plotting.plot_bcg(ts=ts,\n raw=signal,\n filtered=filtered,\n jpeaks=peaks,\n templates_ts=ts_tmpl,\n templates=templates,\n heart_rate_ts=ts_hr,\n heart_rate=hr,\n path=None,\n show=True)\n\n return utils.ReturnTuple((mu,peaks),('template','peaks'))\n\n\ndef heart_valve_signal(signal = None, sampling_rate=1000.):\n \"\"\"Heart valve signal filtering.\n\n Follows the approach by Friedrich et al. [Frie10]_.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n\n Returns\n -------\n hvs : array\n Heart valve signal.\n\n References\n ----------\n .. [Frie10] Heart Rate Estimation on a Beat-to-Beat Basis via\n Ballistocardiography - A hybrid Approach.\n David Friedrich, Xavier L. 
Aubert, Hartmut Führ and Andreas Brauers\n\n \"\"\"\n\n filtered, _, _ = st.filter_signal(signal=signal,\n ftype='butter',\n band='bandpass',\n order=2,\n frequency=[20, 40],\n sampling_rate=sampling_rate)\n\n hvs = np.power(filtered,2)\n\n hvs, _, _ = st.filter_signal(signal=hvs,\n ftype='butter',\n band='lowpass',\n order=2,\n frequency=2,\n sampling_rate=sampling_rate)\n\n return utils.ReturnTuple((hvs,),('hvs',))\n\n\ndef modified_heart_valve_signal(signal = None, sampling_rate=1000.):\n \"\"\"Heart valve signal filtering.\n\n Follows the approach by Bruser et al. [Brus11]_.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n\n Returns\n -------\n hvs : array\n Heart valve signal.\n center_frequency : float\n Auto-tuned center frequency of the bandpass filter.\n\n References\n ----------\n .. [Brus11] C. Bruser and K. Stadlthanner and S. de Waele and S. Leonhardt,\n \"Adaptive Beat-to-Beat Heart Rate Estimation in Ballistocardiograms\",\n IEEE Transactions on Information Technology in Biomedicine, 2011\n\n \"\"\"\n N = len(signal)\n signal_f = sf.fft(signal)\n signal_f = signal_f[:int(N/2)+1]\n freq_step = sampling_rate/N\n band_start_index = int(20/freq_step)\n band_stop_index = int(40/freq_step)\n center_frequency_index = np.argmax(signal_f[band_start_index:band_stop_index])\n center_frequency_index += band_start_index\n center_frequency = center_frequency_index*freq_step\n frequency = [center_frequency - 2, center_frequency + 2]\n filtered, _, _ = st.filter_signal(signal=signal,\n ftype='butter',\n band='bandpass',\n order=2,\n frequency=frequency,\n sampling_rate=sampling_rate)\n\n hvs = np.power(filtered,2)\n\n hvs, _, _ = st.filter_signal(signal=hvs,\n ftype='butter',\n band='lowpass',\n order=2,\n frequency=2,\n sampling_rate=sampling_rate)\n\n return utils.ReturnTuple((hvs,center_frequency),('hvs','center_frequency'))\n\ndef long_template_extraction(signal = None, beats = None, mu_center=None, sampling_rate=1000.):\n \"\"\"Long template extraction.\n\n Follows the approach by Inan et al. [Inan09]_.\n\n Parameters\n ----------\n signal : array\n Input unfiltered BCG signal.\n beats : dict\n 'ta':start indices of heart beat interval\n 'tb':stop indices of heart beat interval\n 'peaks': peaks positions.\n mu_center : int\n Inner position reference of the template.\n sampling_rate : int, float, optional\n Sampling frequency (Hz).\n\n Returns\n -------\n short_template : array\n Template result when there is no detected heartbeat quadruplet\n amplitudes: array\n Relative amplitude of heartbeats\n long_template:\n Template result when there is at least one detected heartbeat quadruplet\n long_template_center:\n Template inner position reference.\n\n References\n ----------\n .. [Inan09] O. T. Inan, M. Etemadi, R. M. Wiard, G. T. A. 
Kovacs,\n \"Novel Methods for Estimating the Ballistocardiogram Signal Using a Simultaneously Acquired Electrocardiogram\",\n 31st Annual International Conference of the IEEE EMBS Minneapolis, Minnesota, USA, September 2-6, 2009\n\n \"\"\"\n\n ## Esimation of BCG Heartbeat Amplitudes\n ta = beats['ta']\n tb = beats['tb']\n t = np.append(ta,tb[-1])\n T = tb-ta\n Tmin = np.min(T)\n T = np.insert(T,0,0)\n x = np.array([signal[tn:tn+Tmin] for tn in t])\n s = np.mean(x,axis=0)\n\n ## Modified Ensemble Averaging for Extracting Long-Window Pulse Response\n # first find the longest segment with adjacent IBIs.\n is_adjacent = (tb[:-1] == ta[1:])\n are_adjacent_beats = [list([g[0] for g in group]) for key,group in groupby(enumerate(is_adjacent),lambda x:x[1]) if key == True]\n adjacent_beats_indices = np.array([seg.append(seg[-1]+1) or seg for seg in are_adjacent_beats])\n adjacent_beats_len = np.array([len(seg) for seg in adjacent_beats_indices])\n\n # if it's long enough find quadruplet templates\n if max(adjacent_beats_len)<4-1:\n return utils.ReturnTuple((s,),('short_template',))\n\n longest_segment_index = np.argmax(adjacent_beats_len)\n ta = ta[adjacent_beats_indices[longest_segment_index]]\n tb = tb[adjacent_beats_indices[longest_segment_index]]\n t = np.append(ta,tb[-1])\n T = tb-ta\n Tmin = min(T)\n Tmax = max(T)\n T = np.insert(T,0,0)\n x = np.array([signal[tn:tn+Tmin] for tn in t])\n s = np.mean(x,axis=0)\n Rss = matchTemplate(s.astype('float32'),s.astype('float32'),TM_CCORR)\n a = np.array([matchTemplate(xn.astype('float32'),s.astype('float32'),TM_CCORR)/Rss for xn in x]).flatten()\n K = 4*Tmax\n N = len(t)\n x_tilde = []\n for tn in t:\n if (tn>Tmax)&(tn+3*Tmax<len(signal)):\n x_tilde.append(signal[tn-Tmax:tn+3*Tmax])\n x_tilde = np.array(x_tilde)\n hcon = np.mean(x_tilde,axis=0)\n\n h = np.zeros(K)\n for n in range(1,N-2):\n h += x_tilde[n]\n delay = -T[n]+Tmax\n sn_1 = delay_and_pad(signal = s, delay = delay, desired_len = K)\n h -= a[n-1]*sn_1\n delay = T[n+1]+Tmax\n sn1 = delay_and_pad(signal = s, delay = delay, desired_len = K)\n h -= a[n+1]*sn1\n delay = T[n+2]+T[n+1]+Tmax\n sn2 = delay_and_pad(signal = s, delay = delay, desired_len = K)\n h -= a[n+2]*sn2\n\n h /= N-3\n mu_center += Tmax\n return utils.ReturnTuple((s,a,h,mu_center),('short_template','amplitudes','long_template','long_template_center'))\n\ndef delay_and_pad(signal = None, delay = None, desired_len = None):\n if delay < 0:\n raise TypeError(\"Please specify a positive delay.\")\n elif delay > desired_len-len(signal):\n raise TypeError(\"Please specify a shorter delay.\")\n else:\n return np.pad(signal,(delay,desired_len-len(signal)-delay),'constant',constant_values = (0,0))\n\ndef correct_peaks(signal=None, peaks=None, sampling_rate=1000., tol=0.3):\n return ecg.correct_rpeaks(signal=signal,\n rpeaks=peaks,\n sampling_rate=sampling_rate,\n tol=tol)\n\ndef search_peaks(signal=None, peaks=None, sampling_rate=1000.,\n before=0.2, after=0.2):\n return ecg.correct_rpeaks(signal=signal,\n rpeaks=peaks+int(sampling_rate*(after-before)/2),\n sampling_rate=sampling_rate,\n tol=(after+before)/2)\n\ndef extract_heartbeats(signal=None, peaks=None, sampling_rate=1000.,\n before=0.4, after=0.4):\n return ecg.extract_heartbeats(signal=signal,\n rpeaks=peaks,\n sampling_rate=sampling_rate,\n before=before,\n after=after)\n" ]
[ [ "scipy.signal.find_peaks", "numpy.linspace", "scipy.fftpack.fft", "numpy.concatenate", "numpy.mean", "scipy.cluster.hierarchy.leaders", "numpy.where", "numpy.argmax", "numpy.insert", "scipy.cluster.hierarchy.linkage", "numpy.zeros", "numpy.power", "numpy.min", "numpy.append", "numpy.array", "scipy.cluster.hierarchy.fcluster", "scipy.ndimage.gaussian_filter", "numpy.gradient", "scipy.spatial.distance.pdist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
adrn/thejoker
[ "e77182bdb368e20127a17cc76ba1083ab77746ea" ]
[ "thejoker/src/setup_package.py" ]
[ "from distutils.core import Extension\nfrom collections import defaultdict\nimport os\n\n\ndef get_extensions():\n exts = []\n\n import numpy as np\n import twobody\n\n cfg = defaultdict(list)\n cfg['include_dirs'].append(np.get_include())\n\n twobody_path = os.path.dirname(twobody.__file__)\n cfg['include_dirs'].append(twobody_path)\n cfg['sources'].append(os.path.join(twobody_path, 'src/twobody.c'))\n\n cfg['extra_compile_args'].append('--std=gnu99')\n cfg['sources'].append('thejoker/src/fast_likelihood.pyx')\n exts.append(Extension('thejoker.src.fast_likelihood', **cfg))\n\n return exts\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seraconlp/transformers
[ "1073a2bde5d608f9891d6da6df7b63921dca1b71" ]
[ "tests/test_modeling_tf_openai.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import OpenAIGPTConfig, is_tf_available\nfrom transformers.testing_utils import require_tf, slow\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_modeling_tf_common import TFModelTesterMixin, ids_tensor\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers.modeling_tf_openai import (\n TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,\n TFOpenAIGPTDoubleHeadsModel,\n TFOpenAIGPTLMHeadModel,\n TFOpenAIGPTModel,\n )\n\n\nclass TFOpenAIGPTModelTester:\n def __init__(\n self,\n parent,\n ):\n self.parent = parent\n self.batch_size = 13\n self.seq_length = 7\n self.is_training = True\n self.use_token_type_ids = True\n self.use_input_mask = True\n self.use_labels = True\n self.use_mc_token_ids = True\n self.vocab_size = 99\n self.hidden_size = 32\n self.num_hidden_layers = 5\n self.num_attention_heads = 4\n self.intermediate_size = 37\n self.hidden_act = \"gelu\"\n self.hidden_dropout_prob = 0.1\n self.attention_probs_dropout_prob = 0.1\n self.max_position_embeddings = 512\n self.type_vocab_size = 16\n self.type_sequence_label_size = 2\n self.initializer_range = 0.02\n self.num_labels = 3\n self.num_choices = 4\n self.scope = None\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n mc_token_ids = None\n if self.use_mc_token_ids:\n mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = OpenAIGPTConfig(\n vocab_size=self.vocab_size,\n n_embd=self.hidden_size,\n n_layer=self.num_hidden_layers,\n n_head=self.num_attention_heads,\n # intermediate_size=self.intermediate_size,\n # hidden_act=self.hidden_act,\n # hidden_dropout_prob=self.hidden_dropout_prob,\n # attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n n_positions=self.max_position_embeddings,\n n_ctx=self.max_position_embeddings,\n # type_vocab_size=self.type_vocab_size,\n # initializer_range=self.initializer_range,\n )\n\n head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)\n\n return (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n )\n\n def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, 
token_type_ids, *args):\n model = TFOpenAIGPTModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n\n inputs = [input_ids, input_mask]\n result = model(inputs)\n\n result = model(input_ids)\n\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n\n def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):\n model = TFOpenAIGPTLMHeadModel(config=config)\n inputs = {\"input_ids\": input_ids, \"attention_mask\": input_mask, \"token_type_ids\": token_type_ids}\n result = model(inputs)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_openai_gpt_double_head(\n self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args\n ):\n model = TFOpenAIGPTDoubleHeadsModel(config=config)\n\n multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))\n multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))\n multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))\n\n inputs = {\n \"input_ids\": multiple_choice_inputs_ids,\n \"mc_token_ids\": mc_token_ids,\n \"attention_mask\": multiple_choice_input_mask,\n \"token_type_ids\": multiple_choice_token_type_ids,\n }\n result = model(inputs)\n self.parent.assertEqual(\n result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)\n )\n self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n\n (\n config,\n input_ids,\n input_mask,\n head_mask,\n token_type_ids,\n mc_token_ids,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_tf\nclass TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel) if is_tf_available() else ()\n )\n all_generative_model_classes = (\n (TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()\n ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly\n\n def setUp(self):\n self.model_tester = TFOpenAIGPTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_openai_gpt_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)\n\n def test_openai_gpt_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)\n\n def test_openai_gpt_double_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFOpenAIGPTModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_tf\nclass 
TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):\n @slow\n def test_lm_generate_openai_gpt(self):\n model = TFOpenAIGPTLMHeadModel.from_pretrained(\"openai-gpt\")\n input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is\n expected_output_ids = [\n 481,\n 4735,\n 544,\n 246,\n 963,\n 870,\n 762,\n 239,\n 244,\n 40477,\n 244,\n 249,\n 719,\n 881,\n 487,\n 544,\n 240,\n 244,\n 603,\n 481,\n ] # the president is a very good man. \" \\n \" i\\'m sure he is, \" said the\n\n output_ids = model.generate(input_ids, do_sample=False)\n self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
VMK11/cnn-text-classification
[ "63df563b6dede311f0b4f882f0e2759f375f768f" ]
[ "train.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\nimport os, argparse\nfrom tensorflow.python.framework import graph_util\n\n\n# Parameters\n# ==================================================\n\n# Data loading params\ntf.flags.DEFINE_float(\"dev_sample_percentage\", .1, \"Percentage of the training data to use for validation\")\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/rt-polaritydata/rt-polarity.pos\", \"Data source for the positive data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\", \"Data source for the negative data.\")\n\n# Model Hyperparameters\ntf.flags.DEFINE_integer(\"embedding_dim\", 128, \"Dimensionality of character embedding (default: 128)\")\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\ntf.flags.DEFINE_integer(\"num_filters\", 128, \"Number of filters per filter size (default: 128)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda (default: 0.0)\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 150, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 2, \"Number of training epochs (default: 200)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate model on dev set after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5, \"Number of checkpoints to store (default: 5)\")\n\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\n\n\ndef preprocess():\n # Data Preparation\n # ==================================================\n\n # Load data\n print(\"Loading data...\")\n x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)\n\n # Build vocabulary\n max_document_length = max([len(x.split(\" \")) for x in x_text])\n vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\n x = np.array(list(vocab_processor.fit_transform(x_text)))\n\n # Randomly shuffle data\n np.random.seed(10)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n x_shuffled = x[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split train/test set\n # TODO: This is very crude, should use cross-validation\n dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))\n x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\n y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\n\n del x, y, x_shuffled, y_shuffled\n\n print(\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_)))\n print(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), len(y_dev)))\n return x_train, y_train, vocab_processor, x_dev, y_dev\n\ndef train(x_train, y_train, vocab_processor, x_dev, y_dev):\n # Training\n # ==================================================\n\n with tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess 
= tf.Session(config=session_conf)\n with sess.as_default():\n cnn = TextCNN(\n sequence_length=x_train.shape[1],\n num_classes=y_train.shape[1],\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n filter_sizes=list(map(int, FLAGS.filter_sizes.split(\",\"))),\n num_filters=FLAGS.num_filters,\n l2_reg_lambda=FLAGS.l2_reg_lambda)\n\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-3)\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # Keep track of gradient values and sparsity (optional)\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Train Summaries\n train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Dev summaries\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\n\n # Write vocabulary\n vocab_processor.save(os.path.join(out_dir, \"vocab\"))\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n def dev_step(x_batch, y_batch, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n step, summaries, loss, accuracy = sess.run(\n [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if writer:\n writer.add_summary(summaries, step)\n \n # Generate batches\n batches = data_helpers.batch_iter(\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\n # Training loop. For each batch...\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(x_dev, y_dev, writer=dev_summary_writer)\n print(\"\")\n if current_step % FLAGS.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n\n \ndef main(argv=None):\n x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()\n train(x_train, y_train, vocab_processor, x_dev, y_dev)\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.train.global_step", "tensorflow.global_variables", "tensorflow.train.AdamOptimizer", "tensorflow.flags.DEFINE_float", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.app.run", "tensorflow.flags.DEFINE_boolean", "tensorflow.global_variables_initializer", "tensorflow.summary.merge", "tensorflow.flags.DEFINE_integer", "tensorflow.summary.FileWriter", "numpy.random.seed", "tensorflow.flags.DEFINE_string", "tensorflow.nn.zero_fraction", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
whzhangg/IFermi
[ "511c69acd1e5ac849f073d435210a25a603d5bcb" ]
[ "ifermi/QE_adaptor.py" ]
[ "import re\nimport numpy as np\nfrom pymatgen.electronic_structure.bandstructure import BandStructure\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.electronic_structure.core import Spin\n\n# Wenhao Zhang, 2021/09/23 [email protected]\n# examples/MgB2_QE contains test output for spin non-polarized and spin polarized calculation, which reproduce the \n# result as in MgB2\n\nBohr2A = 0.529177210 # A\n\nclass nscfOut:\n \"\"\"\n a class written to praser nscf output of Quantum espresso pw.x\n\n requirement:\n verbosity = 'high' (output energies for each k point)\n\n parsing assumes a fixed output format of PWSCF v. 6.4\n\n \"\"\"\n def __init__(self, filename: str):\n self.output = filename\n \n self.spin_polarized = False\n self.efermi = None\n\n # crystal structure\n self.positions = None\n self.symbols = None\n self.lattice = None\n\n # reciprocal lattice\n self.reciprocal = None\n\n # kpoints\n self.kpoints = None\n self.eig = None\n\n self._parse_nscf()\n\n def _parse_nscf(self) -> None:\n \"\"\"\n extract crystal structure, reciprocal cell,\n kpoints fraction coordinate and eigenvalue\n \"\"\"\n alat = 0\n lattice = np.zeros((3,3))\n recip = np.zeros((3,3))\n nbnd = 0\n natom = 0\n positions = []\n nk = 0\n symbols = []\n k_frac = []\n efermi = 0\n\n energy = {\"spinup\" : [],\n \"spindown\" : []\n }\n\n which = \"spinup\" # remember if we are reading spin up or spin down\n \n with open(self.output,'r') as f:\n aline=f.readline()\n\n while aline:\n # read information by checking the flags\n if \"lattice parameter (alat) =\" in aline:\n data = aline.split('=')[1]\n data = data.split()\n alat = float(data[0]) # in Bohr\n\n if \"number of Kohn-Sham states\" in aline:\n data = aline.split()[-1]\n nbnd = int(data)\n\n if \"number of atoms/cell\" in aline:\n data = aline.split()[-1]\n natom = int(data)\n\n if \"crystal axes: (cart. coord. in units of alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n lattice[i] = np.array(data, dtype = float) \n lattice *= alat * Bohr2A\n\n if \"reciprocal axes: (cart. coord. in units 2 pi/alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n recip[i] = np.array(data, dtype = float)\n recip *= 2 * np.pi / (alat * Bohr2A)\n\n if \"site n. atom positions (cryst. 
coord.)\" in aline:\n for i in range(natom):\n data = f.readline()\n symbols.append(re.findall(r'[A-Z][a-z]*', data)[0])\n positions.append(np.array(re.findall('-?\\d+\\.\\d+', data), dtype = float))\n \n if \"number of k points= \" in aline:\n nk = int( re.findall(r'\\d+', aline)[0] )\n k_frac = np.zeros((nk,3))\n\n if re.search(r'k\\(.+\\)\\s+=\\s+\\(.+\\)', aline) != None:\n parts = aline.split('=')\n ik = int( re.findall(r'\\d+', parts[0])[0] )\n pos = np.array(re.findall(r'-?\\d+\\.\\d+', parts[1]), dtype = float)\n k_frac[ik-1] = pos\n\n if \"the Fermi energy is\" in aline:\n efermi = float(re.findall(r'-?\\d+\\.\\d+', aline)[0])\n\n if \"------ SPIN UP ------------\" in aline:\n which = \"spinup\"\n\n if \"------ SPIN DOWN ----------\" in aline:\n which = \"spindown\"\n\n if re.search('k\\s+=\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s',aline) != None:\n kstr=re.findall(r'-?\\d+\\.\\d+',aline)\n\n f.readline()\n\n lenergy = [] # local energy for each k point\n while len(lenergy) < nbnd:\n aline = f.readline()\n data = np.array(aline.split(), dtype = float)\n for d in data:\n lenergy.append(d)\n\n if len(lenergy) > nbnd:\n raise \"length of energy > nbnd\"\n\n energy[which].append(lenergy)\n \n aline = f.readline()\n\n self.efermi = efermi\n self.lattice = lattice\n self.symbols = symbols \n self.positions = np.array(positions)\n self.reciprocal = recip\n self.kpoints = k_frac\n\n self.eig = {}\n self.eig[Spin.up] = np.array(energy[\"spinup\"]).T\n\n if energy[\"spindown\"]:\n self.spin_polarized = True\n self.eig[Spin.down] = np.array(energy[\"spindown\"]).T\n\n\ndef QE_get_band_structure(nscfout: str) -> BandStructure:\n \"\"\"\n get pymatgen.electronic_structure.bandstructure object from a nscf output\n \n args:\n file name of the nscf calculation\n \"\"\"\n result = nscfOut(nscfout)\n\n lattice_new = Lattice(result.reciprocal)\n structure = Structure(result.lattice, result.symbols, result.positions)\n\n return BandStructure(result.kpoints, \n result.eig, \n lattice_new, \n result.efermi, \n structure = structure)\n\nif __name__ == \"__main__\":\n bs = QE_get_band_structure(\"../examples/MgB2_QE/MgB2/nscf.nspin1.out\")\n print(bs.bands)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
agora-ecosystem/tee-bench
[ "befa7ff19963643a6e98840b38065ad7ce5791c1" ]
[ "scripts/exp6-scale-r-experiment-SUDO.py" ]
[ "#!/usr/bin/python3\n\nimport commons\nimport re\nimport statistics\nimport subprocess\nimport csv\nimport matplotlib.pyplot as plt\n\nfilename = \"data/scale-r-output.csv\"\nmb_of_data = 131072\n\n\ndef run_join(prog, alg, size_r, size_s, threads, reps, mode):\n f = open(filename, \"a\")\n results = []\n ewbs = []\n for i in range(0,reps):\n stdout = subprocess.check_output(prog + \" -a \" + alg + \" -r \" + str(size_r) + \" -s \" + str(size_s) +\n \" -n \" + str(threads), cwd=\"../\", shell=True) \\\n .decode('utf-8')\n for line in stdout.splitlines():\n if \"Throughput\" in line:\n throughput = re.findall(\"\\d+\\.\\d+\", line)[1]\n results.append(float(throughput))\n print (\"Throughput = \" + str(throughput))\n elif \"EWB Total [MB]\" in line:\n ewb = re.findall(\"\\d+\\.\\d+\", line)[1]\n print(\"EWB = \" + str(ewb))\n ewbs.append(float(ewb))\n # remove max and min values as extreme outliers\n if reps > 4:\n results.remove(max(results))\n results.remove(min(results))\n ewbs.remove(max(ewbs))\n ewbs.remove(min(ewbs))\n\n if len(results) == 0:\n results = [-1]\n if len(ewbs) == 0:\n ewbs = [-1]\n res = statistics.mean(results)\n ewb = statistics.mean(ewbs)\n s = (mode + \",\" + alg + \",\" + str(threads) + \",\" + str(round(size_r/mb_of_data,2)) +\n \",\" + str(round(size_s/mb_of_data,2)) + \",\" + str(round(res,2)) + \",\" + str(round(ewb,2)))\n print (\"AVG : \" + s)\n f.write(s + \"\\n\")\n f.close()\n\n\ndef plot():\n s_sizes_names = [\n '$S_{size}$ < L2',\n 'L2 < $S_{size}$ < L3',\n 'L3 < $S_{size}$ < EPC',\n 'EPC < $S_{size}$'\n ]\n csvf = open(filename, mode='r')\n csvr = csv.DictReader(csvf)\n all_data = list(csvr)\n r_sizes = sorted(set(map(lambda x:float(x['sizeR']), all_data)))\n s_sizes = sorted(set(map(lambda x:float(x['sizeS']), all_data)))\n algos = sorted(set(map(lambda x:x['alg'], all_data)))\n splitted = [[[y['alg'], y['sizeR'], y['sizeS'], y['throughput']] for y in all_data if y['sizeS'] == str(x)] for x in s_sizes]\n titles = ['S < L2', 'L2 < S < L3', 'L3 < S < EPC', 'EPC < S']\n fig,a = plt.subplots(3,2,figsize=(10,5))\n for i in range(0, len(s_sizes)):\n ds = splitted[i]\n ds = [[[y[0], y[1], y[2], y[3]] for y in ds if y[0] == x] for x in algos]\n x = 1 if i & (1<<1) else 0\n y = 1 if i & (1<<0) else 0\n for j in range(0, len(algos)):\n x_sizes = list(filter(lambda x: x['alg'] == algos[j], all_data))\n x_sizes = sorted(set(map(lambda x:float(x['sizeR']), x_sizes)))\n a[x][y].plot(x_sizes, list(map(lambda x: float(x[3]),ds[j])), '-o', label=algos[j],\n color=commons.color_alg(algos[j]))\n a[x][y].legend()\n a[x][y].set_xlabel('R size [MB]')\n a[x][y].set_ylabel('Throughput [M rec/s]')\n a[x][y].set_title(titles[i] + ' (' + str(s_sizes[i]) + ' MB)')\n\n commons.savefig('img/scale-r.png')\n\n # print graphs per algorithm\n fig = plt.figure(figsize=(8,6))\n plt.clf()\n for alg in algos:\n data = list(filter(lambda x: x['alg'] == alg, all_data))\n data_splitted = [[y for y in data if y['sizeS'] == str(x)] for x in s_sizes]\n plt.subplot(3,2,algos.index(alg)+1)\n for i in range(0, len(s_sizes)):\n x_sizes = list(filter(lambda x: x['alg'] == alg, all_data))\n x_sizes = sorted(set(map(lambda x:float(x['sizeR']), x_sizes)))\n plt.plot(x_sizes, list(map(lambda x: float(x['throughput']), data_splitted[i])),\n '-o', label=s_sizes_names[i], color=commons.color_size(i))\n if alg == 'PHT':\n plt.legend()\n plt.gca().yaxis.grid(linestyle='dashed')\n plt.xlabel('R size [MB]')\n plt.ylabel('Throughput [M rec/s]')\n plt.title(alg)\n plt.ylim([0,70])\n 
commons.savefig('img/scale-r-algos.png')\n\n # print only CHT\n fig = plt.figure(figsize=(4,3))\n # plt.clf()\n data = list(filter(lambda x: x['alg'] == 'CHT', all_data))\n data_splitted = [[y for y in data if y['sizeS'] == str(x)] for x in s_sizes]\n markers = ['o', 'v', 'D', 's']\n for i in range(0, len(s_sizes)):\n x_sizes = list(filter(lambda x: x['alg'] == 'CHT', all_data))\n x_sizes = sorted(set(map(lambda x:float(x['sizeR']), x_sizes)))\n plt.plot(x_sizes, list(map(lambda x: float(x['throughput']), data_splitted[i])),\n label=s_sizes_names[i], color=commons.color_size(i), linewidth=2,\n marker=markers[i], markersize=8, markeredgecolor='black',\n markeredgewidth=0.3)\n # plt.legend(fontsize='small')\n lines, labels = fig.axes[-1].get_legend_handles_labels()\n fig.legend(lines, labels, fontsize='x-small', frameon=0,\n ncol=2, bbox_to_anchor = (0.05, 0.95), loc='lower left', borderaxespad=0)\n plt.gca().yaxis.grid(linestyle='dashed')\n plt.xlabel('Size of outer table [MB]')\n plt.ylabel('Throughput [M rec/s]')\n plt.xlim(left=0)\n plt.ylim(bottom=0)\n commons.savefig('img/scale-r-CHT.png')\n\n # print only PHT\n fig = plt.figure(figsize=(4,4))\n plt.clf()\n data = list(filter(lambda x: x['alg'] == 'PHT', all_data))\n data_splitted = [[y for y in data if y['sizeS'] == str(x)] for x in s_sizes]\n for i in range(0, len(s_sizes)):\n x_sizes = list(filter(lambda x: x['alg'] == 'PHT', all_data))\n x_sizes = sorted(set(map(lambda x:float(x['sizeR']), x_sizes)))\n plt.plot(x_sizes, list(map(lambda x: float(x['throughput']), data_splitted[i])),\n '-o', label=s_sizes_names[i], color=commons.color_size(i))\n plt.legend(fontsize='small')\n plt.gca().yaxis.grid(linestyle='dashed')\n plt.xlabel('R size [MB]')\n plt.ylabel('Throughput [M rec/s]')\n plt.title('PHT')\n # plt.ylim([0,70])\n commons.savefig('img/scale-r-PHT.png')\n\ndef plot_with_ewb():\n csvf = open(filename, mode='r')\n csvr = csv.DictReader(csvf)\n all_data = list(csvr)\n r_sizes = sorted(set(map(lambda x:float(x['sizeR']), all_data)))\n s_sizes = sorted(set(map(lambda x:float(x['sizeS']), all_data)))\n algos = sorted(set(map(lambda x:x['alg'], all_data)))\n splitted = [[y for y in all_data if y['sizeS'] == str(x)] for x in s_sizes]\n titles = ['S < L2', 'L2 < S < L3', 'L3 < S < EPC', 'EPC < S']\n width = 1\n # fig,ax1 = plt.subplots(2,2,figsize=(20,10))\n # for i in range(0, len(s_sizes)):\n # ds = splitted[i]\n # ds = [[y for y in ds if y['alg'] == x] for x in algos]\n # x = 1 if i & (1<<1) else 0\n # y = 1 if i & (1<<0) else 0\n # for j in range(0, len(algos)):\n # ax1[x][y].plot(r_sizes, list(map(lambda x: float(x['throughput']),ds[j])),\n # '-o', label=algos[j], color=commons.color_alg(algos[j]))\n # ax1[x][y].legend()\n # ax1[x][y].set_xlabel('R size [MB]')\n # ax1[x][y].set_ylabel('Throughput [M rec/s]')\n # axes2 = ax1[x][y].twinx()\n # br = [float(x + width*j) for x in r_sizes]\n # axes2.bar(br, list(map(lambda x: float(x['ewb']), ds[j])), width=width,\n # label=algos[j], color=commons.color_alg(algos[j]), alpha=0.5)\n # axes2.set_ylabel('EWB [MB]')\n # axes2.set_ylim([0,65000])\n #\n # plt.title(titles[i] + ' (' + str(s_sizes[i]) + ' MB)')\n #\n # commons.savefig('img/scale-r-with-ewb' + '.png')\n\n # plot only PHT for S < L2\n fig, ax1 = plt.subplots(figsize=(5,4))\n # plt.clf()\n data = list(filter(lambda x: x['alg'] == 'PHT' and x['sizeS'] == '0.2', all_data))\n rs = list(map(lambda x:float(x['sizeR']), data))\n plot = list(map(lambda x: float(x['throughput']), data))\n bar = list(map(lambda x: float(x['ewb']), data))\n 
ax1.plot(rs, plot, '-o', color=commons.color_alg('PHT'))\n ax1.set_xlabel('R size [MB]')\n ax1.set_ylabel('Throughput [M rec/s]')\n ax2 = ax1.twinx()\n ax2.bar(rs, bar, color=commons.color_size(3), alpha=0.4, width=3)\n ax2.set_ylabel('EPCMiss [k]')\n commons.savefig('img/scale-r-with-ewb' + '-PHT.png')\n\n # plot only CHT for EPC < S\n # fig, ax1 = plt.subplots(figsize=(5,4))\n # plt.clf()\n fig = plt.figure(figsize=(5,4))\n ax1 = plt.gca()\n # plt.clf()\n data = list(filter(lambda x: x['alg'] == 'CHT' and x['sizeS'] == '100.0', all_data))\n rs = list(filter(lambda x: x['alg'] == 'CHT', all_data))\n rs = sorted(set(map(lambda x:float(x['sizeR']), rs)))\n plot = list(map(lambda x: float(x['throughput']), data))\n bar = list(map(lambda x: int(float(x['ewb'])*1024/4/1000), data))\n line1, = ax1.plot(rs, plot, '-o', color=commons.color_alg('CHT'), linewidth=2,\n marker=commons.marker_alg('CHT'), markeredgecolor='black', markersize=8,\n label='Throughput')\n ax1.set_xlabel('Size of outer table [MB]')\n ax1.set_ylabel('Throughput [M rec/s]')\n ax1.set_xlim([0,130])\n ax1.set_ylim(bottom=0)\n ax2 = ax1.twinx()\n bar2 = ax2.bar(rs, bar, color=commons.color_size(3), alpha=0.4, width=3,\n label='EPC Miss')\n ax2.set_ylabel('EPC Miss [k]')\n ax2.set_ylim(bottom=0)\n ax1.yaxis.grid(linestyle='dashed')\n ax1.axvline(x=90, linestyle='--', color='#209bb4', linewidth=2)\n fig.text(0.55,0.77, \"EPC\", color='#209bb4', rotation=90, weight='bold')\n fig.legend(handles=[line1, bar2], ncol=2, frameon=False,\n bbox_to_anchor=(0.08,0.91,1,0), loc=\"lower left\")\n commons.savefig('img/scale-r-with-ewb' + '-CHT.png')\n\n\nif __name__ == '__main__':\n timer = commons.start_timer()\n mode = \"sgx\"\n max_s_size_mb = 256\n reps = 5\n threads = 3\n s_sizes = [int(0.2*mb_of_data), # ~205kB\n int(6.4 * mb_of_data), # 6.4 MB\n 16 * mb_of_data, # 16 MB\n 100 * mb_of_data] # 100 MB\n\n # commons.make_app_with_flags(True, [\"PCM_COUNT\", \"SGX_COUNTERS\"])\n # commons.make_app_with_flags(True, [])\n # commons.remove_file(filename)\n # commons.init_file(filename, \"mode,alg,threads,sizeR,sizeS,throughput,ewb\\n\")\n #\n # for s_size in s_sizes:\n # for alg in ['CHT']:#commons.get_all_algorithms():\n # for i in range(144, max_s_size_mb+1, 16):\n # run_join(commons.PROG, alg, i*mb_of_data, s_size, threads, reps, mode)\n\n plot()\n # plot_with_ewb()\n commons.stop_timer(timer)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndrewZeitler/Cyber-Bullying-Detection-Bot
[ "4973077fe0f56f8a98f111d8fa0f9afca01166c8" ]
[ "cyber/cyberbullying_detection.py" ]
[ "# Data processing tools\r\nimport praw\r\nimport pickle\r\nimport pandas\r\nimport numpy\r\n\r\n# Machine Learning Tools\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.svm import SVC\r\n\r\n# Natural language processing tools\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nimport re\r\n\r\nclass CyberBullyingDetectionEngine:\r\n \"\"\" Class that deals with training and deploying cyberbullying detection models\r\n \"\"\" \r\n def __init__(self):\r\n self.corpus = None\r\n self.tags = None\r\n self.lexicon = None\r\n self.vecotrizer = None\r\n self.model = None\r\n self.metrics = None\r\n\r\n\r\n class CustomVectorizer:\r\n \"\"\" Extracts features from text and creates word vectors\r\n \"\"\"\r\n\r\n def __init__(self, lexicon):\r\n self.lexicon = lexicon\r\n\r\n\r\n def transform(self, corpus):\r\n word_vectors = []\r\n for text in corpus:\r\n features = []\r\n for k, v in self.lexicon.items():\r\n features.append(len([w for w in word_tokenize(text) if w in v]))\r\n\r\n word_vectors.append(features)\r\n\r\n return numpy.array(word_vectors)\r\n \r\n \r\n def _simplify(self, corpus):\r\n \"\"\" Takes in a list of strings (corpus) amd removes stop words (the, I, a, an... etc.)\r\n Also converts all to lowercase, removed non-alphanumeric characters,\r\n stem -> treats words in different tenses as the same\r\n \"\"\"\r\n stop_words = set(stopwords.words('english'))\r\n stemmer = SnowballStemmer('english')\r\n\r\n def clean (text):\r\n text = re.sub('[^a-zA-Z0-9]', ' ', text)\r\n # make lower case, remove stop words, and then stems words\r\n words = [stemmer.stem(w) for w in word_tokenize(text.lower()) if w not in stop_words]\r\n return \" \".join(words)\r\n\r\n \r\n return[clean(text) for text in corpus]\r\n\r\n\r\n def _model_metrics(self, features, tags):\r\n \"\"\" Takes in testing data and returns a dictionary of metrics\r\n \"\"\"\r\n\r\n tp = 0\r\n fp = 0\r\n tn = 0\r\n fn = 0\r\n\r\n predictions = self.model.predict(features)\r\n for r in zip(predictions, tags):\r\n if r[0] == 1 and r[1] == 1:\r\n tp += 1\r\n elif r[0] == 1 and r[1] == 0:\r\n fp += 1\r\n elif r[0] == 0 and r[1] == 1:\r\n fn += 1\r\n else:\r\n tn += 1\r\n \r\n precision = tp / (tp + fp)\r\n recall = tp / (tp + fn)\r\n\r\n return {\r\n 'precision': precision,\r\n 'recall': recall,\r\n 'f1': (2 * precision * recall) / (precision + recall)\r\n }\r\n\r\n\r\n def _get_lexicon(self, path):\r\n \"\"\" Takes in a path to a text file and returns a set containing every word in the file\r\n \"\"\"\r\n\r\n words = set()\r\n with open(path) as file:\r\n for line in file:\r\n words.update(line.strip().split(' '))\r\n\r\n return words\r\n \r\n\r\n def load_corpus(self, path, corpus_col, tag_col):\r\n \"\"\" Takes in a path to pickled pandas dataframe, the name of corpus, and name of corpus column,\r\n and the name of tag column, and extracts a tagged corpus\r\n \"\"\"\r\n\r\n data = pandas.read_pickle(path)[[corpus_col, tag_col]].values\r\n self.corpus = [row[0] for row in data]\r\n self.tags = [row[1] for row in data]\r\n\r\n\r\n def load_lexicon(self, fname):\r\n \"\"\" Loads a set of words from a text file\r\n \"\"\"\r\n if self.lexicon is None:\r\n self.lexicon = {}\r\n\r\n self.lexicon[fname] = self._get_lexicon('./data/' + fname + '.txt')\r\n 
\r\n\r\n def load_model(self, model_name):\r\n \"\"\" Loads the machine learning model, its corresponding vectorizer, and its performance metrics\r\n \"\"\"\r\n\r\n self.model = pickle.load(open('./models/' + model_name + '_ml_model.pkl', 'rb'))\r\n self.vectorizer = pickle.load(open('./models/' + model_name + '_vectorizer.pkl', 'rb'))\r\n self.metrics = pickle.load(open('./models/' + model_name + '_metrics.pkl', 'rb'))\r\n \r\n \r\n def train_using_bow(self):\r\n \"\"\" Trains a model using Bag of words (word counts) on the corpus and tags\r\n \"\"\"\r\n\r\n corpus = self._simplify(self.corpus)\r\n self.vectorizer = CountVectorizer()\r\n self.vectorizer.fit(corpus)\r\n\r\n bag_of_words = self.vectorizer.transform(corpus)\r\n x_train, x_test, y_train, y_test = train_test_split(bag_of_words, self.tags, test_size=0.2, stratify=self.tags)\r\n\r\n self.model = MultinomialNB()\r\n self.model.fit(x_train, y_train)\r\n\r\n self.metrics = self._model_metrics(x_test, y_test)\r\n\r\n\r\n def train_using_tfidf(self):\r\n \"\"\" Trains model using tf-idf weighted words counts as features\r\n \"\"\"\r\n\r\n corpus = self._simplify(self.corpus)\r\n self.vectorizer = TfidfVectorizer()\r\n self.vectorizer.fit(corpus)\r\n\r\n word_vectors = self.vectorizer.transform(corpus)\r\n x_train, x_test, y_train, y_test = train_test_split(word_vectors, self.tags, test_size=0.2, stratify=self.tags)\r\n\r\n self.model = MultinomialNB()\r\n self.model.fit(x_train, y_train)\r\n\r\n self.metrics = self._model_metrics(x_test, y_test)\r\n \r\n\r\n def train_using_custom(self):\r\n \"\"\" Trains model using a custom feature abstraction approach\r\n \"\"\"\r\n\r\n corpus = self._simplify(self.corpus)\r\n self.vectorizer = self.CustomVectorizer(self.lexicon)\r\n\r\n word_vectors = self.vectorizer.transform(corpus)\r\n x_train, x_test, y_train, y_test = train_test_split(word_vectors, self.tags, test_size=0.2, stratify=self.tags)\r\n\r\n self.model = SVC()\r\n self.model.fit(x_train, y_train)\r\n\r\n self.metrics = self._model_metrics(x_test, y_test)\r\n \r\n \r\n \r\n def predict(self, corpus):\r\n \"\"\" Takes in a text corpus and returns predictions\r\n \"\"\"\r\n\r\n x = self.vectorizer.transform(self._simplify(corpus))\r\n return self.model.predict(x)\r\n\r\n\r\n def evaluate(self):\r\n \"\"\" Returns model metrics\r\n \"\"\"\r\n\r\n return self.metrics\r\n\r\n\r\n def save_model(self, model_name):\r\n \"\"\" Saves the model for future use\r\n \"\"\"\r\n\r\n pickle.dump(self.model, open('./models/' + model_name + '_ml_model.pkl', 'wb'))\r\n pickle.dump(self.vectorizer, open('./models/' + model_name + '_vectorizer.pkl', 'wb'))\r\n pickle.dump(self.metrics, open('./models/' + model_name + '_metrics.pkl', 'wb'))\r\n \r\n\r\nif __name__ == '__main__':\r\n \r\n reddit = praw.Reddit(\r\n client_id = '6g1p99Dk11pHRQ',\r\n client_secret = 'hWrASTlrst0O6vF93VPIR3TE20Y',\r\n user_agent = 'cyberbullying model by Andrew'\r\n )\r\n\r\n new_comments = reddit.subreddit('TwoXChromosomes').comments(limit=1000)\r\n queries = [comment.body for comment in new_comments]\r\n print(len(queries))\r\n engine = CyberBullyingDetectionEngine()\r\n engine.load_corpus('./data/final_labelled_data.pkl', 'tweet', 'class')\r\n\r\n \"\"\" Trains model if it has not already been trained \"\"\"\r\n engine.train_using_bow()\r\n engine.save_model('bow')\r\n print(engine.evaluate())\r\n print(engine.predict(queries))\r\n\r\n engine.load_lexicon(\"hate-words\")\r\n engine.load_lexicon(\"neg-words\")\r\n engine.load_lexicon(\"pos-words\")\r\n \r\n \"\"\" Loads 
model for these techniques \"\"\"\r\n\r\n engine.load_model('tfidf')\r\n print(engine.evaluate())\r\n print(engine.predict(queries))\r\n engine.save_model('tfidf')\r\n\r\n engine.load_model('custom')\r\n print(engine.evaluate())\r\n print(engine.predict(queries))\r\n engine.save_model('custom')\r\n \r\n" ]
[ [ "pandas.read_pickle", "sklearn.naive_bayes.MultinomialNB", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.svm.SVC", "numpy.array", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
apoorvaish/mujoco-rl
[ "234bd7689990cdd63db458d0367e14ccd1b62c1f" ]
[ "td-actor-critic-state-value-functions/dl.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass Network(nn.Module):\n def __init__(self, lr, input_dims, n_hidden=64, output_dims=4):\n super(Network, self).__init__()\n self.fc1 = nn.Linear(input_dims, n_hidden)\n self.fc2 = nn.Linear(n_hidden, n_hidden)\n self.pi = nn.Linear(n_hidden, output_dims)\n self.v = nn.Linear(n_hidden, 1)\n self.optimizer = optim.Adam(self.parameters(), lr=lr)\n \n # self.device = (torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'))\n # self.to(self.device)\n \n def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n pi = self.pi(x)\n v_s = self.v(x)\n return pi, v_s\n " ]
[ [ "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lolwuz/traffic_simulator
[ "4ba221f764d0c80a457b75c0d6059e6aee826421" ]
[ "python_controller/server.py" ]
[ "#!/usr/bin/env python\nfrom objects.controller import Controller\nfrom SimpleWebSocketServer import SimpleWebSocketServer, WebSocket\nimport json\nimport pandas\nimport time\ntry:\n import thread as thread # For ubuntu thread\nexcept ModuleNotFoundError:\n import _thread as thread # Mac/Windows\n\n\n_ADDRESS = \"127.0.0.1\"\nclients = []\ncontrollers = []\ninfo_clients = []\n\n\ndef update():\n \"\"\" Updates the active controllers \"\"\"\n while True:\n for controller in controllers:\n controller.update()\n\n for client in info_clients:\n update_info(client)\n\n time.sleep(0.2)\n\n\ndef update_info(client):\n \"\"\" Gives info about connected controllers to the info server \"\"\"\n info = []\n\n for controller in controllers:\n lights = []\n waiting_times = []\n for light in controller.lights:\n lights.append(light.to_dict())\n\n for t in controller.waiting_times:\n diff = int(time.time() - t)\n waiting_times.append(diff)\n\n info.append({\n \"entries\": controller.entries,\n \"waiting_times\": waiting_times,\n \"total_entries\": controller.total_entries,\n \"phase\": controller.current_phase,\n \"lights\": lights,\n \"mode\": controller.mode,\n \"client\": controller.client.address\n })\n\n send_json = json.dumps(info)\n client.sendMessage(send_json)\n\n\ndef on_open():\n \"\"\" Starts update function on a new thread \"\"\"\n thread.start_new_thread(update, ())\n\n\nclass SimpleServer(WebSocket):\n def handleConnected(self):\n \"\"\" A new client was added to the server \"\"\"\n print(self.address[0])\n # if self.address[0] == _ADDRESS:\n # print(\"info server has connected\")\n # info_clients.append(self)\n # return\n\n print(\"client has connected: \" + self.address[0])\n\n data_frame = pandas.read_csv('Intersects.csv', sep=\";\")\n matrix = data_frame.values\n traffic_lights = list(data_frame.columns.values)\n\n del traffic_lights[0]\n new_controller = Controller(self, traffic_lights, matrix)\n controllers.append(new_controller)\n\n def handleClose(self):\n \"\"\" A client has disconnected from the server \"\"\"\n print(\"client disconnected: \" + self.address[0])\n if self in info_clients:\n print(\"info server has disconnected\")\n info_clients.remove(self)\n\n for controller in controllers:\n if controller.client == self:\n controllers.remove(controller)\n\n clients.remove(self)\n\n def handleMessage(self):\n \"\"\" Handles messages and json decoding \"\"\"\n for client in info_clients:\n if client == self:\n json_data = json.loads(self.data)\n for controller in controllers:\n if controller.client.address[0] == json_data[\"client\"][0]:\n controller.mode = json_data[\"mode\"]\n\n for controller in controllers:\n if self == controller.client:\n # Make a entry to a existing controller\n entry_from_json = []\n try:\n entry_from_json = json.loads(self.data)\n\n for entry in entry_from_json:\n if entry not in controller.light_names:\n entry_from_json.remove(entry)\n raise Exception(entry + ' value not in light names')\n\n controller.entry(entry_from_json)\n except Exception as error:\n pass\n # self.sendMessage(\"Exception: \" + repr(error))\n\n\nif __name__ == \"__main__\":\n server = SimpleWebSocketServer('0.0.0.0', 8080, SimpleServer)\n on_open()\n server.serveforever()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
thimathabeysirigunawardena/plot
[ "1f3639d853cfe91bce628eba63b7fcde971b3c2e" ]
[ "scatterplot.py" ]
[ "import pandas as pd\nimport plotly.express as px\n\ndf = pd.read_csv(\"data.csv\")\nfig = px.scatter(df, x=\"Population\", y=\"cases\",\n\t size=\"number\",color=\"Country\",\n size_max=60)\nfig.show()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
gitaar9/graf
[ "f7692ad885c1a3b8a377fed3b74c9ce6b9210732" ]
[ "graf/models/generator.py" ]
[ "import numpy as np\nimport torch\nfrom ..utils import sample_on_sphere, look_at, to_sphere\nfrom ..transforms import FullRaySampler\nfrom submodules.nerf_pytorch.run_nerf_mod import render, run_network # import conditional render\nfrom functools import partial\n\n\nclass Generator(object):\n def __init__(self, H, W, focal, radius, ray_sampler, render_kwargs_train, render_kwargs_test, parameters, named_parameters,\n range_u=(0, 1), range_v=(0.01, 0.49), chunk=None, device='cuda', orthographic=False):\n self.device = device\n self.H = int(H)\n self.W = int(W)\n self.focal = focal\n self.radius = radius\n self.range_u = range_u\n self.range_v = range_v\n self.chunk = chunk\n coords = torch.from_numpy(np.stack(np.meshgrid(np.arange(H), np.arange(W), indexing='ij'), -1))\n self.coords = coords.view(-1, 2)\n\n self.ray_sampler = ray_sampler\n self.val_ray_sampler = FullRaySampler(orthographic=orthographic)\n self.render_kwargs_train = render_kwargs_train\n self.render_kwargs_test = render_kwargs_test\n self.initial_raw_noise_std = self.render_kwargs_train['raw_noise_std']\n self._parameters = parameters\n self._named_parameters = named_parameters\n self.module_dict = {'generator': self.render_kwargs_train['network_fn']}\n for name, module in [('generator_fine', self.render_kwargs_train['network_fine'])]:\n if module is not None:\n self.module_dict[name] = module\n \n for k, v in self.module_dict.items():\n if k in ['generator', 'generator_fine']:\n continue # parameters already included\n self._parameters += list(v.parameters())\n self._named_parameters += list(v.named_parameters())\n \n self.parameters = lambda: self._parameters # save as function to enable calling model.parameters()\n self.named_parameters = lambda: self._named_parameters # save as function to enable calling model.named_parameters()\n self.use_test_kwargs = False\n\n self.render = partial(render, H=self.H, W=self.W, focal=self.focal, chunk=self.chunk)\n self.mirror_allowed = self.render_kwargs_train['mirror']\n\n def __call__(self, z, y=None, rays=None):\n bs = z.shape[0]\n if rays is None:\n rays = torch.cat([self.sample_rays() for _ in range(bs)], dim=1)\n\n render_kwargs = self.render_kwargs_test if self.use_test_kwargs else self.render_kwargs_train\n render_kwargs = dict(render_kwargs) # copy\n \n # in the case of a variable radius\n # we need to adjust near and far plane for the rays\n # so they stay within the bounds defined wrt. maximal radius\n # otherwise each camera samples within its own near/far plane (relative to this camera's radius)\n # instead of the absolute value (relative to maximum camera radius)\n if isinstance(self.radius, tuple):\n assert self.radius[1] - self.radius[0] <= render_kwargs['near'], 'Your smallest radius lies behind your near plane!'\n \n rays_radius = rays[0].norm(dim=-1)\n shift = (self.radius[1] - rays_radius).view(-1, 1).float() # reshape s.t. 
shape matches required shape in run_nerf\n render_kwargs['near'] = render_kwargs['near'] - shift\n render_kwargs['far'] = render_kwargs['far'] - shift\n assert (render_kwargs['near'] >= 0).all() and (render_kwargs['far'] >= 0).all(), \\\n (rays_radius.min(), rays_radius.max(), shift.min(), shift.max())\n \n\n render_kwargs['features'] = z\n rgb, disp, acc, extras = render(self.H, self.W, self.focal, chunk=self.chunk, rays=rays,\n **render_kwargs)\n\n rays_to_output = lambda x: x.view(len(x), -1) * 2 - 1 # (BxN_samples)xC\n \n if self.use_test_kwargs: # return all outputs\n return rays_to_output(rgb), \\\n rays_to_output(disp), \\\n rays_to_output(acc), extras\n\n rgb = rays_to_output(rgb)\n return rgb, extras['raw']\n\n def decrease_nerf_noise(self, it):\n end_it = 5000\n if it < end_it:\n noise_std = self.initial_raw_noise_std - self.initial_raw_noise_std/end_it * it\n self.render_kwargs_train['raw_noise_std'] = noise_std\n\n def sample_pose(self):\n # sample location on unit sphere\n loc = sample_on_sphere(self.range_u, self.range_v)\n \n # sample radius if necessary\n radius = self.radius\n if isinstance(radius, tuple):\n radius = np.random.uniform(*radius)\n\n loc = loc * radius\n R = look_at(loc)[0]\n\n RT = np.concatenate([R, loc.reshape(3, 1)], axis=1)\n RT = torch.Tensor(RT.astype(np.float32))\n return RT\n\n def sample_rays(self):\n pose = self.sample_pose()\n sampler = self.val_ray_sampler if self.use_test_kwargs else self.ray_sampler\n batch_rays, _, _ = sampler(self.H, self.W, self.focal, pose)\n return batch_rays\n\n def to(self, device):\n self.render_kwargs_train['network_fn'].to(device)\n if self.render_kwargs_train['network_fine'] is not None:\n self.render_kwargs_train['network_fine'].to(device)\n self.device = device\n return self\n\n def train(self):\n self.use_test_kwargs = False\n self.render_kwargs_train['network_fn'].train()\n if self.render_kwargs_train['network_fine'] is not None:\n self.render_kwargs_train['network_fine'].train()\n\n def eval(self):\n self.use_test_kwargs = True\n self.render_kwargs_train['network_fn'].eval()\n if self.render_kwargs_train['network_fine'] is not None:\n self.render_kwargs_train['network_fine'].eval()\n\n def mirror_mode(self):\n if self.mirror_allowed:\n self.render_kwargs_train['mirror'] = True\n\n def normal_mode(self):\n self.render_kwargs_train['mirror'] = False\n" ]
[ [ "numpy.random.uniform", "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bjoern1001001/viziphant
[ "1a63e0dbbae9d69173ed482771227d2970a95a80" ]
[ "viziphant/rasterplot.py" ]
[ "\"\"\"\nSimple but highly configurable plotting functions for spiketrains in neo format.\nWhile building on the matplotlib libary the functions lay an emphasis on clear,\npleasant-to-look-at visualizations.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom elephant.statistics import mean_firing_rate\nfrom math import log10, floor\n\n\ndef _round_to_1(x):\n rounded = round(x, -int(floor(log10(abs(x)))))\n return rounded, rounded > x\n\n\ndef _get_attributes(spiketrains, key_list):\n \"\"\"\n This function returns attribute_array which is of an array of shape\n (len(spiketrains), len(key_list)) and consists of numerical ids for each\n value of each key for each spike train.\n\n Passed spike trains must be already sorted according to key_list and\n key_list must not be empty.\n \"\"\"\n\n key_count = len(key_list)\n attribute_array = np.zeros((len(spiketrains), len(key_list)))\n # count all group sizes for all keys in key_list:\n while key_count > 0:\n key_count -= 1\n group_key = key_list[key_count]\n i = 0\n if group_key in spiketrains[i].annotations:\n current_value = spiketrains[i].annotations[group_key]\n else:\n # use placeholder value when key is not in annotations\n # of the current spike train\n current_value = '####BLANK####'\n ref_value = current_value\n values = np.array([])\n # count all group sizes for values of current key:\n while i < spiketrains.__len__():\n if not len(values) or current_value not in values:\n values = np.append(values, current_value)\n # count group size for a value of the current key:\n while i < len(spiketrains) and current_value == ref_value:\n attribute_array[i][key_count] = \\\n np.where(values == current_value)[0][0]\n i += 1\n if i < len(spiketrains):\n if group_key in spiketrains[i].annotations:\n current_value = spiketrains[i].annotations[\n group_key]\n else:\n current_value = '####BLANK####'\n ref_value = current_value\n return attribute_array\n\n\ndef rasterplot(spiketrain_list,\n key_list=[],\n groupingdepth=0,\n spacing=[8, 3],\n colorkey=0,\n pophist_mode='color',\n pophistbins=100,\n right_histogram=mean_firing_rate,\n righthist_barwidth=1.01,\n filter_function=None,\n histscale=.1,\n labelkey=None,\n markerargs={'markersize':4,'marker':'.'},\n separatorargs=[{'linewidth':2, 'linestyle':'--', 'color':'0.8'},\n {'linewidth':1, 'linestyle':'--', 'color':'0.8'}],\n legend=False,\n legendargs={'loc':(.98,1.), 'markerscale':1.5, 'handletextpad':0},\n ax=None,\n style='ticks',\n palette=None,\n context=None, # paper, poster, talk\n ):\n\n \"\"\"\n This function plots the dot display of spike trains alongside its\n population histogram and the mean firing rate (or a custom function).\n\n Optional visual aids are offered such as sorting, grouping and color coding\n on the basis of the arrangement in list of spike trains and spike train\n annotations.\n Changes to optics of the dot marker, the separators and the legend can be\n applied by providing a dict with the respective parameters. Changes and\n additions to the dot display itself or the two histograms are best realized\n by using the returned axis handles.\n\n :param spiketrain_list: list\n List can either contain Neo SpikeTrains object or lists of Neo\n SpikeTrains objects.\n :param key_list: str | list of str\n Annotation key(s) for which the spike trains should be ordered.\n When list of keys is given the spike trains are ordered successively\n for the keys.\n By default the ordering by the given lists of spike trains have\n priority. 
This can be bypassed by using an empty string '' as list-key\n at any position in the key_list.\n :param groupingdepth: 0 | 1 | 2\n * 0: No grouping (default)\n * 1: grouping by first key in key_list.\n Note that when list of lists of spike trains are given the first\n key is by the list identification key ''. If this is unwanted\n the empty string '' can be placed at a different position in\n key_list.\n * 2: additional grouping by second key respectively\n The groups are separated by whitespace specified in the spacing\n parameter and optionally by a line specified by the the separatorargs.\n :param spacing: int | [int] | [int, int]\n Size of whitespace separating the groups in units of spike trains.\n When groupingdepth == 2 a list of two values can specify the distance\n between the groups in level 1 and level 2. When only one value is given\n level 2 spacing is set to half the spacing of level 1.\n Default: [5, 3]\n :param colorkey: str | int (default 0)\n Contrasts values of a key by color. The key can be defined by its\n namestring or its position in key_list. Note that position 0 points to\n the list identification key ('') when list of lists of spike trains are\n given, if not otherwise specified in key_list!\n :param pophist_mode: 'color' (default) | 'total'\n * total: One population histogram for all drawn spike trains\n * color: Additionally to the total population histogram,\n a histogram for each colored subset is drawn (see colorkey).\n :param pophistbins: int (default 100)\n Number of bins used for the population histogram.\n :param right_histogram: function\n The function gets ONE neo.SpikeTrain object as argument and has to\n return a scalar.\n For example the functions in the elephant.statistics module can\n be used. (default: mean_firing_rate)\n When a function is applied is is recommended to set the axis label\n accordingly by using the axis handle returned by the function:\n axhisty.set_xlabel('Label Name')\n :param filter_function: function\n The function gets ONE neo.SpikeTrain object as argument and if the\n return is True the spike train is included; if False it is exluded.\n :param histscale: float (default .1)\n Portion of the figure used for the histograms on the right and upper\n side.\n :param labelkey: 0 | 1 | '0+1' (default) | 'annotation key' | None\n * 0, 1: Set label according to first or second key in key_list.\n Note that the first key is by default the list identification\n key ('') when list of lists of spike trains are given.\n * '0+1': Two level labeling of 0 and 1\n * annotation-key: Labeling each spike train with its value for given key\n * None: No labeling\n Note that only groups (-> see groupingdepth) can be labeled as bulks.\n Alternatively you can color for an annotation key and show a legend.\n :param markerargs: dict\n Arguments dictionary is passed on to matplotlib.pyplot.plot()\n :param separatorargs: dict | [dict, dict] | None\n If only one dict is given and groupingdepth == 2 the arguments are\n applied to the separator of both level. Otherwise the arguments are\n of separatorargs[0] are applied to the level 1 and [1] to level 2.\n Arguments dictionary is passed on to matplotlib.pyplot.plot()\n To turn of separators set it to None.\n :param legend: boolean\n Show legend?\n :param legendargs: dict\n Arguments dictionary is passed on to matplotlib.pyplot.legend()\n :param ax: matplotlib axis | None (default)\n The axis onto which to plot. 
If None a new figure is created.\n When an axis is given, the function can't handle the figure settings.\n Therefore it is recommended to call seaborn.set() with your preferred\n settings before creating your matplotlib figure in order to control\n your plotting layout.\n :param style: str\n seaborn style setting. Default: 'ticks'\n :param palette: string | sequence\n Define the color palette either by its name or use a custom palette in\n a sequence of the form ([r,g,b],[r,g,b],...).\n :param colorcodes: str\n seaborn colorcodes setting which determines the colors for the\n shorthand codes ('r', 'g', ...). If the given palette does not provide\n its own color codes the colorcode parameter is applied.\n By default this is set to 'colorblind'.\n This setting should ensure a coherent appearance even when additional\n drawings are added to the rasterplot after its execution.\n :param context: 'paper'(default) | 'talk' | 'poster'\n seaborn context setting which controls the scaling of labels. For the\n three options the parameters are scaled by .8, 1.3, and 1.6\n respectively.\n :return: ax, axhistx, axhisty <matplotlib axis handle>\n * ax is handle of the dot display plot\n * axhistx is handle of the histogram plot above the the dot display\n * axhisty is handle of the histogram plot on the right hand side\n\n *Basic Example:*\n >>> from elephant.spike_train_generation import homogeneous_poisson_process as HPP\n >>> from quantities import Hz\n >>> import matplotlib.pyplot as plt\n >>>\n >>> st_list = [HPP(rate=10*Hz) for _ in range(100)]\n >>> rasterplot(st_list)\n >>> plt.show()\n\n *Grouping Example:*\n >>> from elephant.spike_train_generation import homogeneous_poisson_process as HPP\n >>> from elephant.spike_train_generation import homogeneous_gamma_process as HGP\n >>> from quantities import Hz\n >>> import matplotlib.pyplot as plt\n >>>\n >>> st_list1 = [HPP(rate=10*Hz) for _ in range(100)]\n >>> st_list2 = [HGP(a=3, b=10*Hz) for _ in range(100)]\n >>>\n >>> # plot visually separates the two lists\n >>> rasterplot([st_list1, st_list2])\n >>>\n >>> # add annotations to spike trains\n >>> for i, (st1, st2) in enumerate(zip(st_list1, st_list2)):\n >>> if i.__mod__(2):\n >>> st1.annotations['parity'] = 'odd'\n >>> st2.annotations['parity'] = 'odd'\n >>> else:\n >>> st1.annotations['parity'] = 'even'\n >>> st2.annotations['parity'] = 'even'\n >>>\n >>> # plot separates the lists and the annotation values within each list\n >>> rasterplot([st_list1, st_list2], key_list=['parity'],\n >>> groupingdepth=2, labelkey='0+1')\n >>>\n >>> # '' key can change the priority of the list grouping\n >>> rasterplot([st_list1, st_list2], key_list=['parity', ''],\n >>> groupingdepth=2, labelkey='0+1')\n >>>\n >>> # groups can also be emphasized by an explicit color code\n >>> rasterplot([st_list1, st_list2], key_list=['', 'parity'],\n >>> groupingdepth=1, labelkey=0, colorkey='parity',\n >>> legend=True)\n >>>\n >>> plt.show()\n\n \"\"\"\n\n # Initialize plotting canvas\n sns.set_style(style)\n\n if context is not None:\n sns.set_context(context)\n\n if palette is not None:\n sns.set_palette(palette)\n else:\n palette = sns.color_palette()\n\n if ax is None:\n fig, ax = plt.subplots()\n # axis must be created after sns.set() command for style to apply!\n\n margin = 1 - histscale\n left, bottom, width, height = ax.get_position().bounds\n ax.set_position([ left, bottom,\n margin * width, margin * height])\n axhistx = plt.axes([left, bottom + margin * height,\n margin * width, histscale * height])\n axhisty = 
plt.axes([left + margin * width, bottom,\n histscale * width, margin * height])\n\n sns.despine(ax=axhistx)\n sns.despine(ax=axhisty)\n\n # Whitespace margin around dot display = 2%\n ws_margin = 0.02\n\n # Control of user entries\n if groupingdepth > 2:\n raise ValueError(\"Grouping is limited to two layers.\")\n groupingdepth = int(groupingdepth)\n\n list_key = \"%$\\@[#*&/!\" # unique key to be added to annotations to store\n # list ordering information.\n\n if type(key_list) == 'str':\n key_list = [key_list]\n\n if '' not in key_list:\n key_list = [list_key] + key_list\n else:\n key_list = [list_key if not key else key for key in key_list]\n\n if type(spacing) == list:\n if len(spacing) == 1:\n spacing = [spacing[0], spacing[0]/2.]\n else:\n spacing = [spacing, spacing/2.]\n if spacing[0] < spacing[1]:\n raise DeprecationWarning(\"For reasonable visual aid, spacing between\" \\\n + \" top level group (spacing[0]) must be larger\" \\\n + \" than for subgroups (spacing[1]).\")\n\n if type(colorkey) == int and len(key_list):\n if colorkey >= len(key_list):\n raise IndexError(\"An integer colorkey must refer to a position in\" \\\n + \" key_list.\")\n colorkey = key_list[colorkey]\n else:\n if not colorkey:\n colorkey = list_key\n elif colorkey not in key_list:\n raise AttributeError(\"colorkey must be in key_list.\")\n\n if legend and not key_list:\n raise AttributeError(\"Legend requires a non empty key_list.\")\n\n if labelkey == '':\n labelkey = list_key\n\n if type(separatorargs) == list:\n if len(separatorargs) == 1:\n separatorargs += separatorargs\n for args in separatorargs:\n if type(args) != dict:\n raise TypeError(\"The parameters must be given as dict.\")\n else:\n separatorargs = [separatorargs, separatorargs]\n\n for i, args in enumerate(separatorargs):\n if 'c' in args:\n separatorargs[i]['color'] = args['c']\n elif 'color' not in args:\n separatorargs[i]['color'] = '0.8'\n\n markerargs['linestyle'] = ''\n\n # Flatten list of lists while keeping the grouping info in annotations\n if isinstance(spiketrain_list[0], list):\n for list_nbr, st_list in enumerate(spiketrain_list):\n for st in st_list:\n st.annotations[list_key] = \"set {}\".format(list_nbr)\n spiketrain_list = [st for sublist in spiketrain_list for st in sublist]\n else:\n for st in spiketrain_list:\n st.annotations[list_key] = \"set {}\".format(0)\n key_list.remove(list_key)\n key_list.append(list_key)\n\n # Input checks on flattened lists\n if len(key_list) < groupingdepth:\n raise ValueError(\"Can't group more as keys in key_list.\")\n\n # Filter spike trains according to given filter function\n if filter_function is not None:\n filter_index = []\n for st_count, spiketrain in enumerate(spiketrain_list):\n if filter_function(spiketrain):\n filter_index += [st_count]\n spiketrain_list = [spiketrain_list[i] for i in filter_index]\n\n # Initialize plotting parameters\n t_lims = [(st.t_start, st.t_stop) for st in spiketrain_list]\n tmin = min(t_lims, key=lambda f: f[0])[0]\n tmax = max(t_lims, key=lambda f: f[1])[1]\n period = tmax - tmin\n ax.set_xlim(tmin - ws_margin*period, tmax + ws_margin*period)\n yticks = np.zeros(len(spiketrain_list))\n\n # Sort spike trains according to keylist\n def sort_func(x):\n return ['' if key not in x.annotations\n else x.annotations[key] for key in key_list]\n\n spiketrain_list = sorted(spiketrain_list, key=lambda x: sort_func(x))\n if len(key_list) > 1:\n attribute_array = _get_attributes(spiketrain_list, key_list)\n elif len(key_list) == 1:\n attribute_array = 
np.zeros((len(spiketrain_list), 2))\n attribute_array[:,0] = _get_attributes(spiketrain_list, key_list)[:,0]\n else:\n attribute_array = np.zeros((len(spiketrain_list), 1))\n\n # Define colormap\n if not len(key_list):\n nbr_of_colors = 1\n colorkey = None\n else:\n colorkey = np.where(colorkey == np.array(key_list))[0][0]\n nbr_of_colors = int(max(attribute_array[:, colorkey]) + 1)\n\n colormap = sns.color_palette(palette, nbr_of_colors)\n\n # Draw population histogram (upper side)\n colorkeyvalues = np.unique(attribute_array[:, colorkey])\n\n if pophist_mode == 'color' and len(colorkeyvalues)-1:\n if len(sns.color_palette()) < len(colorkeyvalues):\n print(\"\\033[31mWarning: There are more subsets than can be \" +\n \"separated by colors in the color palette which might lead \" +\n \"to confusion!\\033[0m\")\n max_y = 0\n for value in colorkeyvalues:\n idx = np.where(attribute_array[:, colorkey] == value)[0]\n histout = axhistx.hist(np.concatenate([spiketrain_list[i] for i in idx]),\n pophistbins, histtype='step', linewidth=1,\n color=colormap[int(value)])\n max_y = np.max([max_y, np.max(histout[0])])\n\n else: # pophist_mode == 'total':\n if len(colorkeyvalues)-1:\n sum_color = separatorargs[0]['color']\n else:\n sum_color = sns.color_palette()[0]\n histout = axhistx.hist(np.concatenate(spiketrain_list),\n pophistbins, histtype='step', linewidth=1,\n color=sum_color)\n max_y = np.max(histout[0])\n\n # Set ticks and labels for population histogram\n axhistx_ydim, up = _round_to_1(max_y)\n if max_y > axhistx.get_ylim()[-1]:\n axhistx.set_ylim(0, max_y)\n if up and axhistx_ydim > max_y:\n axhistx.set_ylim(0, axhistx_ydim)\n axhistx.set_yticks([axhistx_ydim])\n axhistx.set_yticklabels(['{:.0f}'.format(axhistx_ydim)])\n axhistx.set_ylabel('count')\n\n # Legend for colorkey\n if legend:\n __, index = np.unique(attribute_array[:, colorkey], return_index=True)\n legend_labels = [spiketrain_list[i].annotations[key_list[colorkey]]\n for i in index]\n legend_handles = [0] * len(index)\n\n # Reshape list into sublists according to groupingdepth\n if groupingdepth > 0:\n value1, index1, counts1 = np.unique(attribute_array[:, 0],\n return_index=True,\n return_counts=True)\n for v1, i1, c1 in zip(value1, index1, counts1):\n v1 = int(v1)\n spiketrain_list[v1:v1 + c1] = [spiketrain_list[v1:v1 + c1]]\n if groupingdepth > 1:\n __, counts2 = np.unique(attribute_array[i1:i1 + c1, 1],\n return_counts=True)\n for v2, c2 in enumerate(counts2):\n v2 = int(v2)\n spiketrain_list[v1][v2:v2+c2] = [spiketrain_list[v1][v2:v2+c2]]\n else:\n spiketrain_list[v1] = [spiketrain_list[v1]]\n else:\n spiketrain_list = [[spiketrain_list]]\n\n # HIERARCHIE:\n # [ [ []..[] ] .... [ []..[] ] ] spiketrain_list\n # [ []..[] ] LIST\n # [] list\n # spike train\n\n # Loop through lists of lists of spike trains\n for COUNT, SLIST in enumerate(spiketrain_list):\n\n # Separator depth 1\n if COUNT and separatorargs is not None:\n linepos = ypos + len(spiketrain_list[COUNT-1][-1]) \\\n + spacing[0]/2. 
- 0.5\n ax.plot(ax.get_xlim(), [linepos] * 2, **separatorargs[0])\n\n # Loop through lists of spike trains\n for count, slist in enumerate(SLIST):\n nbr_of_drawn_sts = int(sum([len(sl) for SL in\n spiketrain_list[:COUNT] for sl in SL])\\\n + sum([len(sl) for sl in SLIST[:count]]))\n\n # Calculate postition of next spike train to draw\n prev_spaces = np.sum([len(SLIST_it) - 1\n for SLIST_it in spiketrain_list[:COUNT]])\n ypos = nbr_of_drawn_sts \\\n + int(bool(groupingdepth)) * COUNT * spacing[0] \\\n + groupingdepth/2 * count * spacing[1] \\\n + groupingdepth/2 * prev_spaces * spacing[1]\n\n # Separator depth 2\n if count and separatorargs is not None:\n linepos = ypos - (spacing[1] + 1) / 2.\n ax.plot(ax.get_xlim(), [linepos] * 2, **separatorargs[1])\n\n # Loop through spike trains\n for st_count, st in enumerate(slist):\n current_st = nbr_of_drawn_sts + st_count\n annotation_value = int(attribute_array[current_st, colorkey])\n color = colormap[annotation_value]\n\n # Dot display\n handle = ax.plot(st.times.magnitude,\n [st_count + ypos] * st.__len__(),\n color=color, **markerargs)\n if legend:\n legend_handles[annotation_value] = handle[0]\n\n # Right side histogram bar\n barvalue = right_histogram(st)\n barwidth = righthist_barwidth\n axhisty.barh(y=st_count + ypos, # - barwidth/2.,\n width=barvalue,\n height=barwidth,\n color=color,\n edgecolor=color)\n\n # Append positions of spike trains to tick list\n ycoords = np.arange(len(slist)) + ypos\n yticks[nbr_of_drawn_sts:nbr_of_drawn_sts+len(slist)] = ycoords\n\n # Plotting axis\n yrange = yticks[-1] - yticks[0]\n ax.set_ylim(yticks[0] - ws_margin*yrange,\n yticks[-1] + ws_margin*yrange)\n axhistx.set_xlim(ax.get_xlim())\n axhisty.set_ylim(ax.get_ylim())\n ax.set_xlabel('t [{}]'.format(spiketrain_list[0][0][0].units.dimensionality))\n axhistx.get_xaxis().set_visible(False)\n axhisty.get_yaxis().set_visible(False)\n\n # Set ticks and labels for right side histogram\n axhisty_xdim, up = _round_to_1(axhisty.get_xlim()[-1])\n if up:\n axhistx.set_ylim(0, axhistx_ydim)\n axhisty.set_xticks([axhisty_xdim])\n axhisty.set_xticklabels(['{}'.format(axhisty_xdim)])\n\n # Y labeling\n if key_list and labelkey in key_list + [0, 1, '0+1']:\n if labelkey == key_list[0]:\n if groupingdepth > 0:\n labelkey = 0\n elif len(key_list) > 1 and labelkey == key_list[1]:\n if groupingdepth > 1:\n labelkey = 1\n\n if type(labelkey) == int or labelkey == '0+1':\n labelpos = [[] for label_level in range(2)]\n labelname = [[] for label_level in range(2)]\n\n # Labeling depth 1 + 2\n if groupingdepth:\n values1, index1, counts1 = np.unique(attribute_array[:, 0],\n return_index=True,\n return_counts=True)\n\n for v1, i1, c1 in zip(values1, index1, counts1):\n st = spiketrain_list[int(v1)][0][0]\n if key_list[0] in st.annotations:\n labelname[0] += [st.annotations[key_list[0]]]\n if labelkey == '0+1':\n labelname[0][-1] += ' ' * 5\n else:\n labelname[0] += ['']\n\n labelpos[0] += [(yticks[i1] + yticks[i1+c1-1])/2.]\n\n # Labeling depth 2\n if groupingdepth / 2 and labelkey and len(key_list) - 1:\n __, index2, counts2 = np.unique(attribute_array[i1:i1+c1, 1],\n return_index=True,\n return_counts=True)\n\n for v2, (i2, c2) in enumerate(zip(index2, counts2)):\n st = spiketrain_list[int(v1)][int(v2)][0]\n if key_list[1] in st.annotations:\n labelname[1] += [st.annotations[key_list[1]]]\n else:\n labelname[1] += ['']\n\n labelpos[1] += [(yticks[i1+i2] + yticks[i1+i2+c2-1])/2.]\n\n # Set labels according to labelkey\n if type(labelkey) == int:\n ax.set_yticks(labelpos[1] if 
labelkey else labelpos[0])\n ax.set_yticklabels(labelname[1] if labelkey else labelname[0])\n\n elif labelkey == \"0+1\":\n ax.set_yticks(labelpos[0] + labelpos[1])\n ax.set_yticklabels(labelname[0] + labelname[1])\n\n else:\n # Annotatation key as labelkey\n labelname = []\n for COUNT, SLIST in enumerate(spiketrain_list):\n for count, slist in enumerate(SLIST):\n for st_count, st in enumerate(slist):\n if labelkey in st.annotations:\n labelname += [st.annotations[labelkey]]\n else:\n labelname += ['']\n ax.set_yticks(yticks)\n ax.set_yticklabels(labelname)\n\n else:\n ax.set_yticks([])\n ax.set_yticklabels([''])\n\n # Draw legend\n if legend:\n ax.legend(legend_handles, legend_labels, **legendargs)\n\n # Remove list_key from annotations\n for SLIST in spiketrain_list:\n for slist in SLIST:\n for st in slist:\n st.annotations.pop(list_key, None)\n\n return ax, axhistx, axhisty\n" ]
[ [ "numpy.unique", "matplotlib.pyplot.subplots", "matplotlib.pyplot.axes", "numpy.max", "numpy.concatenate", "numpy.append", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gwallison/open-FF
[ "d0c1a9ace91abfb6f5393c85ee511366ca4b1fbe" ]
[ "update_density_PHASE_4.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 5 14:33:38 2021\n\n@author: Gary\n\nThis is used to fetch basic information from the IngredientComment field.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport core.Read_FF as rff\n\n#### ----------- File handles -------------- ####\n\n####### uncomment below for local runs\noutdir = './out/'\nsources = './sources/'\ntempfolder = './tmp/'\nmake_files = True\n### uncomment below for running on CodeOcean\n#outdir = '../results/'\n#sources = '../data/'\n#tempfolder = '../'\n\n\n####### zip input files\nzfilename = 'currentData'\nstfilename = 'sky_truth_final'\n\nreader = rff.Read_FF()\n\nt = reader.import_raw_as_str(varsToKeep=['UploadKey','IngredientComment',\n 'PercentHFJob','CASNumber'])\ndf = t[t.IngredientComment.str.lower().str.contains('density')][['UploadKey','IngredientComment',\n 'PercentHFJob','CASNumber']].copy()\ndf['dens'] = df.IngredientComment.str.extract(r\"(\\d*\\.\\d+|\\d+)\").astype('float')\n\ndf.PercentHFJob = np.where(df.PercentHFJob=='',np.nan,df.PercentHFJob)" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fplk/gpt-neox
[ "9992042ab113428022e5e91421c04917577b8e00" ]
[ "megatron/global_vars.py" ]
[ "# coding=utf-8\n# Copyright (c) 2021, EleutherAI contributors\n# This file is based on code by the authors denoted below and has been modified from its original version.\n#\n# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Megatron global variables.\"\"\"\n\nimport os\nimport sys\nimport time\n\nimport torch\nimport wandb\n\nfrom megatron.tokenizer import build_tokenizer\n\n_GLOBAL_ARGS = None\n_GLOBAL_TOKENIZER = None\n_GLOBAL_TENSORBOARD_WRITER = None\n_GLOBAL_ADLR_AUTORESUME = None\n_GLOBAL_TIMERS = None\n_GLOBAL_USE_WANDB = False\n\ndef get_args():\n \"\"\"Return arguments.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_ARGS, 'args')\n return _GLOBAL_ARGS\n\n\ndef get_tokenizer():\n \"\"\"Return tokenizer.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')\n return _GLOBAL_TOKENIZER\n\n\ndef get_tensorboard_writer():\n \"\"\"Return tensorboard writer. It can be None so no need\n to check if it is initialized.\"\"\"\n return _GLOBAL_TENSORBOARD_WRITER\n\n\ndef get_adlr_autoresume():\n \"\"\"ADLR autoresume object. It can be None so no need\n to check if it is initialized.\"\"\"\n return _GLOBAL_ADLR_AUTORESUME\n\n\ndef get_timers():\n \"\"\"Return timers.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')\n return _GLOBAL_TIMERS\n\n\ndef set_global_variables():\n \"\"\"Set args, tokenizer, tensorboard-writer, adlr-autoresume, and timers.\"\"\"\n from megatron.neox_arguments import NeoXArgs\n args = NeoXArgs.consume_megatron_args()\n\n global _GLOBAL_ARGS\n _ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args')\n _GLOBAL_ARGS = args\n\n _ = _build_tokenizer(args)\n _set_tensorboard_writer(args)\n _set_adlr_autoresume(args)\n _set_timers()\n\n\ndef _build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n global _GLOBAL_TOKENIZER\n _ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer')\n _GLOBAL_TOKENIZER = build_tokenizer(args)\n return _GLOBAL_TOKENIZER\n\n\ndef rebuild_tokenizer(args):\n global _GLOBAL_TOKENIZER\n _GLOBAL_TOKENIZER = None\n return _build_tokenizer(args)\n\n\ndef _set_tensorboard_writer(args):\n \"\"\"Set tensorboard writer.\"\"\"\n global _GLOBAL_TENSORBOARD_WRITER\n _ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER,\n 'tensorboard writer')\n\n if hasattr(args, 'tensorboard_dir') and \\\n args.tensorboard_dir and args.rank == 0:\n try:\n from torch.utils.tensorboard import SummaryWriter\n print('> setting tensorboard ...')\n _GLOBAL_TENSORBOARD_WRITER = SummaryWriter(\n log_dir=args.tensorboard_dir)\n except ModuleNotFoundError:\n print('WARNING: TensorBoard writing requested but is not '\n 'available (are you using PyTorch 1.1.0 or later?), '\n 'no TensorBoard logs will be written.', flush=True)\n\n\ndef _set_adlr_autoresume(args):\n \"\"\"Initialize ADLR autoresume.\"\"\"\n global _GLOBAL_ADLR_AUTORESUME\n _ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')\n\n if args.adlr_autoresume:\n if args.rank == 0:\n print('enabling 
autoresume ...', flush=True)\n sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))\n try:\n from userlib.auto_resume import AutoResume\n except BaseException:\n print('ADLR autoresume is not available, exiting ...')\n sys.exit()\n\n _GLOBAL_ADLR_AUTORESUME = AutoResume\n\n\ndef _set_timers():\n \"\"\"Initialize timers.\"\"\"\n global _GLOBAL_TIMERS\n _ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')\n _GLOBAL_TIMERS = Timers()\n\n\ndef _ensure_var_is_initialized(var, name):\n \"\"\"Make sure the input variable is not None.\"\"\"\n assert var is not None, '{} is not initialized.'.format(name)\n\n\ndef _ensure_var_is_not_initialized(var, name):\n \"\"\"Make sure the input variable is not None.\"\"\"\n assert var is None, '{} is already initialized.'.format(name)\n\n\nclass _Timer:\n \"\"\"Timer.\"\"\"\n\n def __init__(self, name):\n self.name_ = name\n self.elapsed_ = 0.0\n self.started_ = False\n self.start_time = time.time()\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n assert not self.started_, 'timer has already been started'\n torch.cuda.synchronize()\n self.start_time = time.time()\n self.started_ = True\n\n def stop(self):\n \"\"\"Stop the timer.\"\"\"\n assert self.started_, 'timer is not started'\n torch.cuda.synchronize()\n self.elapsed_ += (time.time() - self.start_time)\n self.started_ = False\n\n def reset(self):\n \"\"\"Reset timer.\"\"\"\n self.elapsed_ = 0.0\n self.started_ = False\n\n def elapsed(self, reset=True):\n \"\"\"Calculate the elapsed time.\"\"\"\n started_ = self.started_\n # If the timing in progress, end it first.\n if self.started_:\n self.stop()\n # Get the elapsed time.\n elapsed_ = self.elapsed_\n # Reset the elapsed time\n if reset:\n self.reset()\n # If timing was in progress, set it back.\n if started_:\n self.start()\n return elapsed_\n\n\nclass Timers:\n \"\"\"Group of timers.\"\"\"\n\n def __init__(self):\n self.timers = {}\n\n def __call__(self, name):\n if name not in self.timers:\n self.timers[name] = _Timer(name)\n return self.timers[name]\n\n def write(self, names, iteration, normalizer=1.0, reset=False):\n \"\"\"Write timers to a tensorboard writer\"\"\"\n # currently when using add_scalars,\n # torch.utils.add_scalars makes each timer its own run, which\n # polutes the runs list, so we just add each as a scalar\n assert normalizer > 0.0\n for name in names:\n value = self.timers[name].elapsed(reset=reset) / normalizer\n\n writer = get_tensorboard_writer()\n if writer:\n writer.add_scalar(f\"timers/{name}\", value, iteration)\n\n if get_use_wandb():\n wandb.log({f\"timers/{name}\": value}, step=iteration)\n\n def log(self, names, normalizer=1.0, reset=True):\n \"\"\"Log a group of timers.\"\"\"\n assert normalizer > 0.0\n string = 'time (ms)'\n for name in names:\n elapsed_time = self.timers[name].elapsed(\n reset=reset) * 1000.0 / normalizer\n string += ' | {}: {:.2f}'.format(name, elapsed_time)\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(string, flush=True)\n else:\n print(string, flush=True)\n\ndef get_use_wandb():\n global _GLOBAL_USE_WANDB\n return _GLOBAL_USE_WANDB\n\ndef set_use_wandb(b: bool):\n global _GLOBAL_USE_WANDB\n _GLOBAL_USE_WANDB = b" ]
[ [ "torch.distributed.get_rank", "torch.cuda.synchronize", "torch.distributed.is_initialized", "torch.utils.tensorboard.SummaryWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
darianyang/traj-plot
[ "849c1e6b5a6f6f64fb66e62149f98318b07fe292" ]
[ "traj_plot/data_plot_2D.py" ]
[ "\"\"\"\nPlot 2D timeseries data heatmaps such as secondary structure and per residue RMSD.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nimport matplotlib.cm as cm\nfrom matplotlib.colors import Normalize\nfrom matplotlib.colors import ListedColormap\n\nfrom matplotlib.ticker import (MultipleLocator, AutoMinorLocator)\n\nimport matplotlib.patches\nfrom numpy.lib import genfromtxt \n\n\n#plt.rcParams['figure.figsize']= (12,6)\nplt.rcParams.update({'font.size': 14})\nplt.rcParams[\"font.family\"]=\"Sans-serif\"\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams['mathtext.default'] = 'regular'\nplt.rcParams['axes.linewidth'] = 2.25\nplt.rcParams['xtick.major.size'] = 6\nplt.rcParams['xtick.major.width'] = 2.5\nplt.rcParams['xtick.minor.size'] = 2\nplt.rcParams['xtick.minor.width'] = 2\nplt.rcParams['ytick.major.size'] = 6\nplt.rcParams['ytick.major.width'] = 2.5\n\nclass Per_Res_Plot:\n\n def __init__(self, file, timescale=10**6, data_interval=1, ax=None):\n \"\"\"\n Parameters\n ----------\n file : str or list\n Path to the cpptraj data file.\n timescale : int\n Convert from frame to timescale. Default 1ps per frame to us scale.\n data_interval : int\n Optionally process data in larger intervals, default 1.\n \"\"\"\n self.file = file\n self.timescale = timescale\n self.data_interval = data_interval\n\n # TODO: there has to be a best practise for this\n if ax is None:\n self.fig, self.ax = plt.subplots(figsize=(12,5))\n else:\n self.fig = plt.gca()\n self.ax = ax\n\n def add_patch(self, ax, recx, recy, facecolor, text, recwidth=0.04, recheight=0.05, recspace=0):\n ax = self.ax\n ax.add_patch(matplotlib.patches.Rectangle((recx, recy), \n recwidth, recheight, \n facecolor=facecolor,\n edgecolor='black',\n clip_on=False,\n transform=ax.transAxes,\n lw=2.25)\n )\n ax.text(recx + recheight + recspace, recy + recheight / 2.8, text, ha='left', va='center',\n transform=ax.transAxes, fontsize=12)\n\n def process_per_res_data(self):\n \"\"\"\n Process the cpptraj data that is in per-residue format, e.g. for DSSP and RMSD.\n Head of file is each residue, e.g. 
\"1:MET\", first column of file is the frame.\n\n Returns\n -------\n x : ndarray\n 1D array of the timepoints.\n y : ndarray\n 1D array of the residue string and numbers.\n z : ndarray\n 2D array of the x by y data values.\n \"\"\"\n if type(self.file) == list:\n header = np.genfromtxt(self.file[0], dtype=str, max_rows=1, comments=None)\n # build array from total frame counts and residue number\n frames = [len(np.genfromtxt(f, usecols=0)) for f in self.file]\n self.x = np.divide(np.arange(0, sum(frames), 1), self.timescale)\n data = np.zeros(shape=(sum(frames), len(header)))\n\n # fill out data array for each dataset in file list\n frame_index = 0\n for num, val in enumerate(frames):\n data[frame_index:val + frame_index, :] = np.genfromtxt(self.file[num])\n frame_index += val\n\n else:\n header = np.genfromtxt(self.file, dtype=str, max_rows=1, comments=None)\n data = np.genfromtxt(self.file)[::self.data_interval,:]\n self.x = np.divide(np.genfromtxt(self.file, usecols=0), self.timescale)\n\n # rotate so frame is x and res is y\n data = np.transpose(data)\n\n # timeseries frame data\n #self.x = np.genfromtxt(f, usecols=0)\n #self.x = np.divide(data[0,:], self.timescale)\n\n # split y into seperate res_name and res_num\n y = np.char.split(header[1:], sep=\":\")\n self.y_name = [i[0] for i in y]\n self.y_num = [int(i[1]) for i in y]\n\n # z is the data array without the frame column\n self.z = data[1:,:]\n\n def ss_cmap(self):\n \"\"\"\n Custom cmap for the DSSP data.\n \"\"\"\n # cmap = cm.tab10\n # norm = Normalize(vmin=0, vmax=10) \n # self.ss_colors = ListedColormap([\"white\"] + [cmap(norm(c)) for c in range(0, 7)])\n\n # first color should be white for None\n self.ss_colors = ListedColormap([\"white\", \"tab:brown\", \"tab:orange\", \"tab:red\",\n \"tab:blue\", \"tab:purple\", \"tab:green\", \"tab:olive\"])\n self.ss_labels = {\"None\":0, \"Parallel β Sheet\":1, \"Antiparallel β Sheet\":2, \n \"$3_{10}$ Helix\":3, \"α Helix\":4, \"π Helix\":5, \"Turn\":6, \"Bend\":7}\n\n def ss_master(self, legend=False, labels=(False, False)):\n \"\"\"\n Main public method for plotting DSSP data.\n\n Parameters\n ----------\n legend : bool\n Optionally plot the patch and text SSP legend.\n labels : tuple of bool\n Tuple of 2 boolean values for x and y labels respectively.\n \"\"\"\n ax = self.ax\n \n self.ss_cmap()\n self.process_per_res_data()\n ax.pcolormesh(self.x, self.y_num, self.z, cmap=self.ss_colors, shading=\"auto\")\n if type(self.file) == list:\n ax.xaxis.grid(color=\"k\", linewidth=2.5)\n ax.yaxis.grid(color=\"k\", linewidth=1)\n else:\n ax.grid(color=\"k\", linewidth=1)\n\n if labels[0]:\n ax.set_xlabel(\"Time(µs)\", fontweight=\"bold\", labelpad=10) \n if labels[1]:\n ax.set_ylabel(\"Residue\", fontweight=\"bold\", labelpad=12)\n\n if legend:\n norm = Normalize(vmin=0, vmax=7)\n ax.text(1.05, 0.85, \"Secondary Structure\", ha='left', va='center',\n transform=ax.transAxes, fontweight=\"bold\")\n for ss, val in self.ss_labels.items():\n self.add_patch(ax, 1.05, 0.75 - 0.08 * val, self.ss_colors(norm(val)), ss)\n # attempt to add thick vlines\n #ax.vlines(np.linspace(0, 5, len(self.x)), ymin=0, ymax=len(self.y_num), linewidths=0.5, color=\"k\")\n\n def ss_legend(self):\n \"\"\"\n Add a DSSP legend to its own axes object (located on the right).\n \"\"\"\n ax = self.ax\n self.ss_cmap()\n norm = Normalize(vmin=0, vmax=7)\n ax.text(1.05, 0.85, \"Secondary Structure\", ha='left', va='center',\n transform=ax.transAxes, fontweight=\"bold\")\n for ss, val in self.ss_labels.items():\n 
self.add_patch(ax, 1.05, 0.75 - 0.09 * val, self.ss_colors(norm(val)), ss)\n\n def rmsd_master(self, legend=False, labels=(False, False)):\n \"\"\"\n Main public method for plotting per residue RMSD data.\n\n Parameters\n ----------\n legend : bool\n Optionally plot the cbar.\n labels : tuple of bool\n Tuple of 2 boolean values for x and y labels respectively.\n \"\"\"\n ax = self.ax\n \n self.process_per_res_data()\n self.plot = ax.pcolormesh(self.x, self.y_num, self.z, cmap=\"afmhot_r\", \n shading=\"auto\", vmin=0, vmax=5)\n if type(self.file) == list:\n ax.xaxis.grid(color=\"k\", linewidth=2.5)\n ax.yaxis.grid(color=\"k\", linewidth=1)\n else:\n ax.grid(color=\"k\", linewidth=1)\n\n if labels[0]:\n ax.set_xlabel(\"Time(µs)\", fontweight=\"bold\", labelpad=10) \n if labels[1]:\n ax.set_ylabel(\"Residue\", fontweight=\"bold\", labelpad=12)\n\n if legend:\n cbar = plt.colorbar(self.plot)\n cbar.set_label(\"RMSD ($\\AA$)\", weight=\"bold\", labelpad=16)\n\n # TODO: make this sep legend ax object plotting method\n def rmsd_legend(self, vmax=5): \n \"\"\"\n Add rmsd cbar to its own axes object.\n \"\"\"\n ax = self.ax # TODO: need to make this dynamic for ac inputs \n \n cax, cbar_kwds = mpl.colorbar.make_axes(ax, location=\"right\",\n fraction=0.65, shrink=1.2, aspect=10, anchor=(0, 1.6))\n #cax = self.fig.add_axes([0.95, 0.1, 0.025, 0.4])\n\n cmap = cm.afmhot_r\n norm = Normalize(vmin=0, vmax=vmax)\n \n cbar = mpl.colorbar.ColorbarBase(cax, cmap=cmap, \n norm=norm,\n orientation=\"vertical\")\n #cbar.add_lines(range(0,vmax + 1), \"k\", linewidths=1)\n cbar.set_label(\"RMSD ($\\AA$)\", fontweight=\"bold\", labelpad=16)\n\n\ndef single_plot_test():\n ss_data = \"ipq/ired_test/NO_ION/ss_10ns.dat\"\n rms_data = \"ipq/ired_test/NO_ION/rmsd_per_res_bb_10ns.dat\"\n\n # fig, ax = plt.subplots(ncols=2, figsize=(12, 4),\n # gridspec_kw={'width_ratios' : [20, 3]})\n fig, ax = plt.subplots()\n # x says us but using ns for test\n # TODO: make subplot for legs\n #Per_Res_Plot([ss_data, ss_data], timescale=1000, ax=ax).ss_master(labels=(True, True), legend=True)\n Per_Res_Plot(ss_data, timescale=1000, ax=ax).ss_master(labels=(True, True), legend=True)\n #Per_Res_Plot([rms_data, rms_data], timescale=1000, ax=ax).rmsd_master(legend=False)\n\n #ax.set_xticks(np.arange(0,12,2))\n\n plt.tight_layout()\n plt.show()\n\ndef multi_cypa_plot(type):\n \"\"\"\n Parameters\n ----------\n type : str\n Can be 'dssp' or 'rmsd'.\n \"\"\"\n systems = [\"wt\", \"w4f\", \"w5f\", \"w6f\", \"w7f\"]\n if type == \"dssp\":\n ratios = [20, 1]\n size = (9.79, 12.5)\n elif type == \"rmsd\":\n ratios = [15, 5]\n size = (9.25, 12.5)\n fig, ax = plt.subplots(nrows=len(systems), ncols=2, sharex=\"col\", figsize=size,\n gridspec_kw={'width_ratios' : ratios})\n for num, sys in enumerate(systems):\n # plot legend and x label on the bottom axis\n if num == len(systems) - 1:\n if type == \"dssp\":\n Per_Res_Plot(f\"ipq/{sys}/v00/1us_noion/ss.dat\", timescale=10**4, ax=ax[num, 0]).ss_legend()\n elif type == \"rmsd\":\n Per_Res_Plot(f\"ipq/{sys}/v00/1us_noion/rmsd_3K0N_1-165_BB_perres.dat\", \n timescale=10**4, ax=ax[num, 1]).rmsd_legend()\n labels = (True, True)\n else:\n labels = (False, True)\n \n if type == \"dssp\":\n Per_Res_Plot([f\"ipq/{sys}/v0{i}/1us_noion/ss.dat\" for i in range(0, 5)], \n timescale=10**4, ax=ax[num, 0]).ss_master(labels=labels)\n # Per_Res_Plot([f\"ipq/{sys}/v00/1us_noion/ss.dat\"], timescale=10**4, \n # ax=ax[num, 0]).ss_master(labels=labels)\n elif type == \"rmsd\":\n 
Per_Res_Plot([f\"ipq/{sys}/v0{i}/1us_noion/rmsd_3K0N_1-165_BB_perres.dat\" for i in range(0,5)], \n timescale=10**4, ax=ax[num, 0]).rmsd_master(labels=labels)\n # Per_Res_Plot(f\"ipq/{sys}/v00/1us_noion/rmsd_3K0N_1-165_BB_perres.dat\", \n # timescale=10**4, ax=ax[num, 0]).rmsd_master(labels=labels)\n\n # set title and turn off the second column axes \n ax[num, 0].set_title(sys.upper(), fontweight=\"bold\", fontsize=10)\n #ax[num, 0].set_yticks(np.arange(0,180,20))\n #ax[num, 0].set_yticks(np.arange(140,180,20))\n ax[num, 0].set_ylim(60,120) # TODO: ylim=None arg\n ax[num, 1].axis(\"off\")\n\n # better fitting xticks: TODO: make this dynamic?\n ax[num, 0].set_xticks(np.arange(0,6,1))\n #ax[num, 0].set_xticks(np.arange(0,1.2,0.2))\n \n\n fig.tight_layout()\n fig.subplots_adjust(hspace=0.15)\n #plt.show()\n fig.savefig(f\"figures/per_res_{type}_140-160.png\", dpi=300, transparent=True)\n\nif __name__ == \"__main__\":\n #single_plot_test()\n multi_cypa_plot(\"dssp\")\n multi_cypa_plot(\"rmsd\")" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.char.split", "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "numpy.genfromtxt", "matplotlib.colorbar.ColorbarBase", "matplotlib.pyplot.colorbar", "matplotlib.colors.ListedColormap", "numpy.transpose", "matplotlib.colorbar.make_axes", "matplotlib.pyplot.rcParams.update", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kmckiern/spin
[ "2e2badcff2cac65c63c2752c1be3ca89673ce58d" ]
[ "spin/operators.py" ]
[ "from __future__ import division\nimport numpy as np\nfrom scipy.ndimage import filters\n\n\ndef adj_kernel(configuration):\n \"\"\" Creates adjecency kernel for arbitrary dimensional array \"\"\"\n # ensure each dimension is gt 2\n for dim_length in configuration.shape:\n assert dim_length > 2\n\n # create kernel of correct shape\n kernel = np.ones(configuration.shape)\n kernel = kernel[tuple(slice(0, 3) for i in kernel.shape)]\n\n # zero non adjacent / center\n if kernel.ndim > 1:\n non_adj = kernel[tuple(slice(None, None, j - 1) for j in kernel.shape)]\n non_adj *= 0\n center = kernel[tuple(slice(j - 2, j - 1, j) for j in kernel.shape)]\n center *= 0\n return kernel\n\n\ndef measure_energy(J, configuration):\n \"\"\" Evaluate hamiltonian via normalized convolution with kernel \"\"\"\n kernel = adj_kernel(configuration)\n c = filters.convolve(configuration, kernel, mode=\"wrap\")\n energy = -1.0 * J * np.sum(c * configuration) / np.sum(kernel)\n return energy / configuration.size\n\n\ndef measure_magnetization(configuration):\n \"\"\" Given by normalized sum over all spin values \"\"\"\n for d in range(configuration.ndim):\n if d == 0:\n mag = configuration.sum(-1)\n else:\n mag = mag.sum(-1)\n mag = np.abs(mag)\n\n return mag / configuration.size\n\n\ndef measure_heat_capacity(energy, temperature, n_spin=1):\n return (np.mean(energy ** 2) - np.mean(energy) ** 2) / (temperature ** 2 * n_spin)\n\n\ndef measure_magnetic_susceptibility(magnetization, temperature, n_spin=1):\n return (np.mean(magnetization ** 2) - np.mean(magnetization) ** 2) / (temperature * n_spin)\n" ]
[ [ "numpy.abs", "numpy.ones", "numpy.mean", "scipy.ndimage.filters.convolve", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
bittersweetpuff/in-search-of-the-unknown-roguelike
[ "fd1bfd488f0d4e42e0b8246d0d76086788eb14f0" ]
[ "src/tile_types.py" ]
[ "from typing import Tuple\n\nimport numpy as np # type: ignore\n\n# Tile graphics structured type compatible with Console.tiles_rgb.\ngraphic_dt = np.dtype(\n [\n (\"ch\", np.int32), # Unicode codepoint.\n (\"fg\", \"3B\"), # 3 unsigned bytes, for RGB colors.\n (\"bg\", \"3B\"),\n ]\n)\n\n# Tile struct used for statically defined tile data.\ntile_dt = np.dtype(\n [\n (\"walkable\", np.bool), # True if this tile can be walked over.\n (\"transparent\", np.bool), # True if this tile doesn't block FOV.\n (\"dark\", graphic_dt), # Graphics for when this tile is not in FOV.\n (\"light\", graphic_dt), # Graphics for when the tile is in FOV.\n ]\n)\n\n\ndef new_tile(\n *, # Enforce the use of keywords, so that parameter order doesn't matter.\n walkable: int,\n transparent: int,\n dark: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n light: Tuple[int, Tuple[int, int, int], Tuple[int, int, int]],\n) -> np.ndarray:\n \"\"\"Helper function for defining individual tile types \"\"\"\n return np.array((walkable, transparent, dark, light), dtype=tile_dt)\n\n\n# SHROUD represents unexplored, unseen tiles\nSHROUD = np.array((ord(\" \"), (255, 255, 255), (0, 0, 0)), dtype=graphic_dt)\n\nfloor = new_tile(\n walkable=True,\n transparent=True,\n dark=(ord(\" \"), (255, 255, 255), (50, 50, 150)),\n light=(ord(\" \"), (255, 255, 255), (200, 180, 50)),\n)\ndown_stairs = new_tile(\n walkable=True,\n transparent=True,\n dark=(ord(\">\"), (0, 0, 100), (50, 50, 150)),\n light=(ord(\">\"), (255, 255, 255), (200, 180, 50)),\n)\nwall = new_tile(\n walkable=False,\n transparent=False,\n dark=(ord(\" \"), (255, 255, 255), (0, 0, 100)),\n light=(ord(\" \"), (255, 255, 255), (130, 110, 50)),\n)\n" ]
[ [ "numpy.array", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hosseinberg/Cirq
[ "8b64834ba601e8b48394753c24800e16b36a59b1" ]
[ "cirq-core/cirq/sim/clifford/act_on_stabilizer_ch_form_args.py" ]
[ "# Copyright 2020 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, TYPE_CHECKING, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom cirq import _compat, value, ops, protocols\nfrom cirq.ops import common_gates, pauli_gates\nfrom cirq.ops import global_phase_op\nfrom cirq.sim.clifford import clifford_simulator\nfrom cirq.sim.clifford.act_on_stabilizer_args import ActOnStabilizerArgs\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass ActOnStabilizerCHFormArgs(ActOnStabilizerArgs):\n \"\"\"Wrapper around a stabilizer state in CH form for the act_on protocol.\"\"\"\n\n @_compat.deprecated_parameter(\n deadline='v0.15',\n fix='Specify all the arguments with keywords, use initial_state instead of state.',\n parameter_desc='positional arguments',\n match=lambda args, kwargs: len(args) != 1 or 'state' in kwargs,\n )\n def __init__(\n self,\n state: Optional['cirq.StabilizerStateChForm'] = None,\n prng: Optional[np.random.RandomState] = None,\n log_of_measurement_results: Optional[Dict[str, Any]] = None,\n qubits: Optional[Sequence['cirq.Qid']] = None,\n initial_state: Union[int, 'cirq.StabilizerStateChForm'] = 0,\n ):\n \"\"\"Initializes with the given state and the axes for the operation.\n\n Args:\n state: The StabilizerStateChForm to act on. Operations are expected\n to perform inplace edits of this object.\n qubits: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n prng: The pseudo random number generator to use for probabilistic\n effects.\n log_of_measurement_results: A mutable object that measurements are\n being recorded into.\n initial_state: The initial state for the simulation. 
This can be a\n full CH form passed by reference which will be modified inplace,\n or a big-endian int in the computational basis.\n \"\"\"\n super().__init__(prng, qubits, log_of_measurement_results)\n initial_state = state or initial_state\n if isinstance(initial_state, int):\n qubit_map = {q: i for i, q in enumerate(self.qubits)}\n initial_state = clifford_simulator.CliffordState(\n qubit_map, initial_state=initial_state\n ).ch_form\n self.state = initial_state\n\n def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:\n \"\"\"Returns the measurement from the stabilizer state form.\"\"\"\n return [self.state._measure(self.qubit_map[q], self.prng) for q in qubits]\n\n def _on_copy(self, target: 'ActOnStabilizerCHFormArgs', deep_copy_buffers: bool = True):\n target.state = self.state.copy()\n\n def _on_kronecker_product(\n self, other: 'cirq.ActOnStabilizerCHFormArgs', target: 'cirq.ActOnStabilizerCHFormArgs'\n ):\n target.state = self.state.kron(other.state)\n\n def _on_transpose_to_qubit_order(\n self, qubits: Sequence['cirq.Qid'], target: 'cirq.ActOnStabilizerCHFormArgs'\n ):\n axes = [self.qubit_map[q] for q in qubits]\n target.state = self.state.reindex(axes)\n\n def sample(\n self,\n qubits: Sequence['cirq.Qid'],\n repetitions: int = 1,\n seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,\n ) -> np.ndarray:\n measurements: Dict[str, List[np.ndarray]] = {}\n prng = value.parse_random_state(seed)\n for i in range(repetitions):\n op = ops.measure(*qubits, key=str(i))\n state = self.state.copy()\n ch_form_args = ActOnStabilizerCHFormArgs(\n prng=prng,\n log_of_measurement_results=measurements,\n qubits=self.qubits,\n initial_state=state,\n )\n protocols.act_on(op, ch_form_args)\n return np.array(list(measurements.values()), dtype=bool)\n\n def _x(self, g: common_gates.XPowGate, axis: int):\n exponent = g.exponent\n if exponent % 2 != 0:\n if exponent % 0.5 != 0.0:\n raise ValueError('Y exponent must be half integer') # coverage: ignore\n self._h(common_gates.H, axis)\n self._z(common_gates.ZPowGate(exponent=exponent), axis)\n self._h(common_gates.H, axis)\n self.state.omega *= _phase(g)\n\n def _y(self, g: common_gates.YPowGate, axis: int):\n exponent = g.exponent\n if exponent % 0.5 != 0.0:\n raise ValueError('Y exponent must be half integer') # coverage: ignore\n if exponent % 2 == 0:\n self.state.omega *= _phase(g)\n elif exponent % 2 == 0.5:\n self._z(pauli_gates.Z, axis)\n self._h(common_gates.H, axis)\n self.state.omega *= _phase(g) * (1 + 1j) / (2 ** 0.5)\n elif exponent % 2 == 1:\n self._z(pauli_gates.Z, axis)\n self._h(common_gates.H, axis)\n self._z(pauli_gates.Z, axis)\n self._h(common_gates.H, axis)\n self.state.omega *= _phase(g) * 1j\n elif exponent % 2 == 1.5:\n self._h(common_gates.H, axis)\n self._z(pauli_gates.Z, axis)\n self.state.omega *= _phase(g) * (1 - 1j) / (2 ** 0.5)\n\n def _z(self, g: common_gates.ZPowGate, axis: int):\n exponent = g.exponent\n state = self.state\n if exponent % 2 != 0:\n if exponent % 0.5 != 0.0:\n raise ValueError('Z exponent must be half integer') # coverage: ignore\n effective_exponent = exponent % 2\n for _ in range(int(effective_exponent * 2)):\n # Prescription for S left multiplication.\n # Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end\n state.M[axis, :] ^= state.G[axis, :]\n state.gamma[axis] = (state.gamma[axis] - 1) % 4\n state.omega *= _phase(g)\n\n def _h(self, g: common_gates.HPowGate, axis: int):\n exponent = g.exponent\n state = self.state\n if exponent % 2 != 0:\n if exponent % 1 != 0:\n raise 
ValueError('H exponent must be integer') # coverage: ignore\n # Prescription for H left multiplication\n # Reference: https://arxiv.org/abs/1808.00128\n # Equations 48, 49 and Proposition 4\n t = state.s ^ (state.G[axis, :] & state.v)\n u = state.s ^ (state.F[axis, :] & (~state.v)) ^ (state.M[axis, :] & state.v)\n alpha = sum(state.G[axis, :] & (~state.v) & state.s) % 2\n beta = sum(state.M[axis, :] & (~state.v) & state.s)\n beta += sum(state.F[axis, :] & state.v & state.M[axis, :])\n beta += sum(state.F[axis, :] & state.v & state.s)\n beta %= 2\n delta = (state.gamma[axis] + 2 * (alpha + beta)) % 4\n state.update_sum(t, u, delta=delta, alpha=alpha)\n state.omega *= _phase(g)\n\n def _cz(self, g: common_gates.CZPowGate, control_axis: int, target_axis: int):\n exponent = g.exponent\n state = self.state\n if exponent % 2 != 0:\n if exponent % 1 != 0:\n raise ValueError('CZ exponent must be integer') # coverage: ignore\n # Prescription for CZ left multiplication.\n # Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end\n state.M[control_axis, :] ^= state.G[target_axis, :]\n state.M[target_axis, :] ^= state.G[control_axis, :]\n state.omega *= _phase(g)\n\n def _cx(self, g: common_gates.CXPowGate, control_axis: int, target_axis: int):\n exponent = g.exponent\n state = self.state\n if exponent % 2 != 0:\n if exponent % 1 != 0:\n raise ValueError('CX exponent must be integer') # coverage: ignore\n # Prescription for CX left multiplication.\n # Reference: https://arxiv.org/abs/1808.00128 Proposition 4 end\n state.gamma[control_axis] = (\n state.gamma[control_axis]\n + state.gamma[target_axis]\n + 2 * (sum(state.M[control_axis, :] & state.F[target_axis, :]) % 2)\n ) % 4\n state.G[target_axis, :] ^= state.G[control_axis, :]\n state.F[control_axis, :] ^= state.F[target_axis, :]\n state.M[control_axis, :] ^= state.M[target_axis, :]\n state.omega *= _phase(g)\n\n def _global_phase(self, g: global_phase_op.GlobalPhaseGate):\n self.state.omega *= g.coefficient\n\n\ndef _phase(gate):\n return np.exp(1j * np.pi * gate.global_shift * gate.exponent)\n" ]
[ [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ddottsai/Code-Storage
[ "fe8753e3d93dfa69822ae06b64cc7d3b259a4434" ]
[ "Quick Terminal-Interface Database/resource_database.py" ]
[ "class resource_database():\n import pandas as pd\n import ujson as json\n from io import StringIO\n from multiprocessing import Pool\n from functools import partial\n import ast\n import os\n import re\n import glob\n import textwrap\n from contextlib import suppress\n from pandas.errors import EmptyDataError\n from selenium.common.exceptions import WebDriverException\n import gnureadline\n from prompt_toolkit import PromptSession\n\n global tag_aliases,db,families,cat_files,wrapper,suppress,directory,id_to_cat,ps\n global pd,json,StringIO,Pool,partial,ast,os,re,textwrap,WebDriverException,glob,EmptyDataError,suppress\n #global open_cat,close_cat,close_all_cats,add_cat,add_cat_attributes\n #global get_tag_aliases,add_alias,find,add_family,add_ref,save,end,show\n ps = PromptSession()\n wrapper = textwrap.TextWrapper(initial_indent=\" \")\n directory = os.path.dirname(os.path.realpath(__file__)) + '/'\n with open(directory+'ID_to_cat.txt') as file:\n id_to_cat = ast.literal_eval(file.read())\n #print(var)\n with open(directory+'tag_aliases.csv', 'r') as file:\n tag_aliases = [set(line[:-1].split(',')) for line in file.readlines()]\n with open(directory+'families.txt', 'r') as file:\n families = json.loads(file.read())\n #for key,lst in families.items():\n # families[key] = set(lst)\n cat_files = {}\n import os\n for file_name in os.listdir(directory+\"categories\"):\n if not file_name.startswith('.'):\n cat_name = file_name[:-4]\n cat_files[cat_name] = None\n\n @classmethod\n def get_ID_to_cat(self,ID):\n global id_to_cat\n if id_to_cat is None:\n with open(directory+\"ID_to_cat.txt\",\"r\") as file:\n id_to_cat = ast.literal_eval(file.read())\n try:\n return id_to_cat[str(ID)]\n except KeyError:\n print(\"No ref with specified ID was found!\")\n return []\n\n @classmethod\n def add_ref_to_id_to_cat(self,ID,cats):\n global id_to_cat\n if id_to_cat is None:\n with open(directory+\"ID_to_cat.txt\",\"r\") as file:\n id_to_cat = ast.literal_eval(file.read())\n id_to_cat[str(ID)] = cats\n\n def is_a_cat(cat):\n return cat in cat_files\n\n def get_input(query):\n while True:\n user_input = ps.prompt(query).lower()\n lst_input = re.split(\"[, ]+\",user_input)\n if lst_input[0] == \"show\":\n print()\n attr = lst_input[1] if len(lst_input) > 1 else re.split(\"[, ]+\",ps.prompt(\"Attribute to show: \"))[0]\n if attr == \"tag\":\n cats = \"\"\n while True:\n cats = ps.prompt(\"Categories to search for tags (type 'all' to include all tags): \")\n if cats == \"show\":\n resource_database.show([\"cats\"])\n else:\n break\n resource_database.show([\"tags\",re.split(\"[, ]+\", cats)])\n elif attr == \"alias\":\n resource_database.show([\"aliases\"])\n elif attr == \"cat\":\n resource_database.show([\"cats\"])\n elif attr == \"fam\":\n resource_database.show([\"families\"])\n else:\n print(\"Field '\"+attr+\"' does not exist.\")\n \"\"\"\n if lst_input[1] == \"key\":\n query = [\"keys\",re.split(\"[, ]+\",input(\n \"Categories to search for keys (type 'all' to include all keys): \"))]\n resource_database.show(query)\n \"\"\"\n\n print()\n else:\n return user_input.lower()\n\n @classmethod\n def SetParser(self,data):\n return ast.literal_eval(data)\n\n @classmethod\n def load_tags(self):\n with open(directory+'tag_aliases.csv', 'r') as file:\n tag_aliases = [set(line.split(',')) for line in file.readlines()]\n\n @classmethod\n def load_families(self):\n with open(directory+'families.txt', 'r') as file:\n families = json.loads(file.read())\n\n @classmethod\n def open_cat(self,cat_name):\n if cat_name in 
cat_files and cat_files[cat_name] is not None:\n return True\n try:\n converters = {s: (lambda data : None if data==\"\" else ast.literal_eval(data)) for s in\n ['keys','tags']}\n cat_files[cat_name] = pd.read_csv(directory + \"categories/\"+cat_name+\".csv\",\n converters=converters,index_col=0)\n return True\n except (FileNotFoundError,EmptyDataError):\n temp = self.get_input(\"Category does not exist. Create a new category? \")\n if temp.lower() == \"yes\":\n open(directory+\"categories/\"+cat_name+\".csv\",\"w+\").close()\n cat_files[cat_name] = pd.DataFrame()#columns=[\"tags\",\"keys\",\"summary\",\n #\"family\",\"ref type\",\"date\",\"ref\"])\n return True\n else:\n print(\"Okay, category not created.\")\n return False\n\n @classmethod\n def close_cat(self,cat_name):\n cat_files[cat_name].to_csv(directory+\"categories/\"+cat_name+\".csv\")\n cat_files[cat_name] = None\n\n @classmethod\n def close_all_cats(self):\n for cat_name in cat_files.keys():\n close_cat(cat_name)\n\n @classmethod\n def add_cat(self,cat_name,cat_attr=None):\n if cat_name in cat_files:\n return False\n f = open(cat_name +\".txt\",\"w+\")\n f.write(\"{}\")\n cat_files[cat_name] = None\n\n @classmethod\n def edit_cat_attributes(self,cat_name,cat_attr):\n self.open_cat(cat_name)\n if isinstance(cat_attr, list):\n cat_files[cat_name].extend(cat_attr)\n else:\n cat_files[cat_name].append(cat_attr)\n\n @classmethod\n def get_tag_aliases(self,tag):\n tag = tag.lower()\n for equiv in tag_aliases:\n if tag in equiv:\n return equiv\n\n @classmethod\n def add_alias(self,lst):\n final ={i.lower() for i in lst}\n for equiv in tag_aliases:\n for l in lst:\n if l in equiv:\n final.update(equiv)\n tag_aliases.remove(equiv)\n break\n tag_aliases.append(final)\n\n @classmethod\n def query(self,cats=None,tags=None,families=None,ref_types=None):\n if cats == None:\n cats = cat_files.keys()\n if tags != None:\n tags = set(tags)\n if ref_types != None:\n ref_types = set(ref_types)\n hit_ID = []\n hits = []\n hit_cat_names = []\n for cat_name in cats:\n if cat_name not in cat_files:\n print(\"\\nWarning: \"+cat_name+\" is not the name of a category.\")\n continue\n if cat_files[cat_name] is None:\n self.open_cat(cat_name)\n for ID,ref_info in cat_files[cat_name].iterrows():\n if ID not in hit_ID:\n if tags == None or len(tags.intersection(ref_info['tags'])) > 0:\n if families == None or ref_info['family'] in families:\n if ref_types == None or ref_info['ref type'] in ref_types:\n hit_ID.append(int(ID))\n hit_cat_names.append(cat_name)\n hits.append(ref_info)\n return hits,hit_ID\n\n @classmethod\n def add_family(self,family_name,cats=[]):\n #families[family_name] = set(cats)\n families[family_name] = list(cats)\n\n @classmethod\n def add_ref(self,ref,cats=[],tags=None,keys=None,summary=None,family=None,ref_type=None):\n if ref in [\"download\",\"downloads\"]:\n old_path = max(glob.iglob(os.path.expanduser('~/Downloads/*')), key=lambda a:os.stat(a).st_birthtime)\n new_path = os.path.expanduser(\"~/resources/downloads/\")+ os.path.basename(old_path)\n os.rename(old_path,new_path)\n ref = new_path\n if ref_type == None:\n if len(ref) > 3 and (ref[0:4] == \"http\" or ref[0:4] == \"www.\"):\n ref_type = \"url\"\n elif \" \" not in ref and \"/\" in ref:\n ref_type = \"file\"\n else:\n ref_type = \"note\"\n if ref_type == \"url\":\n if ref[0:4] != \"www.\" and ref[0:4] != \"http\":\n ref = \"www.\" + ref\n import datetime\n t = datetime.date.today().strftime(\"%B %d, %Y\")\n if family != None:\n if family not in families:\n families[family] = 
list(cats)\n else:\n for c in cats:\n if c not in families[family]:\n families[family].append(c)\n series = pd.Series({\"tags\":tags,\"keys\":keys,\"summary\":summary,\"family\":family,\n \"ref type\":ref_type,\"date\":t,\"ref\":ref})\n with open(directory+\"max_ID.txt\",\"r+\") as file:\n #a = \"wow\"\n curr_max_ID = int(file.read().replace('\\x00',''))\n curr_max_ID += 1\n file.truncate(0)\n file.write(str(curr_max_ID))\n series.name = str(curr_max_ID)\n\n #with open(\"resources/ref_ID\",\"a\") as file:\n # file.write(\"\\n\"+ID + \":\" + cats)\n\n for cat_name in cats:\n self.open_cat(cat_name)\n cat_files[cat_name] = cat_files[cat_name].append(series)\n #cat_files[cat_name] = pd.DataFrame(series).transpose()#pd.DataFrame(series,columns=[\"tags\",\"keys\",\"summary\",\n # \"family\",\"type\",\"date\",\"ref\"])\n self.close_cat(cat_name)\n\n self.add_ref_to_id_to_cat(curr_max_ID,cats)\n\n @classmethod\n def save(self):\n with open(directory+'tag_aliases.csv', 'w') as file:\n for i in tag_aliases:\n file.write(\",\".join(i) + \"\\n\")\n with open(directory+'families.txt','w') as file:\n #file.truncate()\n file.write(json.dumps(families))\n for cat_name,df in cat_files.items():\n if df is not None:\n df.to_csv(directory+\"categories/\" + cat_name+\".csv\")\n if id_to_cat is not None:\n with open(directory+'ID_to_cat.txt','w') as file:\n #file.truncate()\n file.write(json.dumps(id_to_cat))\n \"\"\"\n with open('resources/resources.txt', 'w') as file:\n file.truncate()\n file.write(\"{\")\n for key,df in db.items():\n file.write(\"\\\"\"+key+ \"\\\":\" + df.to_csv(sep=\"`\"))\n file.write(\"}\")\n \"\"\"\n\n @classmethod\n def end(self):\n self.save()\n exit()\n\n @classmethod\n def show(self,query):\n #query = [q.lower() for q in query]\n if query[0] in [\"cats\",\"cat\",\"categories\",\"category\"]:\n print(self.get_contents(list(cat_files.keys())))\n elif query[0] == \"alias\" or query[0] == \"aliases\":\n for t in tag_aliases:\n print(t)\n elif query[0] == \"tags\":\n if query[1] == [\"all\"]:\n query[1] = cat_files.keys()\n tags = set()\n failed_cats = []\n for cat in query[1]:\n self.open_cat(cat)\n try:\n tags.update({t for ref_tags in cat_files[cat].loc[:,\"tags\"] for t in ref_tags})\n except KeyError:\n failed_cats.append(cat)\n self.close_cat(cat)\n print(\"\\n\" + self.get_contents(tags))\n if len(failed_cats) > 0:\n print(\"\\n Note that the following were not valid categories, and thus were skipped:\")\n print(wrapper.fill(self.get_contents(failed_cats)))\n elif query[0] == \"family\" or query[0] == \"families\":\n print(self.get_contents(families))\n\n @classmethod\n def get(self,num_hits=\"all\",features=None,cats=None,tags=None,families=None,ref_types=None):\n ordered_cols = [\"date\",\"family\",\"keys\",\"ref type\",\"summary\",\"tags\",\"ref\"]\n display_columns = []\n if features is None:\n features = [\"keys\",\"tags\",\"family\",\"summary\",\"ref\"]\n for i in ordered_cols:\n if features == \"all\" or i in features:\n display_columns.append(i)\n hits,hit_IDs = self.query(cats,tags,families,ref_types)\n #df = pd.concat(hits, axis=1, keys=[hit.name for hit in hits])\n #df[\"cat\"] = hit_cat_names\n if len(hits) == 0:\n return pd.DataFrame(),[]\n df = pd.DataFrame.from_records(hits)\n if len(df.index) > 0:\n if len(df.index) == 1:\n df = df.loc[:,display_columns].iloc[:len(display_columns)]\n else:\n df = df.loc[:,display_columns].iloc[:,:len(display_columns)]\n #df = df.reindex(columns=ordered_cols)\n pd.set_option('display.width', 200)\n 
pd.set_option('display.max_columns',100)\n pd.set_option('display.max_colwidth',60)\n if num_hits == \"all\" or num_hits > len(df.index):\n return df,hit_IDs\n return df.head(num_hits),hit_IDs[:num_hits]\n else:\n return pd.DataFrame(),[]\n\n @classmethod\n def scroll(self,page_size=10,features=None,cats=None,tags=None,families=None,ref_types=None):\n all_hits,all_hits_IDs = self.get(num_hits=\"all\",features=\"all\",cats=cats,tags=tags,families=families,ref_types=ref_types)\n if len(all_hits.index) == 0:\n print(\"\\n\\nNo matching refs.\")\n return\n row_num = 0\n driver = None\n if features is None:\n features = [\"keys\",\"tags\",\"family\",\"ref type\"]\n pd.set_option('display.width', 175)\n pd.set_option('display.max_columns',100)\n pd.set_option('display.max_colwidth',1000)\n else:\n if features == \"all\":\n features = all_hits.columns\n pd.set_option('display.width', 175)\n pd.set_option('display.max_columns',100)\n pd.set_option('display.max_colwidth',int(175/(len(features))))\n if page_size == \"all\":\n page_size = 1000000\n stop = False\n while not stop:\n print(\"\\n\")\n try:\n print(all_hits.ix[row_num:row_num+page_size-1,features])\n except IndexError:\n print(all_hits.ix[row_num:,features])\n print(\"\\n\"+\"-\"*70+\"\\n\")\n last_enter = False\n reset_last_enter = False\n while True:\n if reset_last_enter:\n last_enter = False\n reset_last_enter = last_enter\n user_input = self.get_input(\"\\n\\nScroll | User Input: \").lower()\n if user_input in [\"option\",\"options\",\"help\"]:\n print(\"\\nnext, back, repeat, break, open, zoom, edit, delete\")\n elif user_input in [\"scroll\",\"next\",\"n\"]:\n break\n elif user_input in [\"stop\",\"break\",\"exit\",\"end\",\"done\"]:\n return\n elif user_input == \"\":\n if last_enter == True:\n stop = True\n break\n else:\n last_enter = True\n elif user_input == \"repeat\":\n row_num -= page_size\n break\n elif user_input in [\"back\",\"b\"]:\n row_num -= page_size*2\n break\n else:\n user_input_lst = re.split(\"[, ]+\",user_input)\n if len(user_input_lst) < 2:\n temp = re.split(\"[, ]+\",self.get_input(\n \"Please specify (by index) which refs you'd like to \"\n + user_input_lst[0] + \": \"))\n if isinstance(temp,list):\n user_input_lst.extend(temp)\n else:\n user_input_lst.append(temp)\n try:\n try:\n selected_ref_nums = [all_hits_IDs[int(i)] for i in user_input_lst[1:]]\n selected_refs = [all_hits.loc[int(i),:] for i in user_input_lst[1:]]\n except ValueError:\n print(\"All selected refs must be integers.\")\n #print(\"\\n\"+\"-\"*70)\n continue\n for i,selected_ref in enumerate(selected_refs):\n if user_input_lst[0] == \"open\":\n driver = self.open_ref(selected_ref.loc[\"ref\"],selected_ref.loc[\"ref type\"],driver)\n elif user_input_lst[0] in [\"refresh\"]:\n cat_names = self.get_ID_to_cat(selected_ref_nums[i])\n self.refresh_cats(cat_names)\n elif user_input_lst[0] in [\"delete\",\"remove\",\"rm\",\"del\"]:\n print(selected_ref)\n if selected_ref[\"ref type\"] not in [\"note\",\"url\"]:\n choice = \"\"\n while choice not in [\"remove\",\"delete\",\"cancel\"]:\n choice = self.get_input(\n \"\\n\\n\\033[1mRemove\\033[0m from refs, or \\033[1mdelete\\033[0m file altogether ('cancel' to exit)? \")\n else:\n choice = \"remove\"\n if choice != \"cancel\":\n confirmation = \"\"\n if choice == \"remove\":\n while confirmation not in [\"yes\",\"no\"]:\n confirmation = self.get_input(\"\\n\\nAre you sure you want to \\033[1mremove\\033[0m this ref (shown above)? 
\")\n if confirmation == \"yes\":\n all_hits.drop(selected_ref.name,inplace=True)\n cat_names = self.get_ID_to_cat(selected_ref_nums[i])\n for cat_name in cat_names:\n if cat_files[cat_name] is None:\n self.open_cat(cat_name)\n cat_files[cat_name].drop(selected_ref_nums[i],inplace=True)\n self.close_cat(cat_name)\n del id_to_cat[str(selected_ref_nums[i])]\n print(\"Okay, ref removed.\")\n elif choice == \"delete\":\n while confirmation not in [\"yes\",\"no\"]:\n confirmation = self.get_input(\"Are you sure you want to \\033[1mdelete\\033[0m this ref (shown above)? \")\n if confirmation == \"yes\":\n all_hits.drop(selected_ref.name,inplace=True)\n cat_names = self.get_ID_to_cat(selected_ref_nums[i])\n for cat_name in cat_names:\n if cat_files[cat_name] is None:\n self.open_cat(cat_name)\n cat_files[cat_name].drop(selected_ref_nums[i],inplace=True)\n self.close_cat(cat_name)\n del id_to_cat[str(selected_ref_nums[i])]\n try:\n os.remove(selected_ref[\"ref\"])\n print(\"Okay, file removed and deleted.\")\n except Exception as e:\n print(\"File was removed, but there was an error (\"+str(e)+\", so file was not deleted.\")\n elif user_input_lst[0] == \"zoom\":\n print(\"\\n\")\n print(\"Hit #:\",user_input_lst[i+1])\n print(\"Ref Type:\", selected_ref.loc[\"ref type\"])\n print(\"Keys:\",self.get_contents(selected_ref.loc[\"keys\"]))\n print(\"Tags:\",self.get_contents(selected_ref.loc[\"tags\"]))\n print(\"Family:\",selected_ref.loc[\"family\"])\n if selected_ref.loc[\"summary\"] != None:\n print(\"Summary:\")\n end_str = (\"...\" if len(selected_ref.loc[\"summary\"]) > 300 else \"\")\n print(wrapper.fill(selected_ref.loc[\"summary\"][:1000] + end_str))\n else:\n print(\"Ref Path:\",selected_ref.loc[\"ref\"])\n if selected_ref.loc[\"ref type\"] == \"note\":\n print(\"ref:\")\n end_str = (\"...\" if len(selected_ref.loc[\"ref\"]) > 300 else \"\")\n print(wrapper.fill(selected_ref.loc[\"ref\"][:1000] + end_str))\n else:\n end_str = (\"...\" if len(selected_ref.loc[\"ref\"]) > 300 else \"\")\n print(\"ref: \" + selected_ref.loc[\"ref\"][:300] + end_str)\n elif user_input_lst[0] == \"edit\":\n print(selected_ref)\n field = self.get_input(\"\\n\\nField to change ('done' to end): \")\n while field != \"done\":\n try:\n ref_part = selected_ref[\"ref\"][:50] if len(selected_ref[\"ref\"]) > 50 else selected_ref[\"ref\"]\n except KeyError:\n print(\"Not a valid field.\")\n continue\n temp_file = \"Field View - \" + ref_part.replace(\":\",\"\").replace(\"~\",\"\").replace(\"/\",\"\") + \".txt\"\n try:\n with open(temp_file,\"w+\") as file:\n file.write(field + \"\\n\" + str(selected_ref.loc[field]))\n os.system('open \"' + temp_file + '\"')\n print(\"\\n\\nEdit, save, and close file.\")\n self.get_input(\"\\n\\nPress enter when done editing field.\")\n with open(temp_file,\"r\") as file:\n next(file)\n updated_field = \"\"\n for line in file:\n updated_field += line\n if updated_field == \"None\":\n updated_field = None\n with suppress(Exception):\n updated_field = ast.literal_eval(s)\n all_hits.loc[selected_ref.name,field] = updated_field\n cat_names = self.get_ID_to_cat(selected_ref_nums[i])\n for cat_name in cat_names:\n self.open_cat(cat_name)\n cat_files[cat_name].loc[selected_ref_nums[i],field] = updated_field\n self.close_cat(cat_name)\n except Exception as e:\n print(e)\n field = self.get_input(\"\\n\\nField to change ('done' to end): \")\n else:\n print(\"\\nSorry,\",user_input[0],\" is not a valid command.\")\n continue\n print(\"\\n\\n\"+\"-\"*70)\n except Exception as e:\n print(type(e))\n 
print(e)\n continue\n row_num += page_size\n if row_num >= len(all_hits):\n choice = self.get_input(\"\\n\\nAll items have been scrolled through. Press enter to exit, or\"+\n \" anything else to continue.\")\n if choice != \"\":\n row_num -= page_size\n stop = False\n else:\n stop = True\n print(\"\\nExited scroll.\")\n\n @classmethod\n def open_ref(self,ref,ref_type,driver):\n if ref_type == \"url\":\n if driver is None:\n from selenium import webdriver\n driver = webdriver.Chrome('chromedriver')\n else:\n driver.execute_script(\"window.open('');\")\n driver.switch_to.window(driver.window_handles[-1])\n for prefix in [\"\",\"http://\",\"https://\"]:\n with suppress(WebDriverException):\n driver.get(prefix+ref)\n break\n return driver\n elif ref_type == \"note\":\n print()\n print(\"-\"*30)\n print()\n print(ref)\n else:\n \"\"\"\n elif ref_type == \"text\":\n program =\n elif ref_type == \"spreadsheet\":\n program =\n elif ref_type == \"image\":\n program =\n elif ref_type == \"pages\":\n program =\n elif ref_type == \"video\":\n program =\n else:\n program =\n subprocess.Popen(program,ref)\n \"\"\"\n os.system('open \"' + ref + '\"')\n\n @classmethod\n def refresh_cats(self,cat_names):\n if cat_names == \"all\":\n cat_names = cat_files.keys()\n for i in cat_names:\n if i in cat_files and cat_files[i] is not None:\n d = None\n self.open_cat(i)\n\n @classmethod\n def get_contents(self,arr):\n result = \"\"\n for i in arr:\n result += i + \", \"\n if len(result) == 0:\n return \"\"\n return result[:-2]\n" ]
[ [ "pandas.read_csv", "pandas.Series", "pandas.DataFrame", "pandas.DataFrame.from_records", "pandas.set_option" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chengstone/insightface
[ "33d9a34d5f5e7adfc0e28843072c86699499bd8f" ]
[ "src/eval/verification.py" ]
[ "\"\"\"Helper for evaluation on the Labeled Faces in the Wild dataset \n\"\"\"\n\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport sys\nimport numpy as np\nfrom scipy import misc\nfrom sklearn.model_selection import KFold\nfrom scipy import interpolate\nimport sklearn\nimport cv2\nimport math\nimport datetime\nimport pickle\nfrom sklearn.decomposition import PCA\nimport mxnet as mx\nfrom mxnet import ndarray as nd\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))\nimport face_image\n\n\nclass LFold:\n def __init__(self, n_splits = 2, shuffle = False):\n self.n_splits = n_splits\n if self.n_splits>1:\n self.k_fold = KFold(n_splits = n_splits, shuffle = shuffle)\n\n def split(self, indices):\n if self.n_splits>1:\n return self.k_fold.split(indices)\n else:\n return [(indices, indices)]\n\n\ndef calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, pca = 0):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n tprs = np.zeros((nrof_folds,nrof_thresholds))\n fprs = np.zeros((nrof_folds,nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n indices = np.arange(nrof_pairs)\n #print('pca', pca)\n \n if pca==0:\n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n #print('train_set', train_set)\n #print('test_set', test_set)\n if pca>0:\n print('doing pca on', fold_idx)\n embed1_train = embeddings1[train_set]\n embed2_train = embeddings2[train_set]\n _embed_train = np.concatenate( (embed1_train, embed2_train), axis=0 )\n #print(_embed_train.shape)\n pca_model = PCA(n_components=pca)\n pca_model.fit(_embed_train)\n embed1 = pca_model.transform(embeddings1)\n embed2 = pca_model.transform(embeddings2)\n embed1 = sklearn.preprocessing.normalize(embed1)\n embed2 = sklearn.preprocessing.normalize(embed2)\n #print(embed1.shape, embed2.shape)\n diff = np.subtract(embed1, embed2)\n dist = np.sum(np.square(diff),1)\n \n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n for threshold_idx, 
threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])\n best_threshold_index = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])\n \n tpr = np.mean(tprs,0)\n fpr = np.mean(fprs,0)\n return tpr, fpr, accuracy\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n \n tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)\n fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)\n acc = float(tp+tn)/dist.size\n return tpr, fpr, acc\n\n\n \ndef calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n val = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n \n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n indices = np.arange(nrof_pairs)\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n \n # Find the threshold that gives FAR = far_target\n far_train = np.zeros(nrof_thresholds)\n for threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])\n if np.max(far_train)>=far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n \n val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])\n \n val_mean = np.mean(val)\n far_mean = np.mean(far)\n val_std = np.std(val)\n return val_mean, val_std, far_mean\n\n\ndef calculate_val_far(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n #print(true_accept, false_accept)\n #print(n_same, n_diff)\n val = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return val, far\n\ndef evaluate(embeddings, actual_issame, nrof_folds=10, pca = 0):\n # Calculate evaluation metrics\n thresholds = np.arange(0, 4, 0.01)\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n tpr, fpr, accuracy = calculate_roc(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), nrof_folds=nrof_folds, pca = pca)\n thresholds = np.arange(0, 4, 0.001)\n val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)\n return tpr, fpr, accuracy, val, val_std, far\n\ndef load_bin(path, image_size):\n bins, issame_list = 
pickle.load(open(path, 'rb'))\n data_list = []\n for flip in [0,1]:\n data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))\n data_list.append(data)\n i = 0\n for i in xrange(len(issame_list)*2):\n _bin = bins[i]\n img = mx.image.imdecode(_bin)\n img = nd.transpose(img, axes=(2, 0, 1))\n for flip in [0,1]:\n if flip==1:\n img = mx.ndarray.flip(data=img, axis=2)\n data_list[flip][i][:] = img\n i+=1\n if i%1000==0:\n print('loading bin', i)\n print(data_list[0].shape)\n return (data_list, issame_list)\n\ndef test(data_set, mx_model, batch_size, nfolds=10, data_extra = None, label_shape = None):\n print('testing verification..')\n data_list = data_set[0]\n issame_list = data_set[1]\n model = mx_model\n embeddings_list = []\n if data_extra is not None:\n _data_extra = nd.array(data_extra)\n time_consumed = 0.0\n if label_shape is None:\n _label = nd.ones( (batch_size,) )\n else:\n _label = nd.ones( label_shape )\n for i in xrange( len(data_list) ):\n data = data_list[i]\n embeddings = None\n ba = 0\n while ba<data.shape[0]:\n bb = min(ba+batch_size, data.shape[0])\n count = bb-ba\n _data = nd.slice_axis(data, axis=0, begin=bb-batch_size, end=bb)\n #print(_data.shape, _label.shape)\n time0 = datetime.datetime.now()\n if data_extra is None:\n db = mx.io.DataBatch(data=(_data,), label=(_label,))\n else:\n db = mx.io.DataBatch(data=(_data,_data_extra), label=(_label,))\n model.forward(db, is_train=False)\n net_out = model.get_outputs()\n #_arg, _aux = model.get_params()\n #__arg = {}\n #for k,v in _arg.iteritems():\n # __arg[k] = v.as_in_context(_ctx)\n #_arg = __arg\n #_arg[\"data\"] = _data.as_in_context(_ctx)\n #_arg[\"softmax_label\"] = _label.as_in_context(_ctx)\n #for k,v in _arg.iteritems():\n # print(k,v.context)\n #exe = sym.bind(_ctx, _arg ,args_grad=None, grad_req=\"null\", aux_states=_aux)\n #exe.forward(is_train=False)\n #net_out = exe.outputs\n _embeddings = net_out[0].asnumpy()\n time_now = datetime.datetime.now()\n diff = time_now - time0\n time_consumed+=diff.total_seconds()\n #print(_embeddings.shape)\n if embeddings is None:\n embeddings = np.zeros( (data.shape[0], _embeddings.shape[1]) )\n embeddings[ba:bb,:] = _embeddings[(batch_size-count):,:]\n ba = bb\n embeddings_list.append(embeddings)\n\n _xnorm = 0.0\n _xnorm_cnt = 0\n for embed in embeddings_list:\n for i in xrange(embed.shape[0]):\n _em = embed[i]\n _norm=np.linalg.norm(_em)\n #print(_em.shape, _norm)\n _xnorm+=_norm\n _xnorm_cnt+=1\n _xnorm /= _xnorm_cnt\n\n embeddings = embeddings_list[0].copy()\n embeddings = sklearn.preprocessing.normalize(embeddings)\n acc1 = 0.0\n std1 = 0.0\n #_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)\n #acc1, std1 = np.mean(accuracy), np.std(accuracy)\n\n #print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))\n #embeddings = np.concatenate(embeddings_list, axis=1)\n embeddings = embeddings_list[0] + embeddings_list[1]\n embeddings = sklearn.preprocessing.normalize(embeddings)\n print(embeddings.shape)\n print('infer time', time_consumed)\n _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)\n acc2, std2 = np.mean(accuracy), np.std(accuracy)\n return acc1, std1, acc2, std2, _xnorm, embeddings_list\n\ndef test_badcase(data_set, mx_model, batch_size, name='', data_extra = None, label_shape = None):\n print('testing verification badcase..')\n data_list = data_set[0]\n issame_list = data_set[1]\n model = mx_model\n embeddings_list = []\n if data_extra is not None:\n _data_extra = 
nd.array(data_extra)\n time_consumed = 0.0\n if label_shape is None:\n _label = nd.ones( (batch_size,) )\n else:\n _label = nd.ones( label_shape )\n for i in xrange( len(data_list) ):\n data = data_list[i]\n embeddings = None\n ba = 0\n while ba<data.shape[0]:\n bb = min(ba+batch_size, data.shape[0])\n count = bb-ba\n _data = nd.slice_axis(data, axis=0, begin=bb-batch_size, end=bb)\n #print(_data.shape, _label.shape)\n time0 = datetime.datetime.now()\n if data_extra is None:\n db = mx.io.DataBatch(data=(_data,), label=(_label,))\n else:\n db = mx.io.DataBatch(data=(_data,_data_extra), label=(_label,))\n model.forward(db, is_train=False)\n net_out = model.get_outputs()\n _embeddings = net_out[0].asnumpy()\n time_now = datetime.datetime.now()\n diff = time_now - time0\n time_consumed+=diff.total_seconds()\n if embeddings is None:\n embeddings = np.zeros( (data.shape[0], _embeddings.shape[1]) )\n embeddings[ba:bb,:] = _embeddings[(batch_size-count):,:]\n ba = bb\n embeddings_list.append(embeddings)\n embeddings = embeddings_list[0] + embeddings_list[1]\n embeddings = sklearn.preprocessing.normalize(embeddings)\n thresholds = np.arange(0, 4, 0.01)\n actual_issame = np.asarray(issame_list)\n nrof_folds = 10\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n tprs = np.zeros((nrof_folds,nrof_thresholds))\n fprs = np.zeros((nrof_folds,nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n indices = np.arange(nrof_pairs)\n \n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n data = data_list[0]\n\n pouts = []\n nouts = []\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n \n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n #print(train_set)\n #print(train_set.__class__)\n for threshold_idx, threshold in enumerate(thresholds):\n p2 = dist[train_set]\n p3 = actual_issame[train_set]\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, p2, p3)\n best_threshold_index = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])\n best_threshold = thresholds[best_threshold_index]\n for iid in test_set:\n ida = iid*2\n idb = ida+1\n asame = actual_issame[iid]\n _dist = dist[iid]\n violate = _dist - best_threshold\n if not asame:\n violate *= -1.0\n if violate>0.0:\n imga = data[ida].asnumpy().transpose( (1,2,0) )[...,::-1] #to bgr\n imgb = data[idb].asnumpy().transpose( (1,2,0) )[...,::-1]\n #print(imga.shape, imgb.shape, violate, asame, _dist)\n if asame:\n pouts.append( (imga, imgb, _dist, best_threshold, ida) )\n else:\n nouts.append( (imga, imgb, _dist, best_threshold, ida) )\n\n \n tpr = np.mean(tprs,0)\n fpr = np.mean(fprs,0)\n acc = np.mean(accuracy)\n pouts = sorted(pouts, key = lambda x: x[2], reverse=True)\n nouts = sorted(nouts, key = lambda x: x[2], reverse=False)\n print(len(pouts), len(nouts))\n print('acc', acc)\n gap = 10\n image_shape = (112,224,3)\n out_dir = \"./badcases\"\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n if 
len(nouts)>0:\n threshold = nouts[0][3]\n else:\n threshold = pouts[-1][3]\n \n for item in [(pouts, 'positive(false_negative).png'), (nouts, 'negative(false_positive).png')]:\n cols = 4\n rows = 8000\n outs = item[0]\n if len(outs)==0:\n continue\n #if len(outs)==9:\n # cols = 3\n # rows = 3\n\n _rows = int(math.ceil(len(outs)/cols))\n rows = min(rows, _rows)\n hack = {}\n\n if name.startswith('cfp') and item[1].startswith('pos'):\n hack = {0:'manual/238_13.jpg.jpg', 6:'manual/088_14.jpg.jpg', 10:'manual/470_14.jpg.jpg', 25:'manual/238_13.jpg.jpg', 28:'manual/143_11.jpg.jpg'}\n\n filename = item[1]\n if len(name)>0:\n filename = name+\"_\"+filename\n filename = os.path.join(out_dir, filename)\n img = np.zeros( (image_shape[0]*rows+20, image_shape[1]*cols+(cols-1)*gap, 3), dtype=np.uint8 )\n img[:,:,:] = 255\n text_color = (0,0,153)\n text_color = (255,178,102)\n text_color = (153,255,51)\n for outi, out in enumerate(outs):\n row = outi//cols\n col = outi%cols\n if row==rows:\n break\n imga = out[0].copy()\n imgb = out[1].copy()\n if outi in hack:\n idx = out[4]\n print('noise idx',idx)\n aa = hack[outi]\n imgb = cv2.imread(aa)\n #if aa==1:\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 1)\n #elif aa==3:\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 0)\n #else:\n # for ii in xrange(2):\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 1)\n dist = out[2]\n _img = np.concatenate( (imga, imgb), axis=1 )\n k = \"%.3f\"%dist\n #print(k)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(_img,k,(80,image_shape[0]//2+7), font, 0.6, text_color, 2)\n #_filename = filename+\"_%d.png\"%outi\n #cv2.imwrite(_filename, _img)\n img[row*image_shape[0]:(row+1)*image_shape[0], (col*image_shape[1]+gap*col):((col+1)*image_shape[1]+gap*col),:] = _img\n #threshold = outs[0][3]\n font = cv2.FONT_HERSHEY_SIMPLEX\n k = \"threshold: %.3f\"%threshold\n cv2.putText(img,k,(img.shape[1]//2-70,img.shape[0]-5), font, 0.6, text_color, 2)\n cv2.imwrite(filename, img)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='do verification')\n # general\n parser.add_argument('--data-dir', default='', help='')\n parser.add_argument('--model', default='../model/softmax,50', help='path to load model.')\n parser.add_argument('--target', default='lfw,cfp_ff,cfp_fp,agedb_30', help='test targets.')\n parser.add_argument('--gpu', default=0, type=int, help='gpu id')\n parser.add_argument('--batch-size', default=32, type=int, help='')\n parser.add_argument('--max', default='', type=str, help='')\n parser.add_argument('--badcase', default=0, type=int, help='')\n parser.add_argument('--nfolds', default=10, type=int, help='')\n args = parser.parse_args()\n\n prop = face_image.load_property(args.data_dir)\n image_size = prop.image_size\n print('image_size', image_size)\n ctx = mx.gpu(args.gpu)\n nets = []\n vec = args.model.split(',')\n prefix = args.model.split(',')[0]\n epochs = []\n if len(vec)==1:\n pdir = os.path.dirname(prefix)\n for fname in os.listdir(pdir):\n if not fname.endswith('.params'):\n continue\n _file = os.path.join(pdir, fname)\n if _file.startswith(prefix):\n epoch = int(fname.split('.')[0].split('-')[1])\n epochs.append(epoch)\n epochs = sorted(epochs, reverse=True)\n if len(args.max)>0:\n _max = [int(x) for x in args.max.split(',')]\n assert len(_max)==2\n if len(epochs)>_max[1]:\n epochs = epochs[_max[0]:_max[1]]\n\n else:\n epochs = [int(x) for x in vec[1].split('|')]\n print('model number', len(epochs))\n time0 = datetime.datetime.now()\n for epoch in epochs:\n 
print('loading',prefix, epoch)\n sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n #arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)\n all_layers = sym.get_internals()\n sym = all_layers['fc1_output']\n model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)\n #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])\n model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))])\n model.set_params(arg_params, aux_params)\n nets.append(model)\n time_now = datetime.datetime.now()\n diff = time_now - time0\n print('model loading time', diff.total_seconds())\n\n ver_list = []\n ver_name_list = []\n for name in args.target.split(','):\n path = os.path.join(args.data_dir,name+\".bin\")\n if os.path.exists(path):\n print('loading.. ', name)\n data_set = load_bin(path, image_size)\n ver_list.append(data_set)\n ver_name_list.append(name)\n\n if args.badcase==0:\n for i in xrange(len(ver_list)):\n results = []\n for model in nets:\n acc1, std1, acc2, std2, xnorm, embeddings_list = test(ver_list[i], model, args.batch_size, args.nfolds)\n print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))\n print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))\n print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))\n results.append(acc2)\n print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))\n else:\n model = nets[0]\n test_badcase(ver_list[0], model, args.batch_size, args.target)\n\n\n" ]
[ [ "numpy.asarray", "sklearn.model_selection.KFold", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.square", "numpy.arange", "numpy.less", "numpy.subtract", "numpy.std", "numpy.argmax", "scipy.interpolate.interp1d", "numpy.zeros", "numpy.logical_not", "numpy.logical_and", "sklearn.decomposition.PCA", "numpy.sum", "numpy.linalg.norm", "sklearn.preprocessing.normalize" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
jeanp413/cudf
[ "3f0859824cae915e246a41be17d4d4789ec72b9f" ]
[ "python/cudf/cudf/tests/test_column.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION.\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.tests.utils import assert_eq\n\ndtypes = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"datetime64[ns]\",\n \"str\",\n \"category\",\n]\n\n\[email protected](params=dtypes, ids=dtypes)\ndef pandas_input(request):\n data = np.random.randint(0, 1000, 100)\n return pd.Series(data, dtype=request.param)\n\n\[email protected](\"offset\", [0, 1, 15])\[email protected](\"size\", [None, 50, 10, 0])\ndef test_column_offset_and_size(pandas_input, offset, size):\n col = cudf.core.column.as_column(pandas_input)\n col = cudf.core.column.build_column(\n data=col.base_data,\n dtype=col.dtype,\n mask=col.base_mask,\n size=size,\n offset=offset,\n children=col.base_children,\n )\n\n if cudf.utils.dtypes.is_categorical_dtype(col.dtype):\n assert col.size == col.codes.size\n assert col.size == (col.codes.data.size / col.codes.dtype.itemsize)\n elif pd.api.types.is_string_dtype(col.dtype):\n assert col.size == (col.children[0].size - 1)\n assert col.size == (\n (col.children[0].data.size / col.children[0].dtype.itemsize) - 1\n )\n else:\n assert col.size == (col.data.size / col.dtype.itemsize)\n\n got = cudf.Series(col)\n\n if offset is None:\n offset = 0\n if size is None:\n size = 100\n else:\n size = size + offset\n\n slicer = slice(offset, size)\n expect = pandas_input.iloc[slicer].reset_index(drop=True)\n\n assert_eq(expect, got)\n" ]
[ [ "pandas.api.types.is_string_dtype", "pandas.Series", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
weilinie/JARE
[ "13967b3525c54a52cb0217e388be3d0f8ae68447" ]
[ "real/run.py" ]
[ "import argparse\nimport os\nimport tensorflow as tf\nimport numpy as np\n\nimport models\nfrom libs.inputs import (\n get_filename_queue,\n get_input_image, get_input_cifar10,\n create_batch\n)\nfrom train import train\nfrom utils import pp\n\nparser = argparse.ArgumentParser(description='Train and run a GAN.')\n# Architecture\nparser.add_argument('--image-size', default=128, type=int, help='Size of image crops.')\nparser.add_argument('--output-size', default=64, type=int, help='Size of samples.')\nparser.add_argument('--c-dim', default=3, type=int, help='Number of channels.')\nparser.add_argument('--z-dim', default=512, type=int, help='Dimensionality of the latent space.')\nparser.add_argument('--gf-dim', default=64, type=int, help='Number of filters to use for generator.')\nparser.add_argument('--df-dim', default=64, type=int, help='Number of filters to use for discriminator.')\nparser.add_argument('--reg-param', default=10., type=float, help='Regularization parameter.')\nparser.add_argument('--g-architecture', default='conv4', type=str, help='Architecture for generator.')\nparser.add_argument('--d-architecture', default='conv4', type=str, help='Architecture for discriminator.')\nparser.add_argument('--gan-type', default='standard', type=str, help='Which type of GAN to use.')\n\n# Training\nparser.add_argument('--seed', default=124, type=int, help='let numpy.random and tf.random keep the same seed')\nparser.add_argument('--optimizer', default='jare', type=str, help='Which optimizer to use.')\nparser.add_argument('--opt-type', default='rmsprop', type=str, help='Which optimizer type to use.')\nparser.add_argument('--altgd-gsteps', default='1', type=int, help='How many training steps to use for generator.')\nparser.add_argument('--altgd-dsteps', default='1', type=int, help='How many training steps to use for discriminator.')\nparser.add_argument('--beta1', default='0.9', type=float, help='beta1 for adam optimizer')\nparser.add_argument('--beta2', default='0.999', type=float, help='beta2 for adam optimizer')\nparser.add_argument('--nsteps', default=200000, type=int, help='Number of steps to run training.')\nparser.add_argument('--ntest', default=500, type=int, help='How often to run tests.')\nparser.add_argument('--learning-rate', default=1e-4, type=float, help='Learning rate for the model.')\nparser.add_argument('--batch-size', default=64, type=int, help='Batchsize for training.')\nparser.add_argument('--log-dir', default='./logs', type=str, help='Where to store log and checkpoint files.')\nparser.add_argument('--sample-dir', default='./samples', type=str, help='Where to put samples during training.')\nparser.add_argument('--is-inception-scores', default=False, action='store_true',\n help='Whether to compute inception scores.')\nparser.add_argument('--fid-type', default=0, type=int,\n help='How to compute fid [0: No calculation, 1: without pre-stats, 2: with pre-stats]')\nparser.add_argument('--inception-dir', default='./inception', type=str, help='Where to put inception network.')\n\nparser.add_argument('--dataset', default='cifar-10', type=str, help='Which data set to use.')\nparser.add_argument('--data-dir', default='./data', type=str, help='Where data data is stored..')\nparser.add_argument('--split', default='train', type=str, help='Which split to use.')\n\n\ndef main():\n args = parser.parse_args()\n pp.pprint(vars(args))\n\n # seed\n np.random.seed(args.seed)\n tf.set_random_seed(args.seed)\n\n # Data\n filename_queue = get_filename_queue(\n split_file=os.path.join(args.data_dir, 
'splits', args.dataset, args.split + '.lst'),\n data_dir=os.path.join(args.data_dir, args.dataset)\n )\n\n if args.dataset == \"cifar-10\":\n image, label = get_input_cifar10(filename_queue)\n output_size = 32\n c_dim = 3\n else:\n image = get_input_image(filename_queue,\n output_size=args.output_size, image_size=args.image_size, c_dim=args.c_dim\n )\n output_size = args.output_size\n c_dim = args.c_dim\n\n image_batch = create_batch([image], batch_size=args.batch_size,\n num_preprocess_threads=16, min_queue_examples=10000)\n\n config = vars(args)\n\n generator = models.get_generator(args.g_architecture,\n output_size=args.output_size, c_dim=args.c_dim, f_dim=args.gf_dim)\n\n discriminator = models.get_discriminator(args.d_architecture,\n output_size=args.output_size, c_dim=args.c_dim, f_dim=args.df_dim)\n\n train(generator, discriminator, image_batch, config)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.set_random_seed", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
md11235/ssd.pytorch
[ "b168ee9c92cc1f53685fc4f42b906fcfb424d0c7" ]
[ "layers/modules/multibox_quadrilateral_loss.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\nfrom ..box_utils import match, log_sum_exp, match_quadrilaterals\n\n\nclass MultiBoxQuadrilaterralLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,\n use_gpu=True):\n super(MultiBoxQuadrilaterralLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = cfg['variance']\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,8)\n priors shape: torch.size(num_priors, 4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs, 9] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n num = loc_data.size(0) # batch size\n priors = priors[:loc_data.size(1), :]\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n # num is batch size\n loc_t = torch.Tensor(num, num_priors, 8)\n conf_t = torch.LongTensor(num, num_priors)\n # print(\"in loss function, target is {}\".format(targets))\n for idx in range(num): # for each image\n # print(\"targets: {}, {}\".format(idx, targets[idx]))\n truths = targets[idx][:, :-1].data\n # print(\"truths shape: {}\".format(truths.shape))\n # print(\"truths: {}\".format(truths))\n labels = targets[idx][:, -1].data\n defaults = priors.data\n # print(\"id {}, truths shape: {}, value: {}, default shape {}, value: {}\".format(idx, truths.shape, truths, defaults.shape, defaults))\n if truths[0][0] > truths[0][2]:\n print(\"compare: {} ??>?? 
{}\".format(truths[0][0], truths[0][2]))\n print(\"truths: {}\".format(truths))\n exit()\n match_quadrilaterals(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n\n pos = conf_t > 0\n num_pos = pos.sum(dim=1, keepdim=True)\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,8]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n # print(\"raw loc_p shape: {}, raw loc_t shape: {}\".format(loc_data.shape, loc_t.shape))\n loc_p = loc_data[pos_idx].view(-1, 8)\n loc_t = loc_t[pos_idx].view(-1, 8)\n # print(\"loc_p shape: {}, value: {}, loc_t shape: {}, value: {}\".format(loc_p.shape, loc_p, loc_t.shape, loc_t))\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c = loss_c.view(pos.size()[0], pos.size()[1]) #add line \n loss_c[pos] = 0 # filter out pos boxes for now\n loss_c = loss_c.view(num, -1)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n # print(\"number of positives:{}\".format(N))\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n " ]
[ [ "torch.LongTensor", "torch.Tensor", "torch.nn.functional.cross_entropy", "torch.nn.functional.smooth_l1_loss", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zeroam/TIL
[ "43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1" ]
[ "deep-learning-from-scratch/ch07/visualize_filter.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom simple_convnet import SimpleConvNet\n\n\ndef filter_show(filters, nx=8, margin=3, scale=10):\n FN, C, FH, FW = filters.shape\n ny = int(np.ceil(FN / nx))\n \n fig = plt.figure()\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n \n for i in range(FN):\n ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i,0], cmap=plt.cm.gray_r, interpolation='nearest')\n plt.show()\n \n\nnetwork = SimpleConvNet()\n# 무작위(랜덤) 초기화 후의 가중치\nfilter_show(network.params['W1'])\n\n# 학습된 가중치\nnetwork.load_params('params.pkl')\nfilter_show(network.params['W1'])" ]
[ [ "numpy.ceil", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AtticusJohnson/mmdetection
[ "d8d89bafcce13d3b32b1fb3366be3bb9830546c2" ]
[ "tools/test.py" ]
[ "import argparse\nimport os\n\nimport mmcv\nimport torch\nfrom mmcv import Config, DictAction\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import get_dist_info, init_dist, load_checkpoint\nfrom tools.fuse_conv_bn import fuse_module\n\nfrom mmdet.apis import multi_gpu_test, single_gpu_test\nfrom mmdet.core import wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMDet test (and eval) a model')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file in pickle format')\n parser.add_argument(\n '--fuse-conv-bn',\n action='store_true',\n help='Whether to fuse conv and bn, this will slightly increase'\n 'the inference speed')\n parser.add_argument(\n '--format-only',\n action='store_true',\n help='Format the output results without perform evaluation. It is'\n 'useful when you want to format the result to a specific format and '\n 'submit it to the test server')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n help='evaluation metrics, which depends on the dataset, e.g., \"bbox\",'\n ' \"segm\", \"proposal\" for COCO, and \"mAP\", \"recall\" for PASCAL VOC')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument(\n '--show-dir', help='directory where painted images will be saved')\n parser.add_argument(\n '--show-score-thr',\n type=float,\n default=0.3,\n help='score threshold (default: 0.3)')\n parser.add_argument(\n '--gpu-collect',\n action='store_true',\n help='whether to use gpu to collect results.')\n parser.add_argument(\n '--tmpdir',\n help='tmp directory used for collecting results from multiple '\n 'workers, available when gpu-collect is not specified')\n parser.add_argument(\n '--options', nargs='+', action=DictAction, help='arguments in dict')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n\n assert args.out or args.eval or args.format_only or args.show \\\n or args.show_dir, \\\n ('Please specify at least one operation (save/eval/format/show the '\n 'results / save the results) with the argument \"--out\", \"--eval\"'\n ', \"--format-only\", \"--show\" or \"--show-dir\"')\n\n if args.eval and args.format_only:\n raise ValueError('--eval and --format_only cannot be both specified')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n\n data_loader = build_dataloader(\n dataset,\n samples_per_gpu=1,\n 
workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n if args.fuse_conv_bn:\n model = fuse_module(model)\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,\n args.show_score_thr)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False)\n outputs = multi_gpu_test(model, data_loader, args.tmpdir,\n args.gpu_collect)\n\n rank, _ = get_dist_info()\n if rank == 0:\n if args.out:\n print(f'\\nwriting results to {args.out}')\n mmcv.dump(outputs, args.out)\n kwargs = {} if args.options is None else args.options\n if args.format_only:\n dataset.format_results(outputs, **kwargs)\n if args.eval:\n dataset.evaluate(outputs, args.eval, **kwargs)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.current_device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gzydominating/tf-faster-rcnn
[ "83668f40a3bc725e261c8aab7cb197b603640bed" ]
[ "lib/model/config.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport os.path as osp\nimport numpy as np\n# `pip install easydict` if you don't have it\nfrom easydict import EasyDict as edict\n\n__C = edict()\n# Consumers can get config by:\n# from fast_rcnn_config import cfg\ncfg = __C\n\n#\n# Training options\n#\n__C.TRAIN = edict()\n\n# Initial learning rate\n__C.TRAIN.LEARNING_RATE = 0.001\n\n# Momentum\n__C.TRAIN.MOMENTUM = 0.9\n\n# Weight decay, for regularization\n__C.TRAIN.WEIGHT_DECAY = 0.0001\n\n# Factor for reducing the learning rate\n__C.TRAIN.GAMMA = 0.1\n\n# Step size for reducing the learning rate, currently only support one step\n__C.TRAIN.STEPSIZE = [30000]\n\n# Iteration intervals for showing the loss during training, on command line interface\n__C.TRAIN.DISPLAY = 10\n\n# Whether to double the learning rate for bias\n__C.TRAIN.DOUBLE_BIAS = True\n\n# Whether to initialize the weights with truncated normal distribution \n__C.TRAIN.TRUNCATED = False\n\n# Whether to have weight decay on bias as well\n__C.TRAIN.BIAS_DECAY = False\n\n# Whether to add ground truth boxes to the pool when sampling regions\n__C.TRAIN.USE_GT = False\n\n# Whether to use aspect-ratio grouping of training images, introduced merely for saving\n# GPU memory\n__C.TRAIN.ASPECT_GROUPING = False\n\n# The number of snapshots kept, older ones are deleted to save space\n__C.TRAIN.SNAPSHOT_KEPT = 3\n\n# The time interval for saving tensorflow summaries\n__C.TRAIN.SUMMARY_INTERVAL = 180\n\n# Scale to use during training (can list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TRAIN.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 1\n\n# Minibatch size (number of regions of interest [ROIs])\n__C.TRAIN.BATCH_SIZE = 128\n\n# Fraction of minibatch that is labeled foreground (i.e. class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for a ROI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.1\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Train bounding-box regressors\n__C.TRAIN.BBOX_REG = True\n\n# Overlap required between a ROI and ground-truth box in order for that ROI to\n# be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 5000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'\n\n# Normalize the targets (subtract empirical mean, divide by empirical stddev)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS = True\n\n# Deprecated (inside weights)\n__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Normalize the targets using \"precomputed\" (or made up) means and stdevs\n# (BBOX_NORMALIZE_TARGETS must also be True)\n__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True\n\n__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)\n\n__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)\n\n# Train using these proposals\n__C.TRAIN.PROPOSAL_METHOD = 'gt'\n\n# Make minibatches from images that have similar aspect ratios (i.e. 
both\n# tall and thin or both short and wide) in order to avoid wasting computation\n# on zero-padding.\n\n# Use RPN to detect objects\n__C.TRAIN.HAS_RPN = True\n\n# IOU >= thresh: positive example\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n\n# IOU < thresh: negative example\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n\n# If an anchor satisfied by positive and negative conditions set to negative\n__C.TRAIN.RPN_CLOBBER_POSITIVES = False\n\n# Max number of foreground examples\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n\n# Total number of examples\n__C.TRAIN.RPN_BATCHSIZE = 256\n\n# NMS threshold used on RPN proposals\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n\n# Deprecated (outside weights)\n__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n\n# Give the positive RPN examples weight of p * 1 / {num positives}\n# and give negatives a weight of (1 - p)\n# Set to -1.0 to use uniform example weighting\n__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# Whether to use all ground truth bounding boxes for training, \n# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''\n__C.TRAIN.USE_ALL_GT = True\n\n#\n# Testing options\n#\n__C.TEST = edict()\n\n# Scale to use during testing (can NOT list multiple scales)\n# The scale is the pixel size of an image's shortest side\n__C.TEST.SCALES = (600,)\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Experimental: treat the (K+1) units in the cls_score layer as linear\n# predictors (trained, eg, with one-vs-rest SVMs).\n__C.TEST.SVM = False\n\n# Test using bounding-box regressors\n__C.TEST.BBOX_REG = True\n\n# Propose boxes\n__C.TEST.HAS_RPN = False\n\n# Test using these proposals\n__C.TEST.PROPOSAL_METHOD = 'gt'\n\n## NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring boxes to keep before apply NMS to RPN proposals\n__C.TEST.RPN_PRE_NMS_TOP_N = 6000\n\n# Number of top scoring boxes to keep after applying NMS to RPN proposals\n__C.TEST.RPN_POST_NMS_TOP_N = 300\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)\n# __C.TEST.RPN_MIN_SIZE = 16\n\n# Testing mode, default to be 'nms', 'top' is slower but better\n# See report for details\n__C.TEST.MODE = 'nms'\n\n# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select\n__C.TEST.RPN_TOP_N = 5000\n\n#\n# ResNet options\n#\n\n__C.RESNET = edict()\n\n# Option to set if max-pooling is appended after crop_and_resize. 
\n# if true, the region will be resized to a square of 2xPOOLING_SIZE, \n# then 2x2 max-pooling is applied; otherwise the region will be directly\n# resized to a square of POOLING_SIZE\n__C.RESNET.MAX_POOL = False\n\n# Number of fixed blocks during training, by default the first of all 4 blocks is fixed\n# Range: 0 (none) to 3 (all)\n__C.RESNET.FIXED_BLOCKS = 1\n\n#\n# MobileNet options\n#\n\n__C.MOBILENET = edict()\n\n# Whether to regularize the depth-wise filters during training\n__C.MOBILENET.REGU_DEPTH = False\n\n# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed\n# Range: 0 (none) to 12 (all)\n__C.MOBILENET.FIXED_LAYERS = 5\n\n# Weight decay for the mobilenet weights\n__C.MOBILENET.WEIGHT_DECAY = 0.00004\n\n# Depth multiplier\n__C.MOBILENET.DEPTH_MULTIPLIER = 1.\n\n#\n# MISC\n#\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility\n__C.RNG_SEED = 3\n\n# Root directory of project\n__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))\n\n# Data directory\n__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))\n\n# Name (or path to) the matlab executable\n__C.MATLAB = 'matlab'\n\n# Place outputs under an experiments directory\n__C.EXP_DIR = 'default'\n\n# Use GPU implementation of non-maximum suppression\n__C.USE_GPU_NMS = True\n\n# Use an end-to-end tensorflow model.\n# Note: models in E2E tensorflow mode have only been tested in feed-forward mode,\n# but these models are exportable to other tensorflow instances as GraphDef files.\n__C.USE_E2E_TF = True\n\n# Default pooling mode, only 'crop' is available\n__C.POOLING_MODE = 'roi'\n\n# Size of the pooled region after RoI pooling\n__C.POOLING_SIZE = 7\n\n# Anchor scales for RPN\n__C.ANCHOR_SCALES = [8,16,32]\n\n# Anchor ratios for RPN\n__C.ANCHOR_RATIOS = [0.5,1,2]\n\n# Number of filters for the RPN layer\n__C.RPN_CHANNELS = 512\n\n\ndef get_output_dir(imdb, weights_filename):\n \"\"\"Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef get_output_tb_dir(imdb, weights_filename):\n \"\"\"Return the directory where tensorflow summaries are placed.\n If the directory does not exist, it is created.\n\n A canonical path is built using the name from an imdb and a network\n (if not None).\n \"\"\"\n outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))\n if weights_filename is None:\n weights_filename = 'default'\n outdir = osp.join(outdir, weights_filename)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef _merge_a_into_b(a, b):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n if type(a) is not edict:\n return\n\n for k, v in a.items():\n # a must specify keys that are in b\n if k not in b:\n raise KeyError('{} is not a valid config key'.format(k))\n\n # the types must match, too\n old_type = type(b[k])\n if old_type is not 
type(v):\n if isinstance(b[k], np.ndarray):\n v = np.array(v, dtype=b[k].dtype)\n else:\n raise ValueError(('Type mismatch ({} vs. {}) '\n 'for config key: {}').format(type(b[k]),\n type(v), k))\n\n # recursively merge dicts\n if type(v) is edict:\n try:\n _merge_a_into_b(a[k], b[k])\n except:\n print(('Error under config key: {}'.format(k)))\n raise\n else:\n b[k] = v\n\n\ndef cfg_from_file(filename):\n \"\"\"Load a config file and merge it into the default options.\"\"\"\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, __C)\n\n\ndef cfg_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert subkey in d\n d = d[subkey]\n subkey = key_list[-1]\n assert subkey in d\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
limscoder/predator
[ "5999468d417dbab159529b8764866e2581802a01" ]
[ "models/lstm/model.py" ]
[ "\"\"\"trains lstm model on coin data\"\"\"\n\n# adapted from -- https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/\n\nfrom pandas import DataFrame, concat, read_csv\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras import backend\nfrom matplotlib import pyplot\nimport tensorflow as tf\nimport tensorflowjs as tfjs\nimport json\n\ndef timesteps_as_features(data, columns, n_in=1, n_out=1, dropnan=True):\n\tn_vars = 1 if type(data) is list else data.shape[1]\n\tdf = DataFrame(data)\n\tcols, names = list(), list()\n\t# input sequence (t-n, ... t-1)\n\tfor i in range(n_in, 0, -1):\n\t\tcols.append(df.shift(i))\n\t\tnames += [('x:%s(t-%d)' % (columns[j], i)) for j in range(n_vars)]\n\t# forecast sequence (t, t+1, ... t+n)\n\tfor i in range(0, n_out):\n\t\tcols.append(df.shift(-i))\n\t\tnames += [('y:%s(t+%d)' % (columns[j], i)) for j in range(n_vars)]\n\t# put it all together\n\tagg = concat(cols, axis=1)\n\tagg.columns = names\n\t# drop rows with NaN values\n\tif dropnan:\n\t\tagg.dropna(inplace=True)\n\treturn agg\n\ndef load_data(predict='btc_usd', in_steps=1, out_steps=1):\n # parse data\n dataset = read_csv('data/coins.csv', header=0, index_col=0)\n\n # find prediction target\n target_col = -1\n for idx, col in enumerate(dataset.columns):\n if col == predict:\n target_col = idx\n break\n if target_col < 0:\n raise Exception(\"invalid prediction column\")\n\n # validate that all data are floats, labeled columns need to be encoded\n values = dataset.values\n values = values.astype('float32')\n\n # normalize features\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled = scaler.fit_transform(values)\n\n # generate previous steps as new columns and strip current feature values\n y_col = '{}(t+{})'.format(predict, out_steps - 1)\n def include_feature(x):\n parts = x.split(':')\n if parts[0] == 'y' and not parts[1].startswith(y_col):\n return False\n return True\n\n sequenced = timesteps_as_features(scaled, dataset.columns, in_steps, out_steps)\n drop_cols = [idx for idx, x in enumerate(sequenced.columns) if not include_feature(x)]\n sequenced.drop(sequenced.columns[drop_cols], axis=1, inplace=True)\n return sequenced\n\ndef split_data(data, test_ratio=0.25):\n # split into train and test sets\n values = data.values\n n_train = int(len(values) * (1 - test_ratio))\n train = values[:n_train, :]\n test = values[n_train:, :]\n # split into input and outputs\n train_X, train_y = train[:, :-1], train[:, -1]\n test_X, test_y = test[:, :-1], test[:, -1]\n # reshape input to be 3D [samples, timesteps, features]\n train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))\n test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))\n return {\"train_X\": train_X, \"test_X\": test_X, \"train_y\": train_y, \"test_y\": test_y}\n\ndef compile(data, layer_size):\n model = Sequential()\n model.add(LSTM(layer_size, input_shape=(data['train_X'].shape[1], data['train_X'].shape[2]), name=\"predictatronInput\"))\n model.add(Dense(1, activation='sigmoid', name=\"predictatronOutput\"))\n model.compile(loss='mae', optimizer='adam')\n return model\n\ndef plot(history):\n pyplot.plot(history.history['loss'], label='train')\n pyplot.plot(history.history['val_loss'], label='test')\n pyplot.legend()\n pyplot.show()\n\ndef train(target, future, graph=False): \n in_step=25\n out_step=future\n data = load_data(target, in_step, out_step)\n sets = split_data(data)\n\n # 
train model\n model = compile(sets, 100)\n history = model.fit(\n sets['train_X'],\n sets['train_y'],\n epochs=30,\n batch_size=100,\n validation_data=(sets['test_X'], sets['test_y']),\n verbose=2,\n shuffle=False)\n\n # save model\n model_key = 'model-{}-{}m'.format(target, future)\n model_path = 'frozen/{}'.format(model_key)\n # for use in js\n tfjs.converters.save_keras_model(model, model_path)\n # for use in golang\n tf_path = 'frozen-tf/{}'.format(model_key)\n builder = tf.saved_model.builder.SavedModelBuilder(tf_path)\n builder.add_meta_graph_and_variables(backend.get_session(),[model_key])\n builder.save()\n # model metadata\n model_params = {\n 'target': target,\n 'model_key': model_key,\n 'input_operation': 'predictatronInput_input',\n 'input_steps': in_step,\n 'output_steps': out_step,\n 'output_operation': 'predictatronOutput/Sigmoid',\n 'predict_future_duration': future,\n 'columns': []}\n column_count = int((len(data.columns) - 1) / in_step)\n for idx in range(column_count):\n parts = data.columns[idx].split(':')\n model_params['columns'].append(parts[1].split('(')[0])\n with open('{}/params.json'.format(model_path), 'w') as out:\n json.dump(model_params, out)\n with open('{}/params.json'.format(tf_path), 'w') as out:\n json.dump(model_params, out)\n \n # plot model\n if graph:\n plot(history)\n\nif __name__ == \"__main__\":\n train('btc_usd', 5, False)\n train('btc_usd', 15, False)\n train('btc_usd', 60, False)\n train('eth_usd', 5, False)\n train('eth_usd', 15, False)\n train('eth_usd', 60, False)\n train('bch_usd', 5, False)\n train('bch_usd', 15, False)\n train('bch_usd', 60, False)\n " ]
[ [ "matplotlib.pyplot.legend", "pandas.concat", "pandas.read_csv", "tensorflow.saved_model.builder.SavedModelBuilder", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
r00tDada/Mini_Project_Semester_7
[ "fd84be13d91c9ffca8288c7787a0330a5aee7950" ]
[ "Resume_Matcher/fileReader.py" ]
[ "from operator import index\nfrom pandas._config.config import options\nimport Cleaner\nimport textract as tx\nimport pandas as pd\nimport numpy\nimport os\nimport tf_idf\n\nuser = os.getcwd()\nprint(user)\n\n\nresume_dir = user+\"/media/Resume/\"\njob_desc_dir = user+\"/media/JobDesc/\"\nresume_names = os.listdir(resume_dir)\njob_description_names = os.listdir(job_desc_dir)\nprint(resume_names)\nprint(job_description_names)\ndocument = []\n\n\ndef read_resumes(list_of_resumes, resume_directory):\n placeholder = []\n for res in list_of_resumes:\n temp = []\n temp.append(res)\n text = tx.process(resume_directory+res, encoding='ascii')\n text = str(text, 'utf-8')\n temp.append(text)\n placeholder.append(temp)\n return placeholder\n\n\ndocument = read_resumes(resume_names, resume_dir)\n\n\ndef get_cleaned_words(document):\n for i in range(len(document)):\n raw = Cleaner.Cleaner(document[i][1])\n document[i].append(\" \".join(raw[0]))\n document[i].append(\" \".join(raw[1]))\n document[i].append(\" \".join(raw[2]))\n sentence = tf_idf.do_tfidf(document[i][3].split(\" \"))\n document[i].append(sentence)\n return document\n\n\nDoc = get_cleaned_words(document)\n\nDatabase = pd.DataFrame(document, columns=[\n \"Name\", \"Context\", \"Cleaned\", \"Selective\", \"Selective_Reduced\", \"TF_Based\"])\n\nDatabase.to_csv(\n user+\"/Resume_Matcher/Resume_Data.csv\", index=False)\n\n# Database.to_json(\"Resume_Data.json\", index=False)\n\n\ndef read_jobdescriptions(job_description_names, job_desc_dir):\n placeholder = []\n for tes in job_description_names:\n temp = []\n temp.append(tes)\n text = tx.process(job_desc_dir+tes, encoding='ascii')\n text = str(text, 'utf-8')\n temp.append(text)\n placeholder.append(temp)\n return placeholder\n\n\njob_document = read_jobdescriptions(job_description_names, job_desc_dir)\n\nJd = get_cleaned_words(job_document)\n\njd_database = pd.DataFrame(Jd, columns=[\n \"Name\", \"Context\", \"Cleaned\", \"Selective\", \"Selective_Reduced\", \"TF_Based\"])\n\njd_database.to_csv(\n user+\"/Resume_Matcher/Job_Data.csv\", index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bhargavyagnik/TRex-Bot-Google-Chrome-Offline-game-
[ "71581096a35c1935c973aa9cfda480c0b057d50b" ]
[ "temp.py" ]
[ "import pyautogui as pg\r\nfrom PIL import ImageGrab\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\n\r\n\r\ndef up(sleep_time):\r\n time.sleep(sleep_time)\r\n pg.press(\"space\")\r\n time.sleep(0.1)\r\n pg.press(\"down\")\r\n\r\ndef boot():\r\n template=cv2.imread('cactus.jpg',0)\r\n template2 = cv2.imread('cactus2.jpg', 0)\r\n template3 = cv2.imread('cactus3.jpg', 0)\r\n template4 = cv2.imread('cactus4.jpg', 0)\r\n template5 = cv2.imread('cactus5.jpg', 0)\r\n template6 = cv2.imread('bird1.jpg',0)\r\n flag=0\r\n sleep_time=0.150\r\n #site=(140,350,810,500) #for trek site\r\n site=(140,155,713,282) # for google chrome\r\n while(True):\r\n printscreen_pil=ImageGrab.grab(bbox=site)\r\n im=cv2.cvtColor(np.array(printscreen_pil),cv2.COLOR_BGR2RGB)\r\n #temp_im=im[70:150,175:260]\r\n im_gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\r\n res1=cv2.matchTemplate(im_gray,template,cv2.TM_CCOEFF_NORMED)\r\n res5 = cv2.matchTemplate(im_gray, template5, cv2.TM_CCOEFF_NORMED)\r\n res4 = cv2.matchTemplate(im_gray, template4, cv2.TM_CCOEFF_NORMED)\r\n res3 = cv2.matchTemplate(im_gray, template3, cv2.TM_CCOEFF_NORMED)\r\n res2 = cv2.matchTemplate(im_gray, template2, cv2.TM_CCOEFF_NORMED)\r\n res6 = cv2.matchTemplate(im_gray, template6, cv2.TM_CCOEFF_NORMED)\r\n threshold=0.75\r\n loc1=np.where(res1>=threshold)\r\n loc2=np.where(res2>=threshold)\r\n loc3 = np.where(res3 >= threshold)\r\n loc4 = np.where(res4 >= threshold)\r\n loc5 = np.where(res5 >= threshold)\r\n loc6 = np.where(res6 >= threshold)\r\n for pt in zip(*loc1[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n for pt in zip(*loc2[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n for pt in zip(*loc3[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n for pt in zip(*loc4[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n for pt in zip(*loc5[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n for pt in zip(*loc6[::-1]):\r\n flag=1\r\n cv2.rectangle(im,pt,(pt[0]+30,pt[1]+30),(0,255,255),2)\r\n cv2.imshow('window',im)\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n cv2.destroyAllWindows()\r\n break\r\n\r\nif __name__==\"__main__\":\r\n boot()\r\n\r\n" ]
[ [ "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
himkt/pymc3
[ "fde52a4a69be1b0887a2f7861801fb48c941bbe6" ]
[ "pymc3/tests/test_gp.py" ]
[ "# pylint:disable=unused-variable\nfrom functools import reduce\nfrom ..math import cartesian, kronecker\nfrom operator import add\nimport pymc3 as pm\nimport theano\nimport theano.tensor as tt\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\n\nnp.random.seed(101)\n\n\nclass TestZeroMean(object):\n def test_value(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n zero_mean = pm.gp.mean.Zero()\n M = theano.function([], zero_mean(X))()\n assert np.all(M==0)\n assert M.shape == (10, )\n\n\nclass TestConstantMean(object):\n def test_value(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n const_mean = pm.gp.mean.Constant(6)\n M = theano.function([], const_mean(X))()\n assert np.all(M==6)\n assert M.shape == (10, )\n\n\nclass TestLinearMean(object):\n def test_value(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n linear_mean = pm.gp.mean.Linear(2, 0.5)\n M = theano.function([], linear_mean(X))()\n npt.assert_allclose(M[1], 0.7222, atol=1e-3)\n assert M.shape == (10, )\n\n\nclass TestAddProdMean(object):\n def test_add(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n mean1 = pm.gp.mean.Linear(coeffs=2, intercept=0.5)\n mean2 = pm.gp.mean.Constant(2)\n mean = mean1 + mean2 + mean2\n M = theano.function([], mean(X))()\n npt.assert_allclose(M[1], 0.7222 + 2 + 2, atol=1e-3)\n\n def test_prod(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n mean1 = pm.gp.mean.Linear(coeffs=2, intercept=0.5)\n mean2 = pm.gp.mean.Constant(2)\n mean = mean1 * mean2 * mean2\n M = theano.function([], mean(X))()\n npt.assert_allclose(M[1], 0.7222 * 2 * 2, atol=1e-3)\n\n def test_add_multid(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n A = np.array([1, 2, 3])\n b = 10\n with pm.Model() as model:\n mean1 = pm.gp.mean.Linear(coeffs=A, intercept=b)\n mean2 = pm.gp.mean.Constant(2)\n mean = mean1 + mean2 + mean2\n M = theano.function([], mean(X))()\n npt.assert_allclose(M[1], 10.8965 + 2 + 2, atol=1e-3)\n\n def test_prod_multid(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n A = np.array([1, 2, 3])\n b = 10\n with pm.Model() as model:\n mean1 = pm.gp.mean.Linear(coeffs=A, intercept=b)\n mean2 = pm.gp.mean.Constant(2)\n mean = mean1 * mean2 * mean2\n M = theano.function([], mean(X))()\n npt.assert_allclose(M[1], 10.8965 * 2 * 2, atol=1e-3)\n\n\nclass TestCovAdd(object):\n def test_symadd_cov(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov1 = pm.gp.cov.ExpQuad(1, 0.1)\n cov2 = pm.gp.cov.ExpQuad(1, 0.1)\n cov = cov1 + cov2\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_rightadd_scalar(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n a = 1\n cov = pm.gp.cov.ExpQuad(1, 0.1) + a\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 1.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_leftadd_scalar(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n a = 1\n cov = a + pm.gp.cov.ExpQuad(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 1.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_rightadd_matrix(self):\n X = 
np.linspace(0, 1, 10)[:, None]\n M = 2 * np.ones((10, 10))\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(1, 0.1) + M\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_leftadd_matrixt(self):\n X = np.linspace(0, 1, 10)[:, None]\n M = 2 * tt.ones((10, 10))\n with pm.Model() as model:\n cov = M + pm.gp.cov.ExpQuad(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_leftprod_matrix(self):\n X = np.linspace(0, 1, 3)[:, None]\n M = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]])\n with pm.Model() as model:\n cov = M + pm.gp.cov.ExpQuad(1, 0.1)\n cov_true = pm.gp.cov.ExpQuad(1, 0.1) + M\n K = theano.function([], cov(X))()\n K_true = theano.function([], cov_true(X))()\n assert np.allclose(K, K_true)\n\n\nclass TestCovProd(object):\n def test_symprod_cov(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov1 = pm.gp.cov.ExpQuad(1, 0.1)\n cov2 = pm.gp.cov.ExpQuad(1, 0.1)\n cov = cov1 * cov2\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.53940 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_rightprod_scalar(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n a = 2\n cov = pm.gp.cov.ExpQuad(1, 0.1) * a\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_leftprod_scalar(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n a = 2\n cov = a * pm.gp.cov.ExpQuad(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_rightprod_matrix(self):\n X = np.linspace(0, 1, 10)[:, None]\n M = 2 * np.ones((10, 10))\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(1, 0.1) * M\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_leftprod_matrix(self):\n X = np.linspace(0, 1, 3)[:, None]\n M = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]])\n with pm.Model() as model:\n cov = M * pm.gp.cov.ExpQuad(1, 0.1)\n cov_true = pm.gp.cov.ExpQuad(1, 0.1) * M\n K = theano.function([], cov(X))()\n K_true = theano.function([], cov_true(X))()\n assert np.allclose(K, K_true)\n\n def test_multiops(self):\n X = np.linspace(0, 1, 3)[:, None]\n M = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]])\n with pm.Model() as model:\n cov1 = 3 + pm.gp.cov.ExpQuad(1, 0.1) + M * pm.gp.cov.ExpQuad(1, 0.1) * M * pm.gp.cov.ExpQuad(1, 0.1)\n cov2 = pm.gp.cov.ExpQuad(1, 0.1) * M * pm.gp.cov.ExpQuad(1, 0.1) * M + pm.gp.cov.ExpQuad(1, 0.1) + 3\n K1 = theano.function([], cov1(X))()\n K2 = theano.function([], cov2(X))()\n assert np.allclose(K1, K2)\n # check diagonal\n K1d = theano.function([], cov1(X, diag=True))()\n K2d = theano.function([], cov2(X, diag=True))()\n npt.assert_allclose(np.diag(K1), K2d, atol=1e-5)\n 
npt.assert_allclose(np.diag(K2), K1d, atol=1e-5)\n\n\nclass TestCovKron(object):\n def test_symprod_cov(self):\n X1 = np.linspace(0, 1, 10)[:, None]\n X2 = np.linspace(0, 1, 10)[:, None]\n X = cartesian(X1, X2)\n with pm.Model() as model:\n cov1 = pm.gp.cov.ExpQuad(1, 0.1)\n cov2 = pm.gp.cov.ExpQuad(1, 0.1)\n cov = pm.gp.cov.Kron([cov1, cov2])\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 1 * 0.53940, atol=1e-3)\n npt.assert_allclose(K[0, 11], 0.53940 * 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_multiops(self):\n X1 = np.linspace(0, 1, 3)[:, None]\n X21 = np.linspace(0, 1, 5)[:, None]\n X22 = np.linspace(0, 1, 4)[:, None]\n X2 = cartesian(X21, X22)\n X = cartesian(X1, X21, X22)\n with pm.Model() as model:\n cov1 = 3 + pm.gp.cov.ExpQuad(1, 0.1) + pm.gp.cov.ExpQuad(1, 0.1) * pm.gp.cov.ExpQuad(1, 0.1)\n cov2 = pm.gp.cov.ExpQuad(1, 0.1) * pm.gp.cov.ExpQuad(2, 0.1)\n cov = pm.gp.cov.Kron([cov1, cov2])\n K_true = kronecker(theano.function([], cov1(X1))(), theano.function([], cov2(X2))()).eval()\n K = theano.function([], cov(X))()\n npt.assert_allclose(K_true, K)\n\n\nclass TestCovSliceDim(object):\n def test_slice1(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(3, 0.1, active_dims=[0, 0, 1])\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.20084298, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_slice2(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(3, ls=[0.1, 0.1], active_dims=[1,2])\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.34295549, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_slice3(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(3, ls=np.array([0.1, 0.1]), active_dims=[1,2])\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.34295549, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_diffslice(self):\n X = np.linspace(0, 1, 30).reshape(10, 3)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(3, ls=0.1, active_dims=[1, 0, 0]) + pm.gp.cov.ExpQuad(3, ls=[0.1, 0.2, 0.3])\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.683572, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_raises(self):\n lengthscales = 2.0\n with pytest.raises(ValueError):\n pm.gp.cov.ExpQuad(1, lengthscales, [True, False])\n pm.gp.cov.ExpQuad(2, lengthscales, [True])\n\n\nclass TestStability(object):\n def test_stable(self):\n X = np.random.uniform(low=320., high=400., size=[2000, 2])\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(2, 0.1)\n dists = theano.function([], cov.square_dist(X, X))()\n assert not np.any(dists < 0)\n\n\nclass TestExpQuad(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.53940, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.53940, atol=1e-3)\n # check diagonal\n Kd = 
theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_2d(self):\n X = np.linspace(0, 1, 10).reshape(5, 2)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(2, 0.5)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.820754, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_2dard(self):\n X = np.linspace(0, 1, 10).reshape(5, 2)\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(2, np.array([1, 2]))\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.969607, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_inv_lengthscale(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.ExpQuad(1, ls_inv=10)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.53940, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.53940, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestWhiteNoise(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.WhiteNoise(sigma=0.5)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.0, atol=1e-3)\n npt.assert_allclose(K[0, 0], 0.5**2, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n # check predict\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.0, atol=1e-3)\n # white noise predicting should return all zeros\n npt.assert_allclose(K[0, 0], 0.0, atol=1e-3)\n\n\nclass TestConstant(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Constant(2.5)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 2.5, atol=1e-3)\n npt.assert_allclose(K[0, 0], 2.5, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 2.5, atol=1e-3)\n npt.assert_allclose(K[0, 0], 2.5, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestRatQuad(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.RatQuad(1, ls=0.1, alpha=0.5)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.66896, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.66896, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestExponential(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Exponential(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.57375, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.57375, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestMatern52(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Matern52(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.46202, atol=1e-3)\n K = theano.function([], cov(X, X))()\n 
npt.assert_allclose(K[0, 1], 0.46202, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestMatern32(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Matern32(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.42682, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.42682, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestCosine(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Cosine(1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.766, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.766, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestPeriodic(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Periodic(1, 0.1, 0.1)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.00288, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.00288, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestLinear(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Linear(1, 0.5)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.19444, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.19444, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestPolynomial(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n with pm.Model() as model:\n cov = pm.gp.cov.Polynomial(1, 0.5, 2, 0)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.03780, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.03780, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n\nclass TestWarpedInput(object):\n def test_1d(self):\n X = np.linspace(0, 1, 10)[:, None]\n def warp_func(x, a, b, c):\n return x + (a * tt.tanh(b * (x - c)))\n with pm.Model() as model:\n cov_m52 = pm.gp.cov.Matern52(1, 0.2)\n cov = pm.gp.cov.WarpedInput(1, warp_func=warp_func, args=(1, 10, 1), cov_func=cov_m52)\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[0, 1], 0.79593, atol=1e-3)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[0, 1], 0.79593, atol=1e-3)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_raises(self):\n cov_m52 = pm.gp.cov.Matern52(1, 0.2)\n with pytest.raises(TypeError):\n pm.gp.cov.WarpedInput(1, cov_m52, \"str is not callable\")\n with pytest.raises(TypeError):\n pm.gp.cov.WarpedInput(1, \"str is not Covariance object\", lambda x: x)\n\n\nclass TestGibbs(object):\n def test_1d(self):\n X = np.linspace(0, 2, 10)[:, None]\n def tanh_func(x, x1, x2, w, x0):\n return (x1 + x2) / 2.0 - (x1 - x2) / 2.0 * tt.tanh((x - x0) / w)\n with pm.Model() as model:\n cov = pm.gp.cov.Gibbs(1, tanh_func, args=(0.05, 0.6, 
0.4, 1.0))\n K = theano.function([], cov(X))()\n npt.assert_allclose(K[2, 3], 0.136683, atol=1e-4)\n K = theano.function([], cov(X, X))()\n npt.assert_allclose(K[2, 3], 0.136683, atol=1e-4)\n # check diagonal\n Kd = theano.function([], cov(X, diag=True))()\n npt.assert_allclose(np.diag(K), Kd, atol=1e-5)\n\n def test_raises(self):\n with pytest.raises(TypeError):\n pm.gp.cov.Gibbs(1, \"str is not callable\")\n with pytest.raises(NotImplementedError):\n pm.gp.cov.Gibbs(2, lambda x: x)\n with pytest.raises(NotImplementedError):\n pm.gp.cov.Gibbs(3, lambda x: x, active_dims=[0,1])\n\n\nclass TestHandleArgs(object):\n def test_handleargs(self):\n def func_noargs(x):\n return x\n def func_onearg(x, a):\n return x + a\n def func_twoarg(x, a, b):\n return x + a + b\n x = 100\n a = 2\n b = 3\n func_noargs2 = pm.gp.cov.handle_args(func_noargs, None)\n func_onearg2 = pm.gp.cov.handle_args(func_onearg, a)\n func_twoarg2 = pm.gp.cov.handle_args(func_twoarg, args=(a, b))\n assert func_noargs(x) == func_noargs2(x, args=None)\n assert func_onearg(x, a) == func_onearg2(x, args=a)\n assert func_twoarg(x, a, b) == func_twoarg2(x, args=(a, b))\n\n\nclass TestCoregion(object):\n def setup_method(self):\n self.nrows = 6\n self.ncols = 3\n self.W = np.random.rand(self.nrows, self.ncols)\n self.kappa = np.random.rand(self.nrows)\n self.B = np.dot(self.W, self.W.T) + np.diag(self.kappa)\n self.rand_rows = np.random.randint(0, self.nrows, size=(20, 1))\n self.rand_cols = np.random.randint(0, self.ncols, size=(10, 1))\n self.X = np.concatenate((self.rand_rows, np.random.rand(20, 1)), axis=1)\n self.Xs = np.concatenate((self.rand_cols, np.random.rand(10, 1)), axis=1)\n\n def test_full(self):\n B_mat = self.B[self.rand_rows, self.rand_rows.T]\n with pm.Model() as model:\n B = pm.gp.cov.Coregion(2, W=self.W, kappa=self.kappa, active_dims=[0])\n npt.assert_allclose(\n B(np.array([[2, 1.5], [3, -42]])).eval(),\n self.B[2:4, 2:4]\n )\n npt.assert_allclose(B(self.X).eval(), B_mat)\n\n def test_fullB(self):\n B_mat = self.B[self.rand_rows, self.rand_rows.T]\n with pm.Model() as model:\n B = pm.gp.cov.Coregion(1, B=self.B)\n npt.assert_allclose(\n B(np.array([[2], [3]])).eval(),\n self.B[2:4, 2:4]\n )\n npt.assert_allclose(B(self.X).eval(), B_mat)\n\n def test_Xs(self):\n B_mat = self.B[self.rand_rows, self.rand_cols.T]\n with pm.Model() as model:\n B = pm.gp.cov.Coregion(2, W=self.W, kappa=self.kappa, active_dims=[0])\n npt.assert_allclose(\n B(np.array([[2, 1.5]]), np.array([[3, -42]])).eval(),\n self.B[2, 3]\n )\n npt.assert_allclose(B(self.X, self.Xs).eval(), B_mat)\n\n def test_diag(self):\n B_diag = np.diag(self.B)[self.rand_rows.ravel()]\n with pm.Model() as model:\n B = pm.gp.cov.Coregion(2, W=self.W, kappa=self.kappa, active_dims=[0])\n npt.assert_allclose(\n B(np.array([[2, 1.5]]), diag=True).eval(),\n np.diag(self.B)[2]\n )\n npt.assert_allclose(B(self.X, diag=True).eval(), B_diag)\n\n def test_raises(self):\n with pm.Model() as model:\n with pytest.raises(ValueError):\n B = pm.gp.cov.Coregion(2, W=self.W, kappa=self.kappa)\n\n def test_raises2(self):\n with pm.Model() as model:\n with pytest.raises(ValueError):\n B = pm.gp.cov.Coregion(1, W=self.W, kappa=self.kappa, B=self.B)\n\n def test_raises3(self):\n with pm.Model() as model:\n with pytest.raises(ValueError):\n B = pm.gp.cov.Coregion(1)\n\n\nclass TestMarginalVsLatent(object):\n R\"\"\"\n Compare the logp of models Marginal, noise=0 and Latent.\n \"\"\"\n def setup_method(self):\n X = np.random.randn(50,3)\n y = np.random.randn(50)*0.01\n Xnew = 
np.random.randn(60, 3)\n pnew = np.random.randn(60)*0.01\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.Marginal(mean_func, cov_func)\n f = gp.marginal_likelihood(\"f\", X, y, noise=0.0, is_observed=False, observed=y)\n p = gp.conditional(\"p\", Xnew)\n self.logp = model.logp({\"p\": pnew})\n self.X = X\n self.Xnew = Xnew\n self.y = y\n self.pnew = pnew\n\n def testLatent1(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.Latent(mean_func, cov_func)\n f = gp.prior(\"f\", self.X, reparameterize=False)\n p = gp.conditional(\"p\", self.Xnew)\n latent_logp = model.logp({\"f\": self.y, \"p\": self.pnew})\n npt.assert_allclose(latent_logp, self.logp, atol=0, rtol=1e-2)\n\n def testLatent2(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.Latent(mean_func, cov_func)\n f = gp.prior(\"f\", self.X, reparameterize=True)\n p = gp.conditional(\"p\", self.Xnew)\n chol = np.linalg.cholesky(cov_func(self.X).eval())\n y_rotated = np.linalg.solve(chol, self.y - 0.5)\n latent_logp = model.logp({\"f_rotated_\": y_rotated, \"p\": self.pnew})\n npt.assert_allclose(latent_logp, self.logp, atol=5)\n\n\nclass TestMarginalVsMarginalSparse(object):\n R\"\"\"\n Compare logp of models Marginal and MarginalSparse.\n Should be nearly equal when inducing points are same as inputs.\n \"\"\"\n def setup_method(self):\n X = np.random.randn(50,3)\n y = np.random.randn(50)*0.01\n Xnew = np.random.randn(60, 3)\n pnew = np.random.randn(60)*0.01\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.Marginal(mean_func, cov_func)\n sigma = 0.1\n f = gp.marginal_likelihood(\"f\", X, y, noise=sigma)\n p = gp.conditional(\"p\", Xnew)\n self.logp = model.logp({\"p\": pnew})\n self.X = X\n self.Xnew = Xnew\n self.y = y\n self.sigma = sigma\n self.pnew = pnew\n self.gp = gp\n\n @pytest.mark.parametrize('approx', ['FITC', 'VFE', 'DTC'])\n def testApproximations(self, approx):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.MarginalSparse(mean_func, cov_func, approx=approx)\n f = gp.marginal_likelihood(\"f\", self.X, self.X, self.y, self.sigma)\n p = gp.conditional(\"p\", self.Xnew)\n approx_logp = model.logp({\"f\": self.y, \"p\": self.pnew})\n npt.assert_allclose(approx_logp, self.logp, atol=0, rtol=1e-2)\n\n @pytest.mark.parametrize('approx', ['FITC', 'VFE', 'DTC'])\n def testPredictVar(self, approx):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.MarginalSparse(mean_func, cov_func, approx=approx)\n f = gp.marginal_likelihood(\"f\", self.X, self.X, self.y, self.sigma)\n mu1, var1 = self.gp.predict(self.Xnew, diag=True)\n mu2, var2 = gp.predict(self.Xnew, diag=True)\n npt.assert_allclose(mu1, mu2, atol=0, rtol=1e-3)\n npt.assert_allclose(var1, var2, atol=0, rtol=1e-3)\n\n def testPredictCov(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n mean_func = pm.gp.mean.Constant(0.5)\n gp = pm.gp.MarginalSparse(mean_func, cov_func, approx=\"DTC\")\n f = gp.marginal_likelihood(\"f\", self.X, self.X, self.y, self.sigma, is_observed=False)\n mu1, cov1 = self.gp.predict(self.Xnew, pred_noise=True)\n mu2, cov2 = 
gp.predict(self.Xnew, pred_noise=True)\n npt.assert_allclose(mu1, mu2, atol=0, rtol=1e-3)\n npt.assert_allclose(cov1, cov2, atol=0, rtol=1e-3)\n\n\nclass TestGPAdditive(object):\n def setup_method(self):\n self.X = np.random.randn(50,3)\n self.y = np.random.randn(50)*0.01\n self.Xnew = np.random.randn(60, 3)\n self.noise = pm.gp.cov.WhiteNoise(0.1)\n self.covs = (pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3]),\n pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3]),\n pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3]))\n self.means = (pm.gp.mean.Constant(0.5),\n pm.gp.mean.Constant(0.5),\n pm.gp.mean.Constant(0.5))\n\n def testAdditiveMarginal(self):\n with pm.Model() as model1:\n gp1 = pm.gp.Marginal(self.means[0], self.covs[0])\n gp2 = pm.gp.Marginal(self.means[1], self.covs[1])\n gp3 = pm.gp.Marginal(self.means[2], self.covs[2])\n\n gpsum = gp1 + gp2 + gp3\n fsum = gpsum.marginal_likelihood(\"f\", self.X, self.y, noise=self.noise)\n model1_logp = model1.logp({\"fsum\": self.y})\n\n with pm.Model() as model2:\n gptot = pm.gp.Marginal(reduce(add, self.means), reduce(add, self.covs))\n fsum = gptot.marginal_likelihood(\"f\", self.X, self.y, noise=self.noise)\n model2_logp = model2.logp({\"fsum\": self.y})\n npt.assert_allclose(model1_logp, model2_logp, atol=0, rtol=1e-2)\n\n with model1:\n fp1 = gpsum.conditional(\"fp1\", self.Xnew, given={\"X\": self.X, \"y\": self.y,\n \"noise\": self.noise, \"gp\": gpsum})\n with model2:\n fp2 = gptot.conditional(\"fp2\", self.Xnew)\n\n fp = np.random.randn(self.Xnew.shape[0])\n npt.assert_allclose(fp1.logp({\"fp1\": fp}), fp2.logp({\"fp2\": fp}), atol=0, rtol=1e-2)\n\n @pytest.mark.parametrize('approx', ['FITC', 'VFE', 'DTC'])\n def testAdditiveMarginalSparse(self, approx):\n Xu = np.random.randn(10, 3)\n sigma = 0.1\n with pm.Model() as model1:\n gp1 = pm.gp.MarginalSparse(self.means[0], self.covs[0], approx=approx)\n gp2 = pm.gp.MarginalSparse(self.means[1], self.covs[1], approx=approx)\n gp3 = pm.gp.MarginalSparse(self.means[2], self.covs[2], approx=approx)\n\n gpsum = gp1 + gp2 + gp3\n fsum = gpsum.marginal_likelihood(\"f\", self.X, Xu, self.y, sigma=sigma)\n model1_logp = model1.logp({\"fsum\": self.y})\n\n with pm.Model() as model2:\n gptot = pm.gp.MarginalSparse(reduce(add, self.means), reduce(add, self.covs), approx=approx)\n fsum = gptot.marginal_likelihood(\"f\", self.X, Xu, self.y, sigma=sigma)\n model2_logp = model2.logp({\"fsum\": self.y})\n npt.assert_allclose(model1_logp, model2_logp, atol=0, rtol=1e-2)\n\n with model1:\n fp1 = gpsum.conditional(\"fp1\", self.Xnew, given={\"X\": self.X, \"Xu\": Xu, \"y\": self.y,\n \"sigma\": sigma, \"gp\": gpsum})\n with model2:\n fp2 = gptot.conditional(\"fp2\", self.Xnew)\n\n fp = np.random.randn(self.Xnew.shape[0])\n npt.assert_allclose(fp1.logp({\"fp1\": fp}), fp2.logp({\"fp2\": fp}), atol=0, rtol=1e-2)\n\n def testAdditiveLatent(self):\n with pm.Model() as model1:\n gp1 = pm.gp.Latent(self.means[0], self.covs[0])\n gp2 = pm.gp.Latent(self.means[1], self.covs[1])\n gp3 = pm.gp.Latent(self.means[2], self.covs[2])\n\n gpsum = gp1 + gp2 + gp3\n fsum = gpsum.prior(\"fsum\", self.X, reparameterize=False)\n model1_logp = model1.logp({\"fsum\": self.y})\n\n with pm.Model() as model2:\n gptot = pm.gp.Latent(reduce(add, self.means), reduce(add, self.covs))\n fsum = gptot.prior(\"fsum\", self.X, reparameterize=False)\n model2_logp = model2.logp({\"fsum\": self.y})\n npt.assert_allclose(model1_logp, model2_logp, atol=0, rtol=1e-2)\n\n with model1:\n fp1 = gpsum.conditional(\"fp1\", self.Xnew, given={\"X\": self.X, \"f\": self.y, \"gp\": gpsum})\n 
with model2:\n fp2 = gptot.conditional(\"fp2\", self.Xnew)\n\n fp = np.random.randn(self.Xnew.shape[0])\n npt.assert_allclose(fp1.logp({\"fsum\": self.y, \"fp1\": fp}),\n fp2.logp({\"fsum\": self.y, \"fp2\": fp}), atol=0, rtol=1e-2)\n\n def testAdditiveSparseRaises(self):\n # cant add different approximations\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n gp1 = pm.gp.MarginalSparse(cov_func=cov_func, approx=\"DTC\")\n gp2 = pm.gp.MarginalSparse(cov_func=cov_func, approx=\"FITC\")\n with pytest.raises(Exception) as e_info:\n gp1 + gp2\n\n def testAdditiveTypeRaises1(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n gp1 = pm.gp.MarginalSparse(cov_func=cov_func, approx=\"DTC\")\n gp2 = pm.gp.Marginal(cov_func=cov_func)\n with pytest.raises(Exception) as e_info:\n gp1 + gp2\n\n def testAdditiveTypeRaises2(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n gp1 = pm.gp.Latent(cov_func=cov_func)\n gp2 = pm.gp.Marginal(cov_func=cov_func)\n with pytest.raises(Exception) as e_info:\n gp1 + gp2\n\n\nclass TestTP(object):\n R\"\"\"\n Compare TP with high degress of freedom to GP\n \"\"\"\n def setup_method(self):\n X = np.random.randn(20,3)\n y = np.random.randn(20)*0.01\n Xnew = np.random.randn(50, 3)\n pnew = np.random.randn(50)*0.01\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n gp = pm.gp.Latent(cov_func=cov_func)\n f = gp.prior(\"f\", X, reparameterize=False)\n p = gp.conditional(\"p\", Xnew)\n self.X = X\n self.y = y\n self.Xnew = Xnew\n self.pnew = pnew\n self.latent_logp = model.logp({\"f\": y, \"p\": pnew})\n self.plogp = p.logp({\"f\": y, \"p\": pnew})\n\n def testTPvsLatent(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n tp = pm.gp.TP(cov_func=cov_func, nu=10000)\n f = tp.prior(\"f\", self.X, reparameterize=False)\n p = tp.conditional(\"p\", self.Xnew)\n tp_logp = model.logp({\"f\": self.y, \"p\": self.pnew})\n npt.assert_allclose(self.latent_logp, tp_logp, atol=0, rtol=1e-2)\n\n def testTPvsLatentReparameterized(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n tp = pm.gp.TP(cov_func=cov_func, nu=10000)\n f = tp.prior(\"f\", self.X, reparameterize=True)\n p = tp.conditional(\"p\", self.Xnew)\n chol = np.linalg.cholesky(cov_func(self.X).eval())\n y_rotated = np.linalg.solve(chol, self.y)\n # testing full model logp unreliable due to introduction of chi2__log__\n plogp = p.logp({\"f_rotated_\": y_rotated, \"p\": self.pnew, \"chi2__log__\": np.log(1e20)})\n npt.assert_allclose(self.plogp, plogp, atol=0, rtol=1e-2)\n\n def testAdditiveTPRaises(self):\n with pm.Model() as model:\n cov_func = pm.gp.cov.ExpQuad(3, [0.1, 0.2, 0.3])\n gp1 = pm.gp.TP(cov_func=cov_func, nu=10)\n gp2 = pm.gp.TP(cov_func=cov_func, nu=10)\n with pytest.raises(Exception) as e_info:\n gp1 + gp2\n\n\nclass TestMarginalKron(object):\n def setup_method(self):\n self.Xs = [np.linspace(0, 1, 7)[:, None],\n np.linspace(0, 1, 5)[:, None],\n np.linspace(0, 1, 6)[:, None]]\n self.X = cartesian(*self.Xs)\n self.N = np.prod([len(X) for X in self.Xs])\n self.y = np.random.randn(self.N) * 0.1\n self.Xnews = [np.random.randn(5, 1),\n np.random.randn(5, 1),\n np.random.randn(5, 1)]\n self.Xnew = np.concatenate(tuple(self.Xnews), axis=1)\n self.sigma = 0.2\n self.pnew = np.random.randn(len(self.Xnew))*0.01\n ls = 0.2\n with pm.Model() as model:\n self.cov_funcs = [pm.gp.cov.ExpQuad(1, ls),\n pm.gp.cov.ExpQuad(1, ls),\n 
pm.gp.cov.ExpQuad(1, ls)]\n cov_func = pm.gp.cov.Kron(self.cov_funcs)\n self.mean = pm.gp.mean.Constant(0.5)\n gp = pm.gp.Marginal(mean_func=self.mean, cov_func=cov_func)\n f = gp.marginal_likelihood(\"f\", self.X, self.y, noise=self.sigma)\n p = gp.conditional(\"p\", self.Xnew)\n self.mu, self.cov = gp.predict(self.Xnew)\n self.logp = model.logp({\"p\": self.pnew})\n\n def testMarginalKronvsMarginalpredict(self):\n with pm.Model() as kron_model:\n kron_gp = pm.gp.MarginalKron(mean_func=self.mean,\n cov_funcs=self.cov_funcs)\n f = kron_gp.marginal_likelihood('f', self.Xs, self.y,\n sigma=self.sigma, shape=self.N)\n p = kron_gp.conditional('p', self.Xnew)\n mu, cov = kron_gp.predict(self.Xnew)\n npt.assert_allclose(mu, self.mu, atol=0, rtol=1e-2)\n npt.assert_allclose(cov, self.cov, atol=0, rtol=1e-2)\n\n def testMarginalKronvsMarginal(self):\n with pm.Model() as kron_model:\n kron_gp = pm.gp.MarginalKron(mean_func=self.mean,\n cov_funcs=self.cov_funcs)\n f = kron_gp.marginal_likelihood('f', self.Xs, self.y,\n sigma=self.sigma, shape=self.N)\n p = kron_gp.conditional('p', self.Xnew)\n kron_logp = kron_model.logp({'p': self.pnew})\n npt.assert_allclose(kron_logp, self.logp, atol=0, rtol=1e-2)\n\n def testMarginalKronRaises(self):\n with pm.Model() as kron_model:\n gp1 = pm.gp.MarginalKron(mean_func=self.mean,\n cov_funcs=self.cov_funcs)\n gp2 = pm.gp.MarginalKron(mean_func=self.mean,\n cov_funcs=self.cov_funcs)\n with pytest.raises(TypeError):\n gp1 + gp2\n" ]
[ [ "numpy.diag", "numpy.dot", "numpy.log", "numpy.linalg.solve", "numpy.allclose", "numpy.random.seed", "numpy.linspace", "numpy.ones", "numpy.all", "numpy.random.randn", "numpy.random.rand", "numpy.any", "numpy.testing.assert_allclose", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NBUFabio25/Speaker_Verification-
[ "b6691d0c78e50803d1d7117887c81e5ce57fd34b" ]
[ "utils.py" ]
[ "# Third Party\nimport librosa\nimport numpy as np\nimport torch.nn.functional as F\n\n\n# ===============================================\n# code from Arsha for loading data.\n# This code extract features for a give audio file\n# ===============================================\ndef load_wav(audio_filepath, sr, min_dur_sec=4):\n audio_data, fs = librosa.load(audio_filepath, sr=16000)\n len_file = len(audio_data)\n\n if len_file < int(min_dur_sec * sr):\n dummy = np.zeros((1, int(min_dur_sec * sr) - len_file))\n extened_wav = np.concatenate((audio_data, dummy[0]))\n else:\n\n extened_wav = audio_data\n return extened_wav\n\n\ndef lin_mel_from_wav(wav, hop_length, win_length, n_mels):\n linear = librosa.feature.melspectrogram(wav, n_mels=n_mels, win_length=win_length,\n hop_length=hop_length) # linear spectrogram\n return linear.T\n\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=512):\n linear = librosa.stft(wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length) # linear spectrogram\n return linear.T\n\n\ndef feature_extraction(filepath, sr=16000, min_dur_sec=4, win_length=400, hop_length=160, n_mels=40, spec_len=400,\n mode='train'):\n audio_data = load_wav(filepath, sr=sr, min_dur_sec=min_dur_sec)\n linear_spect = lin_spectogram_from_wav(audio_data, hop_length, win_length, n_fft=512)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n mu = np.mean(mag_T, 0, keepdims=True)\n std = np.std(mag_T, 0, keepdims=True)\n return (mag_T - mu) / (std + 1e-5)\n\n\ndef load_data(filepath, sr=16000, min_dur_sec=4, win_length=400, hop_length=160, n_mels=40, spec_len=400, mode='train'):\n audio_data = load_wav(filepath, sr=sr, min_dur_sec=min_dur_sec)\n # linear_spect = lin_spectogram_from_wav(audio_data, hop_length, win_length, n_mels)\n linear_spect = lin_spectogram_from_wav(audio_data, hop_length, win_length, n_fft=512)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n\n\n randtime = np.random.randint(0, mag_T.shape[1] - spec_len)\n spec_mag = mag_T[:, randtime:randtime + spec_len]\n\n # preprocessing, subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n return (spec_mag - mu) / (std + 1e-5)\n\ndef get_centroids(embeddings):\n centroids = embeddings.mean(dim=1)\n return centroids\n\ndef get_utterance_centroids(embeddings):\n \"\"\"\n Returns the centroids for each utterance of a speaker, where\n the utterance centroid is the speaker centroid without considering\n this utterance\n\n Shape of embeddings should be:\n (speaker_ct, utterance_per_speaker_ct, embedding_size)\n \"\"\"\n sum_centroids = embeddings.sum(dim=1)\n # we want to subtract out each utterance, prior to calculating the\n # the utterance centroid\n sum_centroids = sum_centroids.reshape(\n sum_centroids.shape[0], 1, sum_centroids.shape[-1]\n )\n # we want the mean but not including the utterance itself, so -1\n num_utterances = embeddings.shape[1] - 1\n centroids = (sum_centroids - embeddings) / num_utterances\n return centroids\n\ndef get_cossim(embeddings, centroids):\n # number of utterances per speaker\n num_utterances = embeddings.shape[1]\n utterance_centroids = get_utterance_centroids(embeddings)\n\n # flatten the embeddings and utterance centroids to just utterance,\n # so we can do cosine similarity\n utterance_centroids_flat = utterance_centroids.view(\n utterance_centroids.shape[0] * utterance_centroids.shape[1],\n -1\n )\n embeddings_flat = embeddings.view(\n embeddings.shape[0] * 
num_utterances,\n -1\n )\n # the cosine distance between utterance and the associated centroids\n # for that utterance\n # this is each speaker's utterances against his own centroid, but each\n # comparison centroid has the current utterance removed\n cos_same = F.cosine_similarity(embeddings_flat, utterance_centroids_flat)\n\n # now we get the cosine distance between each utterance and the other speakers'\n # centroids\n # to do so requires comparing each utterance to each centroid. To keep the\n # operation fast, we vectorize by using matrices L (embeddings) and\n # R (centroids) where L has each utterance repeated sequentially for all\n # comparisons and R has the entire centroids frame repeated for each utterance\n centroids_expand = centroids.repeat((num_utterances * embeddings.shape[0], 1))\n embeddings_expand = embeddings_flat.unsqueeze(1).repeat(1, embeddings.shape[0], 1)\n embeddings_expand = embeddings_expand.view(\n embeddings_expand.shape[0] * embeddings_expand.shape[1],\n embeddings_expand.shape[-1]\n )\n cos_diff = F.cosine_similarity(embeddings_expand, centroids_expand)\n cos_diff = cos_diff.view(\n embeddings.size(0),\n num_utterances,\n centroids.size(0)\n )\n # assign the cosine distance for same speakers to the proper idx\n same_idx = list(range(embeddings.size(0)))\n if num_utterances > 1:\n cos_diff[same_idx, :, same_idx] = cos_same.view(embeddings.shape[0], num_utterances)\n cos_diff = cos_diff + 1e-6\n return cos_diff\n\ndef load_npy_data(filepath, spec_len=400, mode='train'):\n mag_T = np.load(filepath)\n if mode == 'train':\n randtime = np.random.randint(0, mag_T.shape[1] - spec_len)\n spec_mag = mag_T[:, randtime:randtime + spec_len]\n else:\n spec_mag = mag_T\n return spec_mag\n\n\ndef speech_collate(batch):\n targets = []\n specs = []\n for sample in batch:\n specs.append(sample['features'])\n targets.append((sample['labels']))\n return specs, targets" ]
[ [ "numpy.concatenate", "numpy.std", "torch.nn.functional.cosine_similarity", "numpy.mean", "numpy.load", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Dowell-Lab/OCR_transcription_detection
[ "702c49d4c4ae7c0eefdf1fa52a218359c52cbe27" ]
[ "dataset_stats.py" ]
[ "import pandas as pd\n\ndata = pd.read_pickle('./combined_dataset_union_fstitchtfit.pkl')\ndata['prom_ovlp'] = data['prom_ovlp'].clip(upper=1)\n\nprint(\"For all OCRs:\")\nprint(\"OCRs overlapping nascent transcription:\")\nprint(data.groupby(['sample', 'ovlp_txn']).size())\nprint(\"OCRs overlapping TSSs:\")\nprint(data.groupby(['sample', 'prom_ovlp']).size())\n\nprint(\"For the test set (no HCT116, chr12-chrY only):\")\nchromosomes = [\n 'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10',\n 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20',\n 'chr21', 'chr22', 'chrX', 'chrY']\nsplit_index = 11 # ie. chr12\ntest_chroms = chromosomes[split_index:]\ndata = data[(data['chrom'].isin(test_chroms)) & (data['sample'] != 'HCT116')]\n\nprint(\"OCRs overlapping nascent transcription:\")\nprint(data.groupby(['sample', 'ovlp_txn']).size())\nprint(\"OCRs overlapping TSSs:\")\ndata['prom_ovlp'].clip(upper=1)\nprint(data.groupby(['sample', 'prom_ovlp']).size())\n\n\n\n" ]
[ [ "pandas.read_pickle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lxgrf/posenet_jetson_nano
[ "41e1a728f7c1db153b942a7174bec2383a5c00ad" ]
[ "webcam_demo.py" ]
[ "import tensorflow as tf\nimport cv2\nimport time\nimport argparse\n\nimport posenet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--cam_id', type=int, default=0)\nparser.add_argument('--cam_width', type=int, default=1280)\nparser.add_argument('--cam_height', type=int, default=720)\nparser.add_argument('--scale_factor', type=float, default=0.7125)\nparser.add_argument('--file', type=str, default=None, help=\"Optionally use a video file instead of a live camera\")\nargs = parser.parse_args()\n\ndef gstreamer_pipeline (capture_width=1280, capture_height=720, display_width=1280, display_height=720, framerate=60, flip_method=2) : \n return ('nvarguscamerasrc ! ' \n 'video/x-raw(memory:NVMM), '\n 'width=(int)%d, height=(int)%d, '\n 'format=(string)NV12, framerate=(fraction)%d/1 ! '\n 'nvvidconv flip-method=%d ! '\n 'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! '\n 'videoconvert ! '\n 'video/x-raw, format=(string)BGR ! appsink' % (capture_width,capture_height,framerate,flip_method,display_width,display_height))\n\ndef main():\n with tf.Session() as sess:\n model_cfg, model_outputs = posenet.load_model(args.model, sess)\n output_stride = model_cfg['output_stride']\n\n if args.file is not None:\n cap = cv2.VideoCapture(args.file)\n else:\n cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=2), cv2.CAP_GSTREAMER)\n #cap.set(3, args.cam_width)\n #cap.set(4, args.cam_height)\n\n start = time.time()\n frame_count = 0\n while True:\n input_image, display_image, output_scale = posenet.read_cap(\n cap, scale_factor=args.scale_factor, output_stride=output_stride)\n\n heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(\n model_outputs,\n feed_dict={'image:0': input_image}\n )\n\n pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(\n heatmaps_result.squeeze(axis=0),\n offsets_result.squeeze(axis=0),\n displacement_fwd_result.squeeze(axis=0),\n displacement_bwd_result.squeeze(axis=0),\n output_stride=output_stride,\n max_pose_detections=10,\n min_pose_score=0.15)\n\n keypoint_coords *= output_scale\n\n # TODO this isn't particularly fast, use GL for drawing and display someday...\n overlay_image = posenet.draw_skel_and_kp(\n display_image, pose_scores, keypoint_scores, keypoint_coords,\n min_pose_score=0.15, min_part_score=0.1)\n\n\n cv2.namedWindow(\"posenet\", cv2.WND_PROP_FULLSCREEN)\n cv2.setWindowProperty(\"posenet\", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)\n cv2.imshow('posenet', overlay_image)\n frame_count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print('Average FPS: ', frame_count / (time.time() - start))\n\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
woo1/Anim-NeRF
[ "03977700420691b18b6aa0bc809f3a05a9f07b12" ]
[ "models/volume_rendering.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nif torch.__version__ < '1.6.0':\n from torchsearchsorted import searchsorted\n\nclass VolumeRenderer(nn.Module):\n def __init__(\n self, \n n_coarse=64,\n n_fine=0,\n n_fine_depth=0,\n share_fine=False,\n noise_std=1.0,\n depth_std=0.02,\n white_bkgd=True,\n lindisp=True\n ):\n super(VolumeRenderer, self).__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n self.share_fine = share_fine\n self.noise_std = noise_std\n self.depth_std = depth_std\n self.lindisp = lindisp\n self.white_bkgd = white_bkgd\n \n def sample_coarse(self, rays, perturb=0.):\n \"\"\"\n Stratified sampling. Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (bs, n_rays, 8)\n :return (bs, n_rays, Kc)\n \"\"\"\n bs, n_rays = rays.shape[:2]\n device = rays.device\n near, far = rays[..., 6:7], rays[..., 7:8] # (bs, n_rays, 1)\n\n step = 1.0 / self.n_coarse\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).unsqueeze(0).repeat(bs, n_rays, 1) # (bs, n_rays, Kc)\n\n if self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (bs, n_rays, Kc)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (bs, n_rays, Kc)\n\n if perturb > 0:\n mids = .5 * (z_samp[..., 1:] + z_samp[..., :-1])\n upper = torch.cat([mids, z_samp[..., -1:]], -1)\n lower = torch.cat([z_samp[..., :1], mids], -1)\n # stratified samples in those intervals\n t_rand = perturb * torch.rand(z_samp.shape, device=device)\n z_samp = lower + (upper - lower) * t_rand\n\n return z_samp\n \n \n def sample_fine(self, bins, weights, det=False, eps=1e-5):\n \"\"\"\n Weighted stratified (importance) sample\n :param bins (bs, n_rays, Kc-1)\n :param weights (bs, n_rays,, Kc-2)\n :return (bs, n_rays,, Kf)\n \"\"\"\n bs, n_rays = bins.shape[:2]\n device = bins.device\n\n weights = weights.detach() + eps # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (bs, n_rays, Kc)\n cdf = torch.cumsum(pdf, -1) # (bs, n_rays, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) # (bs, n_rays, Kc+1)\n\n if det:\n u = torch.linspace(0., 1., steps=self.n_fine, device=device)\n u = u.expand(bs, n_rays, self.n_fine)\n else:\n u = torch.rand(bs, n_rays, self.n_fine, device=device) # (bs, n_rays, Kf)\n \n u = u.contiguous()\n\n if torch.__version__ < '1.6.0':\n inds = searchsorted(cdf.reshape(bs*n_rays, -1), u.reshape(bs*n_rays, -1), side='right').reshape(bs, n_rays, -1)\n else:\n inds = torch.searchsorted(cdf, u, right=True)\n below = torch.clamp_min(inds - 1, 0)\n above = torch.clamp_max(inds, self.n_coarse - 2)\n\n inds_sampled = torch.stack([below, above], -1).view(bs, n_rays, self.n_fine * 2)\n cdf_g = torch.gather(cdf, 2, inds_sampled).view(bs, n_rays, self.n_fine, 2)\n bins_g = torch.gather(bins, 2, inds_sampled).view(bs, n_rays, self.n_fine, 2)\n\n denom = cdf_g[..., 1] - cdf_g[..., 0]\n denom[denom < eps] = 1\n z_samp = bins_g[..., 0] + (u - cdf_g[..., 0]) / denom * (bins_g[..., 1] - bins_g[..., 0])\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (bs, n_rays, 8)\n :param depth (bs, n_rays, 1)\n :return (bs, n_rays, Kfd)\n \"\"\"\n z_samp = depth.repeat(1, 1, self.n_fine_depth)\n z_samp 
+= torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n near, far = rays[..., 6:7], rays[..., 7:8] # (B, 1)\n z_samp = torch.min(torch.max(z_samp, near), far)\n return z_samp\n \n def composite(self, model, rays, z_samp, coarse=True, far=True, perturb=0., **kwargs):\n bs, n_rays, K = z_samp.shape\n\n # (bs, n_rays, K, 3)\n xyz = rays[..., None, :3] + z_samp.unsqueeze(-1) * rays[..., None, 3:6]\n viewdir = rays[..., None, 3:6].expand(-1, -1, K, -1)\n xyz = xyz.reshape(bs, -1, 3) # (bs, n_rays*K, 3)\n viewdir = viewdir.reshape(bs, -1, 3) # (bs, n_rays*K, 3)\n \n # (bs, n_rays*K, 4)\n rgbs, sigmas = model(xyz, viewdir, use_fine=not coarse, **kwargs)\n\n rgbs = rgbs.reshape(bs, n_rays, K, 3)\n sigmas = sigmas.reshape(bs, n_rays, K)\n\n if self.noise_std > 0.0 and perturb > 0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n deltas = z_samp[..., 1:] - z_samp[..., :-1] # (bs, n_rays, K-1)\n \n if far:\n delta_inf = 1e10 * torch.ones_like(deltas[..., :1]) # infty (bs, n_rays, 1)\n else:\n delta_inf = rays[..., 7:8] - z_samp[..., -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (bs, n_rays, K)\n\n #deltas = deltas * torch.norm(rays[..., None, 3:6], dim=-1)\n\n # compute the gradients in log space of the alphas, for NV TV occupancy regularizer\n alphas = 1 - torch.exp(-deltas * torch.relu(sigmas)) # (bs, n_rays, K)\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-10], -1) # (bs, n_rays, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (bs, n_rays, K+1)\n weights = alphas * T[..., :-1] # (bs, n_rays, K)\n weights_sum = torch.sum(weights, dim=-1, keepdim=True) # (bs, n_rays, 1)\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (bs, n_rays, 3)\n depth_final = torch.sum(weights * z_samp, -1, keepdim=True) # (bs, n_rays, 1)\n\n if self.white_bkgd:\n depth_final = depth_final + (1 - weights_sum) * rays[..., 7:8]\n rgb_final = rgb_final + 1 - weights_sum # (bs, n_rays, 3)\n \n return (\n weights,\n rgb_final,\n depth_final,\n weights_sum\n )\n \n \n def forward(self, model, rays, perturb=0., **kwargs):\n\n z_coarse = self.sample_coarse(rays[..., :8], perturb=perturb) # (bs, n_rays, Kc)\n\n if self.n_fine > 0 and self.share_fine:\n with torch.no_grad():\n weights, rgbs, depths, alphas = self.composite(\n model,\n rays,\n z_coarse,\n coarse=True,\n far=True,\n perturb=perturb,\n **kwargs\n )\n else:\n weights, rgbs, depths, alphas = self.composite(\n model,\n rays,\n z_coarse,\n coarse=True,\n far=True,\n perturb=perturb,\n **kwargs\n )\n \n output = {\n 'rgbs': rgbs,\n 'alphas': alphas,\n 'depths': depths\n }\n\n if self.n_fine > 0 or self.n_fine_depth > 0:\n z_combine = z_coarse\n\n if self.n_fine > 0:\n z_coarse_mid = 0.5 * (z_coarse[..., :-1] + z_coarse[..., 1:]) # (bs, n_rays, Kc - 1)\n z_fine = self.sample_fine(z_coarse_mid, weights[..., 1:-1].detach(), det=(perturb==0)).detach() # (bs, n_rays, Kf)\n z_combine = torch.cat([z_combine, z_fine], dim=-1)\n\n if self.n_fine_depth > 0:\n z_fine_depth = self.sample_fine_depth(rays, depth=depths)\n z_combine = torch.cat([z_combine, z_fine_depth.detach()], dim=-1)\n\n z_combine, _ = torch.sort(z_combine, dim=-1)\n\n weights_fine, rgbs_fine, depths_fine, alphas_fine = self.composite(\n model,\n rays,\n z_combine,\n coarse=False,\n far=True,\n perturb=perturb,\n **kwargs\n )\n\n if self.share_fine:\n output = {\n 'rgbs': rgbs_fine,\n 'alphas': alphas_fine,\n 'depths': depths_fine\n }\n else:\n output.update({\n 'rgbs_fine': rgbs_fine,\n 'alphas_fine': 
alphas_fine,\n 'depths_fine': depths_fine\n })\n\n return output" ]
[ [ "torch.randn_like", "torch.linspace", "torch.max", "torch.clamp_max", "torch.cat", "torch.sum", "torch.zeros_like", "torch.gather", "torch.relu", "torch.no_grad", "torch.rand", "torch.cumprod", "torch.sort", "torch.clamp_min", "torch.searchsorted", "torch.cumsum", "torch.ones_like", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chris4540/DD2412-adl-proj
[ "69490f335acb6bc9eb8bca29329de1debb2c9c83" ]
[ "utils/eval.py" ]
[ "import tensorflow as tf\n\ndef evaluate(data_loader, model, output_activations=True):\n total = 0\n correct = 0.0\n for inputs, labels in data_loader:\n if output_activations:\n out, *_ = model(inputs, training=False)\n else:\n out = model(inputs, training=False)\n\n prob = tf.math.softmax(out, axis=-1)\n\n pred = tf.argmax(prob, axis=-1)\n equality = tf.equal(pred, tf.reshape(labels, [-1]))\n correct = correct + tf.reduce_sum(tf.cast(equality, tf.float32))\n total = total + tf.size(equality)\n\n total = tf.cast(total, tf.float32)\n ret = correct / total\n return ret\n" ]
[ [ "tensorflow.reshape", "tensorflow.cast", "tensorflow.math.softmax", "tensorflow.argmax", "tensorflow.size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
BChaudron/entsoe-py
[ "be6c724c4b9aac438954f79623d7ec0b51ab4848" ]
[ "entsoe/misc.py" ]
[ "import pandas as pd\nfrom dateutil import rrule\nfrom itertools import tee\n\n\ndef year_blocks(start, end):\n \"\"\"\n Create pairs of start and end with max a year in between, to deal with usage restrictions on the API\n\n Parameters\n ----------\n start : dt.datetime | pd.Timestamp\n end : dt.datetime | pd.Timestamp\n\n Returns\n -------\n ((pd.Timestamp, pd.Timestamp))\n \"\"\"\n rule = rrule.YEARLY\n\n res = []\n for day in rrule.rrule(rule, dtstart=start, until=end):\n res.append(pd.Timestamp(day))\n res.append(end)\n res = sorted(set(res))\n res = pairwise(res)\n return res\n\n\ndef day_blocks(start, end):\n \"\"\"\n Create pairs of start and end with max a day in between, to deal with usage restrictions on the API\n\n Parameters\n ----------\n start : dt.datetime | pd.Timestamp\n end : dt.datetime | pd.Timestamp\n\n Returns\n -------\n ((pd.Timestamp, pd.Timestamp))\n \"\"\"\n rule = rrule.DAILY\n\n res = []\n for day in rrule.rrule(rule, dtstart=start, until=end):\n res.append(pd.Timestamp(day))\n res.append(end)\n res = sorted(set(res))\n res = pairwise(res)\n return res\n\n\ndef pairwise(iterable):\n \"\"\"\n Create pairs to iterate over\n eg. [A, B, C, D] -> ([A, B], [B, C], [C, D])\n\n Parameters\n ----------\n iterable : iterable\n\n Returns\n -------\n iterable\n \"\"\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shreyaspj20/DataScientist
[ "47bcbd7415eb4d7d91dbc893a6476861d048cca6" ]
[ "datascientist/feature_selection/test/test_pearson.py" ]
[ "#Reading the test file having the data of footballers and target being their\n#overall skill score ( 1-100 ).\nimport pandas as pd\nplayer_df = pd.read_csv(\"datascientist/feature_selection/test/CSV/data.csv\")\n\n#Taking only those columns which have numerical or categorical values since \n#feature selection with Pearson Correlation can be performed on numerical data.\nnumcols = ['Overall', 'Crossing','Finishing', 'ShortPassing', 'Dribbling',\n 'LongPassing', 'BallControl', 'Acceleration','SprintSpeed',\n 'Agility', 'Stamina','Volleys','FKAccuracy','Reactions','Balance',\n 'ShotPower','Strength','LongShots','Aggression','Interceptions']\ncatcols = ['Preferred Foot','Position','Body Type','Nationality','Weak Foot']\nplayer_df = player_df[numcols+catcols]\n\n#encoding categorical values with one-hot encoding.\ntraindf = pd.concat([player_df[numcols], pd.get_dummies(player_df[catcols])],axis=1)\nfeatures = traindf.columns\n\n#dropping rows with Nan values\ntraindf = traindf.dropna()\ntraindf = pd.DataFrame(traindf,columns=features)\n\n#Separating features(X) and target(y).\ny = traindf['Overall']\nX = traindf.copy()\nX = X.drop(['Overall'],axis = 1)\n\n\nfrom datascientist.feature_selection.filter_based_selection import PearsonCorrelation\nCol_sel = PearsonCorrelation(X, y)\n\n#using corr_score method with different parameter values.\nscore1 = Col_sel.corr_score()\nscore2 = Col_sel.corr_score(sort = True)\nscore3 = Col_sel.corr_score(reset_index = True)\nscore4 = Col_sel.corr_score(sort = True,reset_index = True)\n\n#using top_corr_featurenames method with different parameter values.\ntopfeatname1 = Col_sel.top_corr_featurenames()\ntopfeatname2 = Col_sel.top_corr_featurenames(feat_num = 15)\ntopfeatname3 = Col_sel.top_corr_featurenames(feat_num = 15,ascending = False)\ntopfeatname4 = Col_sel.top_corr_featurenames(feat_num = 30,ascending = False)\n\n#using top_corr_features method with different parameter values.\nX_mod1 = Col_sel.top_corr_features()\nX_mod2 = Col_sel.top_corr_features(feat_num = 15)\nX_mod3 = Col_sel.top_corr_features(feat_num = 15,ascending = False)\nX_mod4 = Col_sel.top_corr_features(feat_num = 30,ascending = False)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.get_dummies" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
loriab/geomeTRIC
[ "335e97896e8efc261b2a041bd7ffeade1bf3f7d7" ]
[ "geometric/rotate.py" ]
[ "#!/usr/bin/env python\n\nfrom __future__ import division\nfrom forcebalance.nifty import invert_svd\nfrom forcebalance.molecule import *\nimport numpy as np\nimport sys\n\n\"\"\"\nReferences\n----------\n1. E. A. Coutsias, C. Seok, K. A. Dill. \"Using Quaternions to Calculate RMSD.\". J. Comput. Chem 2004.\n\"\"\"\n\n# def invert_svd(X,thresh=1e-12):\n \n# \"\"\" \n\n# Invert a matrix using singular value decomposition. \n# @param[in] X The matrix to be inverted\n# @param[in] thresh The SVD threshold; eigenvalues below this are not inverted but set to zero\n# @return Xt The inverted matrix\n\n# \"\"\"\n\n# u,s,vh = np.linalg.svd(X, full_matrices=0)\n# uh = np.matrix(np.transpose(u))\n# v = np.matrix(np.transpose(vh))\n# si = s.copy()\n# for i in range(s.shape[0]):\n# # reg = s[i]**2 / (s[i]**2 + thresh**2)\n# si[i] = s[i] / (s[i]**2 + thresh**2)\n# # if abs(s[i]) > thresh:\n# # si[i] = 1./s[i]\n# # else:\n# # si[i] = 0.0\n# si = np.matrix(np.diag(si))\n# Xt = v*si*uh\n# return Xt\n\ndef build_correlation(x, y):\n \"\"\"\n Build the 3x3 correlation matrix given by the sum over all atoms k:\n xk_i * yk_j\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n 3x3 correlation matrix\n \"\"\"\n assert x.ndim == 2\n assert y.ndim == 2\n assert x.shape[1] == 3\n assert y.shape[1] == 3\n assert x.shape[0] == y.shape[0]\n xmat = np.matrix(x).T\n ymat = np.matrix(y).T\n return np.array(xmat*ymat.T)\n\ndef build_F(x, y):\n \"\"\"\n Build the 4x4 F-matrix used in constructing the rotation quaternion\n given by Equation 10 of Reference 1\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n \"\"\"\n R = build_correlation(x, y)\n F = np.zeros((4,4),dtype=float)\n R11 = R[0,0]\n R12 = R[0,1]\n R13 = R[0,2]\n R21 = R[1,0]\n R22 = R[1,1]\n R23 = R[1,2]\n R31 = R[2,0]\n R32 = R[2,1]\n R33 = R[2,2]\n F[0,0] = R11 + R22 + R33\n F[0,1] = R23 - R32\n F[0,2] = R31 - R13\n F[0,3] = R12 - R21\n F[1,0] = R23 - R32\n F[1,1] = R11 - R22 - R33\n F[1,2] = R12 + R21\n F[1,3] = R13 + R31\n F[2,0] = R31 - R13\n F[2,1] = R12 + R21\n F[2,2] = R22 - R33 - R11\n F[2,3] = R23 + R32\n F[3,0] = R12 - R21\n F[3,1] = R13 + R31\n F[3,2] = R23 + R32\n F[3,3] = R33 - R22 - R11\n return F\n\ndef al(p):\n \"\"\"\n Given a quaternion p, return the 4x4 matrix A_L(p)\n which when multiplied with a column vector q gives\n the quaternion product pq.\n \n Parameters\n ----------\n p : numpy.ndarray\n 4 elements, represents quaternion\n \n Returns\n -------\n numpy.matrix\n 4x4 matrix describing action of quaternion multiplication\n \"\"\"\n # Given a quaternion p, return the 4x4 matrix A_L(p)\n # which when multiplied with a column vector q gives\n # the quaternion product pq.\n return np.matrix([[ p[0], -p[1], -p[2], -p[3]],\n [ p[1], p[0], -p[3], p[2]],\n [ p[2], p[3], p[0], -p[1]],\n [ p[3], -p[2], p[1], p[0]]])\n \ndef ar(p):\n \"\"\"\n Given a quaternion p, return the 4x4 matrix A_R(p)\n which when multiplied with a column vector q gives\n the quaternion product qp.\n \n Parameters\n ----------\n p : numpy.ndarray\n 4 elements, represents quaternion\n \n Returns\n -------\n numpy.matrix\n 4x4 matrix describing action of quaternion multiplication\n \"\"\"\n return np.matrix([[ p[0], -p[1], -p[2], -p[3]],\n [ p[1], p[0], 
p[3], -p[2]],\n [ p[2], -p[3], p[0], p[1]],\n [ p[3], p[2], -p[1], p[0]]])\n\ndef conj(q):\n \"\"\"\n Given a quaternion p, return its conjugate, simply the second\n through fourth elements changed in sign.\n \n Parameters\n ----------\n q : numpy.ndarray\n 4 elements, represents quaternion\n \n Returns\n -------\n numpy.ndarray\n New array representing conjugate of q\n \"\"\"\n assert q.ndim == 1\n assert q.shape[0] == 4\n qc = np.zeros_like(q)\n qc[0] = q[0]\n qc[1] = -q[1]\n qc[2] = -q[2]\n qc[3] = -q[3]\n return qc\n\ndef form_rot(q):\n \"\"\"\n Given a quaternion p, form a rotation matrix from it.\n \n Parameters\n ----------\n q : numpy.ndarray\n 4 elements, represents quaternion\n \n Returns\n -------\n numpy.matrix\n 3x3 rotation matrix\n \"\"\"\n qc = conj(q)\n R4 = al(q)*ar(qc)\n return R4[1:, 1:]\n\ndef sorted_eigh(mat, b=None, asc=False):\n \"\"\" Return eigenvalues of a symmetric matrix in descending order and associated eigenvectors \"\"\"\n if b is not None:\n L, Q = np.linalg.eigh(mat, b)\n else:\n L, Q = np.linalg.eigh(mat)\n if asc:\n idx = L.argsort()\n else:\n idx = L.argsort()[::-1] \n L = L[idx]\n Q = Q[:,idx]\n return L, Q\n\ndef calc_rmsd(x, y):\n \"\"\"\n Calculate the minimal RMSD between two structures x and y following\n the algorithm in Reference 1.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n 3x3 correlation matrix\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n N = x.shape[0]\n L, Q = sorted_eigh(build_F(x, y))\n idx = L.argsort()[::-1] \n L = L[idx]\n Q = Q[:,idx]\n\n lmax = np.max(L)\n rmsd = np.sqrt((np.sum(x**2) + np.sum(y**2) - 2*lmax)/N)\n return rmsd\n\ndef is_linear(x, y):\n \"\"\"\n Returns True if molecule is linear \n (largest eigenvalue almost equivalent to second largest)\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n N = x.shape[0]\n L, Q = sorted_eigh(build_F(x, y))\n if L[0]/L[1] < 1.01 and L[0]/L[1] > 0.0:\n return True\n else:\n return False\n\ndef get_quat(x, y, eig=False):\n \"\"\"\n Calculate the quaternion that rotates x into maximal coincidence with y\n to minimize the RMSD, following the algorithm in Reference 1.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n 4-element array representing quaternion\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n N = x.shape[0]\n L, Q = sorted_eigh(build_F(x, y))\n q = Q[:,0]\n # Standardize the orientation somewhat\n if q[0] < 0:\n q *= -1\n if eig:\n return q, L[0]\n else:\n return q\n\ndef get_rot(x, y):\n \"\"\"\n Calculate the rotation matrix that brings x into maximal coincidence with y\n to minimize the RMSD, following the algorithm in Reference 1. 
Mainly\n used to check the correctness of the quaternion.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.matrix\n 3x3 rotation matrix\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n N = x.shape[0]\n q = get_quat(x, y)\n U = form_rot(q)\n x = np.matrix(x)\n xr = np.array((U*x.T).T)\n rmsd = np.sqrt(np.sum((xr-y)**2)/N)\n # print rmsd\n return U\n\n\ndef get_R_der(x, y):\n \"\"\"\n Calculate the derivatives of the correlation matrix with respect\n to the Cartesian coordinates.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n u, w, i, j : \n First two dimensions are (n_atoms, 3), the variables being differentiated\n Second two dimensions are (3, 3), the elements of the R-matrix derivatives with respect to atom u, dimension w\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n # 3 x 3 x N_atoms x 3\n ADiffR = np.zeros((x.shape[0], 3, 3, 3), dtype=float)\n for u in range(x.shape[0]):\n for w in range(3):\n for i in range(3):\n for j in range(3):\n if i == w:\n ADiffR[u, w, i, j] = y[u, j]\n fdcheck = False\n if fdcheck:\n h = 1e-4\n R0 = build_correlation(x, y)\n for u in range(x.shape[0]):\n for w in range(3):\n x[u, w] += h\n RPlus = build_correlation(x, y)\n x[u, w] -= 2*h\n RMinus = build_correlation(x, y)\n x[u, w] += h\n FDiffR = (RPlus-RMinus)/(2*h)\n print(u, w, np.max(np.abs(ADiffR[u, w]-FDiffR)))\n return ADiffR\n\ndef get_F_der(x, y):\n \"\"\"\n Calculate the derivatives of the F-matrix with respect\n to the Cartesian coordinates.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n u, w, i, j : \n First two dimensions are (n_atoms, 3), the variables being differentiated\n Second two dimensions are (4, 4), the elements of the R-matrix derivatives with respect to atom u, dimension w\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n dR = get_R_der(x, y)\n dF = np.zeros((x.shape[0], 3, 4, 4),dtype=float)\n for u in range(x.shape[0]):\n for w in range(3):\n dR11 = dR[u,w,0,0]\n dR12 = dR[u,w,0,1]\n dR13 = dR[u,w,0,2]\n dR21 = dR[u,w,1,0]\n dR22 = dR[u,w,1,1]\n dR23 = dR[u,w,1,2]\n dR31 = dR[u,w,2,0]\n dR32 = dR[u,w,2,1]\n dR33 = dR[u,w,2,2]\n dF[u,w,0,0] = dR11 + dR22 + dR33\n dF[u,w,0,1] = dR23 - dR32\n dF[u,w,0,2] = dR31 - dR13\n dF[u,w,0,3] = dR12 - dR21\n dF[u,w,1,0] = dR23 - dR32\n dF[u,w,1,1] = dR11 - dR22 - dR33\n dF[u,w,1,2] = dR12 + dR21\n dF[u,w,1,3] = dR13 + dR31\n dF[u,w,2,0] = dR31 - dR13\n dF[u,w,2,1] = dR12 + dR21\n dF[u,w,2,2] = dR22 - dR33 - dR11\n dF[u,w,2,3] = dR23 + dR32\n dF[u,w,3,0] = dR12 - dR21\n dF[u,w,3,1] = dR13 + dR31\n dF[u,w,3,2] = dR23 + dR32\n dF[u,w,3,3] = dR33 - dR22 - dR11\n fdcheck = False\n if fdcheck:\n h = 1e-4\n F0 = build_F(x, y)\n for u in range(x.shape[0]):\n for w in range(3):\n x[u, w] += h\n FPlus = build_F(x, y)\n x[u, w] -= 2*h\n FMinus = build_F(x, y)\n x[u, w] += h\n FDiffF = (FPlus-FMinus)/(2*h)\n print(u, w, np.max(np.abs(dF[u, w]-FDiffF)))\n return dF\n\ndef get_q_der(x, y):\n \"\"\"\n Calculate the derivatives of the quaternion with respect\n to the 
Cartesian coordinates.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n u, w, i: \n First two dimensions are (n_atoms, 3), the variables being differentiated\n Third dimension is 4, the elements of the quaternion derivatives with respect to atom u, dimension w\n \"\"\"\n x = x - np.mean(x,axis=0)\n y = y - np.mean(y,axis=0)\n q, l = get_quat(x, y, eig=True)\n F = build_F(x, y)\n dF = get_F_der(x, y)\n mat = np.eye(4)*l - F\n # pinv = np.matrix(np.linalg.pinv(np.eye(4)*l - F))\n pinv = invert_svd(np.eye(4)*l - F, thresh=1e-6)\n dq = np.zeros((x.shape[0], 3, 4), dtype=float)\n for u in range(x.shape[0]):\n for w in range(3):\n dquw = pinv*np.matrix(dF[u, w])*np.matrix(q).T\n dq[u, w] = np.array(dquw).flatten()\n fdcheck = False\n if fdcheck:\n h = 1e-6\n for u in range(x.shape[0]):\n for w in range(3):\n x[u, w] += h\n QPlus = get_quat(x, y)\n x[u, w] -= 2*h\n QMinus = get_quat(x, y)\n x[u, w] += h\n FDiffQ = (QPlus-QMinus)/(2*h)\n print(QPlus, QMinus)\n print(dq[u, w], FDiffQ)\n print(u, w, np.dot(QPlus, QMinus), np.max(np.abs(dq[u, w]-FDiffQ)))\n return dq\n\ndef calc_fac_dfac(q0):\n \"\"\"\n Calculate the prefactor mapping the quaternion to the exponential map\n and also its derivative. Takes the first element of the quaternion only\n \"\"\"\n # Ill-defined around q0=1.0\n qm1 = q0-1.0\n # if np.abs(q0) == 1.0:\n # fac = 2\n # dfac = -2/3\n if np.abs(qm1) < 1e-8:\n fac = 2 - 2*qm1/3\n dfac = -2/3\n else:\n fac = 2*np.arccos(q0)/np.sqrt(1-q0**2)\n dfac = -2/(1-q0**2)\n dfac += 2*q0*np.arccos(q0)/(1-q0**2)**1.5\n return fac, dfac\n\ndef get_expmap(x, y):\n \"\"\"\n Calculate the exponential map that rotates x into maximal coincidence with y\n to minimize the RMSD.\n \n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n 3-element array representing exponential map\n \"\"\"\n q = get_quat(x, y)\n # print q\n fac, _ = calc_fac_dfac(q[0])\n v = fac*q[1:]\n return v\n\ndef get_expmap_der(x,y):\n \"\"\"\n Given trial coordinates x and target coordinates y, \n return the derivatives of the exponential map that brings\n x into maximal coincidence (minimum RMSD) with y, with\n respect to the coordinates of x.\n\n Parameters\n ----------\n x : numpy.ndarray\n Trial coordinates, dimensionality (number of atoms) x 3\n y : numpy.ndarray\n Target coordinates, dimensionalty must match trial coordinates\n\n Returns\n -------\n numpy.ndarray\n u, w, i: \n First two dimensions are (n_atoms, 3), the variables being differentiated\n Third dimension is 3, the elements of the exponential map derivatives with respect to atom u, dimension w\n \"\"\"\n q = get_quat(x,y)\n v = get_expmap(x,y)\n fac, dfac = calc_fac_dfac(q[0])\n dvdq = np.zeros((4, 3), dtype=float)\n dvdq[0, :] = dfac*q[1:]\n for i in range(3):\n dvdq[i+1, i] = fac\n fdcheck = False\n if fdcheck:\n h = 1e-6\n fac, _ = calc_fac_dfac(q[0])\n VZero = fac*q[1:]\n for i in range(4):\n # Do backwards difference only, because arccos of q[0] > 1 is undefined\n q[i] -= h\n fac, _ = calc_fac_dfac(q[0])\n VMinus = fac*q[1:]\n q[i] += h\n FDiffV = (VZero-VMinus)/h\n print(i, dvdq[i], FDiffV, np.max(np.abs(dvdq[i]-FDiffV)))\n # Dimensionality: Number of atoms, number of dimensions (3), number of elements in q 
(4)\n dqdx = get_q_der(x, y)\n # Dimensionality: Number of atoms, number of dimensions (3), number of elements in v (3)\n dvdx = np.zeros((x.shape[0], 3, 3), dtype=float)\n for u in range(x.shape[0]):\n for w in range(3):\n dqdx_uw = dqdx[u, w]\n for i in range(4):\n dvdx[u, w, :] += dvdq[i, :] * dqdx[u, w, i]\n if fdcheck:\n h = 1e-3\n for u in range(x.shape[0]):\n for w in range(3):\n x[u, w] += h\n VPlus = get_expmap(x, y)\n x[u, w] -= 2*h\n VMinus = get_expmap(x, y)\n x[u, w] += h\n FDiffV = (VPlus-VMinus)/(2*h)\n print(u, w, np.max(np.abs(dvdx[u, w]-FDiffV)))\n return dvdx\n\ndef main():\n M = Molecule(sys.argv[1])\n # The target structure\n Y = M.xyzs[0]\n # The structure being rotated\n X = M.xyzs[1]\n # Copy the structure being rotated\n Z = X.copy()\n get_expmap_der(X, Y)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.matrix", "numpy.dot", "numpy.abs", "numpy.sqrt", "numpy.eye", "numpy.arccos", "numpy.max", "numpy.linalg.eigh", "numpy.zeros_like", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mobiledgex/edge-cloud-sampleapps
[ "f9d7b04cf56d949a7cc78bb9987608024f583af4" ]
[ "TritonInferenceServer/clients/rest_client.py" ]
[ "# Copyright 2021 MobiledgeX, Inc. All rights and licenses reserved.\n# MobiledgeX, Inc. 156 2nd Street #408, San Francisco, CA 94105\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nREST client script to send images to the Triton Inference Server and measure latency.\nHas no tritonclient or nvidia dependencies.\n\"\"\"\nimport sys\nimport os\nimport os.path\nimport platform\nimport requests\nimport subprocess\nimport re\nimport socket\nimport ssl\nimport json\nimport time\nimport logging\nimport io\nimport cv2\nimport argparse\nfrom threading import Thread\nimport numpy as np\n\nutil_dir = \"../utilities\"\nsys.path.append(os.path.join(os.path.dirname(__file__), util_dir))\nfrom stats import RunningStats\n\nPING_INTERVAL = 1 # Seconds\nSERVER_STATS_INTERVAL = 1.5 # Seconds\nSERVER_STATS_DELAY = 2 # Seconds\n\nhex_colors = [\"#238bc0\", \"#ff9209\", \"#32ab39\", \"#e03d34\", \"#a57ec8\", \"#9e6a5d\", \"#ea90cc\", \"#919191\",\n \"#c8c62b\", \"#00c8d8\", \"#bbd1ec\", \"#ffc689\", \"#a6e19b\", \"#ffaaa6\", \"#cfbfdd\", \"#cfaca5\",\n \"#fac4da\", \"#d1d1d1\", \"#e1e09e\", \"#ace0e9\"]\ncolors=[]\nfor h in hex_colors:\n h = h.lstrip('#')\n colors.append(tuple(int(h[i:i+2], 16) for i in (0, 2, 4)))\n\ncolors = colors * 5 # Extend array in case we get more than 20 objects detected\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')\nfh = logging.FileHandler('rest_client.log')\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nif platform.system() == \"Darwin\":\n PING_EXEC = \"/sbin/ping\"\n PING_REGEX = r'round-trip min/avg/max/stddev = (.*)/(.*)/(.*)/(.*) ms'\nelse:\n PING_EXEC = \"/bin/ping\"\n PING_REGEX = r'rtt min/avg/max/mdev = (.*)/(.*)/(.*)/(.*) ms'\n\nclass Client:\n \"\"\" Base Client class \"\"\"\n\n MULTI_THREADED = False\n # Initialize \"Grand total\" class variables.\n stats_latency_full_process = RunningStats()\n stats_latency_network_only = RunningStats()\n num_success = 0\n\n def __init__(self, host, port):\n # Initialize instance variables.\n self.host = host\n self.port = port\n self.model_name = None\n self.net_latency_method = None\n self.tls = False\n self.continue_on_error = False\n self.running = False\n self.num_success = 0\n self.do_server_stats = False\n self.show_responses = False\n self.stats_latency_full_process = RunningStats()\n self.stats_latency_network_only = RunningStats()\n self.media_file_name = None\n self.out_dir = None\n self.latency_start_time = 0\n self.loop_count = 0\n self.num_repeat = 0\n self.num_vid_repeat = 0\n self.num_vid_plays = 0\n self.filename_list = []\n self.filename_list_index = 0\n self.video = None\n self.video_frame_num = 0\n self.resize = True\n self.resize_long = 240\n self.resize_short = 180\n self.skip_frames = 1\n logger.debug(\"host:port = %s:%d\" %(self.host, 
self.port))\n\n def start(self):\n self.running = True\n logger.debug(\"media file(s) %s\" %(self.filename_list))\n video_extensions = ('mp4', 'avi', 'mov')\n if self.filename_list[0].endswith(video_extensions):\n logger.debug(\"It's a video\")\n self.media_file_name = self.filename_list[0]\n self.video = cv2.VideoCapture(self.media_file_name)\n\n def get_next_image(self):\n if self.video is not None:\n for x in range(self.skip_frames):\n ret, image = self.video.read()\n self.out_file_name = f\"video-frame-{self.video_frame_num:04}.jpg\"\n self.video_frame_num += 1\n if not ret:\n logger.debug(\"End of video\")\n self.num_vid_plays += 1\n print(f\"{self.num_vid_repeat=} {self.num_vid_plays=}\")\n if self.num_vid_plays < self.num_vid_repeat:\n logger.info(\"Restarting the video\")\n self.video = cv2.VideoCapture(self.media_file_name)\n return self.get_next_image()\n else:\n logger.info(\"Done with repeats\")\n return None\n else:\n # If the filename_list array has more than 1, get the next value.\n if len(self.filename_list) > 1:\n self.filename_list_index += 1\n if self.filename_list_index >= len(self.filename_list):\n self.filename_list_index = 0\n else:\n self.filename_list_index = 0\n\n if self.stats_latency_full_process.n >= self.num_repeat:\n return None\n\n self.media_file_name = self.filename_list[self.filename_list_index]\n self.out_file_name = self.media_file_name\n image = cv2.imread(self.media_file_name)\n\n if self.resize:\n image = self.resize_image(image)\n\n # Whether it's from a video frame or image file, at this point the image\n # data is a numpy array. Here we convert it to a raw byte stream.\n res, image = cv2.imencode('.JPEG', image)\n image = image.tobytes()\n\n logger.debug(\"Image data (first 32 bytes logged): %s\" %image[:32])\n return image\n\n def resize_image(self, image):\n w = image.shape[1]\n h = image.shape[0]\n logger.debug(\"Frame size: %dx%d\" %(w, h))\n if w > h:\n resize_w = self.resize_long\n resize_h = self.resize_short\n else:\n resize_w = self.resize_short\n resize_h = self.resize_long\n image = cv2.resize(image, (resize_w, resize_h))\n\n logger.debug(\"Resized image to: %dx%d\" %(resize_w, resize_h))\n return image\n\n def measure_network_latency(self):\n while self.running:\n if self.net_latency_method == \"socket\":\n self.time_open_socket()\n elif self.net_latency_method == \"ping\":\n self.icmp_ping()\n time.sleep(PING_INTERVAL)\n\n def measure_server_stats(self):\n # TODO: Use /v2/models/yolov4/versions/1/stats\n print(\"TODO\")\n\n def time_open_socket(self):\n now = time.time()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(2)\n result = sock.connect_ex((self.host, self.port))\n if result != 0:\n logger.error(\"Could not connect to %s on port %d\" %(self.host, self.port))\n return\n millis = (time.time() - now)*1000\n elapsed = \"%.3f\" %millis\n if self.show_responses:\n logger.info(\"%s ms to open socket\" %(elapsed))\n self.stats_latency_network_only.push(millis)\n Client.stats_latency_network_only.push(millis)\n\n\n def icmp_ping(self):\n args=[PING_EXEC, '-c', '1', '-W', '1', self.host]\n p_ping = subprocess.Popen(args,\n shell=False,\n stdout=subprocess.PIPE)\n # save ping stdout\n p_ping_out = str(p_ping.communicate()[0])\n\n if (p_ping.wait() == 0):\n # rtt min/avg/max/mdev = 61.994/61.994/61.994/0.000 ms\n search = re.search(PING_REGEX, p_ping_out, re.M|re.I)\n ping_rtt = float(search.group(2))\n if self.show_responses:\n logger.info(\"%s ms ICMP ping\" %(ping_rtt))\n 
self.stats_latency_network_only.push(ping_rtt)\n Client.stats_latency_network_only.push(ping_rtt)\n else:\n logger.error(\"ICMP ping failed\")\n\n def process_result(self, result, image, inference_header_content_length):\n millis = (time.time() - self.latency_start_time)*1000\n self.stats_latency_full_process.push(millis)\n Client.stats_latency_full_process.push(millis)\n\n if self.model_name == \"ensemble_dali_inception\":\n data = result[inference_header_content_length:]\n cls = data.split(':')\n confidence = float(cls[0])\n class_name = cls[2]\n output = f\"{class_name} - Confidence={confidence:0.2}\"\n\n elif self.model_name == \"ensemble_dali_yolov4\":\n try:\n # This is the overall response, in JSON format. We have\n # to drill down a bit to get the data we're interested in,\n # which is stored as a string object, but is also JSON-encoded.\n decoded_json = json.loads(result)\n decoded_json = decoded_json['outputs'][0]['data'][0]\n output = decoded_json\n # Parse the actual object detection inference results\n decoded_json = json.loads(output)\n except Exception as e:\n logger.error(\"Could not decode result. Exception: %s. Result: %s\" %(e, result))\n return\n if 'success' in decoded_json:\n if decoded_json['success'] == True:\n self.num_success += 1\n Client.num_success += 1\n\n if self.out_dir is not None:\n nparr = np.frombuffer(image, np.uint8)\n img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n image_w = img_np.shape[1]\n image_h = img_np.shape[0]\n ratio_x = image_w / 608\n ratio_y = image_h / 608\n obj_num = 0\n for json_object in decoded_json['objects']:\n label = json_object['class']\n score = float(json_object['confidence'])\n percent = f\"{score*100:0.1f}\"\n rect = json_object['rect']\n x1 = int(rect[0] * ratio_x)\n y1 = int(rect[1] * ratio_y)\n x2 = int(rect[2] * ratio_x)\n y2 = int(rect[3] * ratio_y)\n text = f\"{label} ({percent}%)\"\n retval, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.50, 2)\n cv2.rectangle(img_np, (x1, y1), (x2, y2), colors[obj_num], 2)\n cv2.putText(img_np, text, (x1 + 2, y1 + retval[1] + 2), cv2.FONT_HERSHEY_SIMPLEX, 0.67, colors[obj_num], 2)\n obj_num += 1\n\n cv2.imwrite(self.out_dir + \"/\" + os.path.basename(self.out_file_name), img_np) \n\n if self.show_responses:\n elapsed = \"%.3f\" %millis\n logger.info(\"%s ms to send and receive: %s\" %(elapsed, output))\n\n def display_results(self):\n self.running = False\n if not self.show_responses or not Client.MULTI_THREADED:\n return\n\n if self.stats_latency_full_process.n > 0:\n logger.info(\"====> Average Latency Full Process=%.3f ms (stddev=%.3f)\" %(self.stats_latency_full_process.mean(), self.stats_latency_full_process.stddev()))\n if self.stats_latency_network_only.n > 0:\n logger.info(\"====> Average Latency Network Only=%.3f ms (stddev=%.3f)\" %(self.stats_latency_network_only.mean(), self.stats_latency_network_only.stddev()))\n\nclass RestClient(Client):\n def __init__(self, host, port=8000):\n if port is None:\n port = 8000\n Client.__init__(self, host, port)\n\n def start(self):\n Client.start(self)\n self.url = f\"http://{self.host}:{self.port}/v2/models/{self.model_name}/infer\"\n if self.tls:\n self.url = self.url.replace(\"http\", \"https\", 1)\n\n while True:\n image = self.get_next_image()\n if image is None:\n break\n\n self.latency_start_time = time.time()\n response = self.send_image(image)\n logger.debug(f\"{response.headers =}\")\n if 'Inference-Header-Content-Length' in response.headers:\n inference_header_content_length = 
int(response.headers['Inference-Header-Content-Length'])\n else:\n inference_header_content_length = 0\n content = str(response.content, 'utf-8')\n if response.status_code != 200:\n logger.error(\"non-200 response: %d: %s\" %(response.status_code, content))\n self.num_repeat -= 1\n if self.continue_on_error:\n continue\n else:\n break\n self.process_result(content, image, inference_header_content_length+4)\n\n logger.debug(\"Done\")\n self.display_results()\n\n def send_image(self, image):\n \"\"\"\n Sends the raw image data with a 'Content-Type' of 'image/jpeg'.\n \"\"\"\n if self.model_name == \"ensemble_dali_yolov4\":\n body_template = '{\"inputs\":[{\"name\":\"IMAGE\",\"shape\":[1,$size],\"datatype\":\"UINT8\",\"parameters\":{\"binary_data_size\":$size}}],\"outputs\":[{\"name\":\"OBJECTS_JSON\"}]}'\n elif self.model_name == \"ensemble_dali_inception\":\n body_template = '{\"inputs\":[{\"name\":\"INPUT\",\"shape\":[1,$size],\"datatype\":\"UINT8\",\"parameters\":{\"binary_data_size\":$size}}],\"outputs\":[{\"name\":\"OUTPUT\",\"parameters\":{\"classification\":1,\"binary_data\":true}}]}'\n else:\n logger.error(f\"Unknown model name: {model_name}\")\n sys.exit(1)\n\n data = body_template.replace(\"$size\", str(len(image)))\n headers = {'Inference-Header-Content-Length': str(len(data))}\n logger.debug(f\"POST {self.url}, headers {headers}\")\n data = bytes(data, 'utf-8') + image\n return requests.post(self.url, data=data, headers=headers, verify=self.tls_verify)\n\nclass ErrorCatchingArgumentParser(argparse.ArgumentParser):\n def exit(self, status=0, message=None):\n if status:\n raise Exception(f'ArgumentParser error: {message}')\n\ndef benchmark(arguments=None, django=False):\n # This handler will save everything logged to a String which\n # can be accessed with log_stream.getvalue()\n log_stream = io.StringIO()\n sh = logging.StreamHandler(log_stream)\n formatter = logging.Formatter('%(asctime)s - %(process)d - %(message)s')\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n if django:\n logger.removeHandler(fh)\n logger.removeHandler(ch)\n parser = ErrorCatchingArgumentParser()\n else:\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', '--server', required=True, help='Server host name or IP address.')\n parser.add_argument('-m', '--model_name', required=True, choices=['ensemble_dali_yolov4', 'ensemble_dali_inception'], help='Model name to use for inference')\n parser.add_argument('-n', '--network-latency', required=False, choices=['ping', 'socket', 'NONE'], default='socket', help='Network-only latency test method.')\n parser.add_argument('-c', '--connection-method', required=False, choices=['rest'], default='rest', help='Connection type.')\n parser.add_argument('-f', '--filename', required=False, help='Name of image file to send.')\n parser.add_argument('-d', '--directory', required=False, help='Directory containing image files to send (*.jpg, *.png).')\n parser.add_argument('-r', '--repeat', type=int, default=1, help='Number of times to repeat.')\n parser.add_argument('-t', '--threads', type=int, default=1, help='Number of concurrent execution threads.')\n parser.add_argument('--skip-frames', type=int, default=1, help='For video, send every Nth frame.')\n parser.add_argument('-p', '--port', type=int, help='Port number')\n parser.add_argument('--fullsize', action='store_true', help='Maintain original image size. 
Default is to shrink the image before sending.')\n parser.add_argument('--tls', action='store_true', help='Use https connection')\n parser.add_argument('--noverify', action='store_true', help='Disable TLS cert verification')\n parser.add_argument('--continue-on-error', action='store_true', default=False, help='Continue processing when error occurs')\n parser.add_argument('--show-responses', action='store_true', help='Show responses.')\n parser.add_argument('--server-stats', action='store_true', help='Get server stats every Nth frame.')\n parser.add_argument('--out-dir', type=str, required=False, default=None, help='Directory to write processed image to')\n args = parser.parse_args(arguments)\n\n # Clear the Class variables. Otherwise, in the case we are instantiated by\n # a Django view, the accumulation of stats would continue session to session.\n Client.stats_latency_full_process.clear()\n Client.stats_latency_network_only.clear()\n\n start_time = time.time()\n\n if args.threads > 1:\n Client.MULTI_THREADED = True\n for x in range(args.threads):\n if args.connection_method == \"rest\":\n client = RestClient(args.server, args.port)\n else:\n # This should be impossible because the ArgumentParser enforces a valid choice.\n logger.error(\"Unknown connection-method: %s\" %args.connection_method)\n return False\n\n if args.filename != None and args.directory != None:\n logger.error(\"Can't include both filename and directory arguments\")\n parser.print_usage()\n return False\n\n if args.filename != None:\n if not os.path.isfile(args.filename):\n return [False, \"%s doesn't exist\" %args.filename]\n client.filename_list.append(args.filename)\n\n elif args.directory != None:\n if not os.path.isdir(args.directory):\n return [False, \"%s doesn't exist\" %args.directory]\n valid_extensions = ('jpg','jpeg', 'png')\n files = os.listdir(args.directory)\n for file in files:\n if file.endswith(valid_extensions):\n client.filename_list.append(args.directory+\"/\"+file)\n if len(client.filename_list) == 0:\n return [False, \"%s contains no valid image files\" %args.directory]\n\n else:\n logger.error(\"Must include either filename or directory argument\")\n parser.print_usage()\n return False\n\n client.filename_list_index = -1\n client.num_repeat = args.repeat * len(client.filename_list)\n client.num_vid_repeat = args.repeat\n client.do_server_stats = args.server_stats\n client.show_responses = args.show_responses\n client.model_name = args.model_name\n client.net_latency_method = args.network_latency\n client.resize = not args.fullsize\n client.skip_frames = args.skip_frames\n client.tls = args.tls\n client.tls_verify = not args.noverify\n client.continue_on_error = args.continue_on_error\n client.out_dir = args.out_dir\n\n thread = Thread(target=client.start)\n thread.start()\n logger.debug(\"Started %s\" %thread)\n time.sleep(0.5) # stagger threads\n\n if args.network_latency != \"NONE\":\n thread = Thread(target=client.measure_network_latency)\n thread.start()\n logger.debug(\"Started background measure_network_latency %s\" %thread)\n\n if args.server_stats:\n thread = Thread(target=client.measure_server_stats)\n thread.start()\n logger.debug(\"Started background measure_server_stats %s\" %thread)\n\n thread.join()\n\n session_time = time.time() - start_time\n\n if Client.stats_latency_full_process.n + Client.stats_latency_network_only.n > 0:\n fps = Client.stats_latency_full_process.n / session_time\n header1 = f\"Grand totals for {args.server} {args.model_name} {args.connection_method}\"\n 
header2 = f\"{args.threads} threads repeated {args.repeat} times on {len(client.filename_list)} files. {Client.stats_latency_full_process.n} total frames. FPS={fps:.2f}\"\n separator = \"\"\n for s in header1: separator += \"=\"\n logger.info(separator)\n logger.info(header1)\n logger.info(header2)\n logger.info(separator)\n if Client.stats_latency_full_process.n > 0:\n fps = 1/Client.stats_latency_full_process.mean()*1000\n logger.info(\"====> Average Latency Full Process=%.3f ms (stddev=%.3f) FPS=%.2f\" %(Client.stats_latency_full_process.mean(), Client.stats_latency_full_process.stddev(), fps))\n if Client.stats_latency_network_only.n > 0:\n logger.info(\"====> Average Latency Network Only=%.3f ms (stddev=%.3f)\" %(Client.stats_latency_network_only.mean(), Client.stats_latency_network_only.stddev()))\n\n # The following line outputs CSV data that can be imported to a spreadsheet.\n logger.info(\"\")\n logger.info(\"Server, Full Process, Network Only\")\n logger.info(f\"{args.server}, {Client.stats_latency_full_process.mean()}, {Client.stats_latency_network_only.mean()}\")\n\n TEST_PASS = (Client.stats_latency_full_process.n == Client.num_success)\n logger.info(\"TEST_PASS=%r\" %TEST_PASS)\n\n else:\n logger.info(\"No results\")\n\n return [True, log_stream.getvalue()]\n\nif __name__ == \"__main__\":\n ret = benchmark()\n if not ret[0]:\n print(ret)\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanyabt/kg-microbe
[ "5886197d5fa00ab4c2e6af09dd9f5a3bc453743f" ]
[ "kg_microbe/utils/nlp_utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport configparser\nfrom kgx.cli.cli_utils import transform\nfrom oger.ctrl.router import Router, PipelineServer\nfrom oger.ctrl.run import run as og_run\nfrom kg_microbe.utils import biohub_converter as bc\nimport pandas as pd\n\nSETTINGS_FILENAME = 'settings.ini'\n\ndef create_settings_file(path: str, ont: str = 'ALL') -> None: \n \"\"\"\n Creates the settings.ini file for OGER to get parameters.\n\n :param path: Path of the 'nlp' folder\n :param ont: The ontology to be used as dictionary ['ALL', 'ENVO', 'CHEBI']\n :return: None.\n\n - The 'Shared' section declares global variables that can be used in other sections\n e.g. Data root.\n root = location of the working directory\n accessed in other sections using => ${Shared:root}/\n\n - Input formats accepted:\n txt, txt_json, bioc_xml, bioc_json, conll, pubmed,\n pxml, pxml.gz, pmc, nxml, pubtator, pubtator_fbk,\n becalmabstracts, becalmpatents\n\n - Two iter-modes available: [collection or document]\n document:- 'n' input files = 'n' output files\n (provided every file has ontology terms)\n collection:- n input files = 1 output file\n\n - Export formats possible:\n tsv, txt, text_tsv, xml, text_xml, bioc_xml,\n bioc_json, bionlp, bionlp.ann, brat, brat.ann,\n conll, pubtator, pubanno_json, pubtator, pubtator_fbk,\n europepmc, europepmc.zip, odin, becalm_tsv, becalm_json\n These can be passed as a list for multiple outputs too.\n\n - Multiple Termlists can be declared in separate sections\n e.g. [Termlist1], [Termlist2] ...[Termlistn] with each having\n their own paths\n \"\"\"\n\n config = configparser.ConfigParser()\n config['Section'] = {}\n config['Shared'] = {}\n \n # Settings required by OGER\n config['Main'] = {\n 'input-directory' : os.path.join(path,'input'),\n 'output-directory' : os.path.join(path,'output'),\n 'pointer-type' : 'glob',\n 'pointers' : '*.tsv',\n 'iter-mode' : 'collection',\n 'article-format' : 'txt_tsv',\n 'export_format': 'tsv',\n 'termlist_stopwords': os.path.join(path,'stopwords','stopwords.txt')\n\n }\n\n if ont == 'ENVO':\n config.set('Main','termlist_path', os.path.join(path,'terms/envo_termlist.tsv'))\n elif ont == 'CHEBI':\n config.set('Main','termlist_path', os.path.join(path,'terms/chebi_termlist.tsv'))\n elif ont == 'ECOCORE':\n config.set('Main','termlist_path', os.path.join(path,'terms/ecocore_termlist.tsv'))\n elif ont == 'GO':\n config.set('Main','termlist_path', os.path.join(path,'terms/go_termlist.tsv'))\n elif ont == 'PATO':\n config.set('Main','termlist_path', os.path.join(path,'terms/pato_termlist.tsv'))\n else:\n #config.set('Main', 'termlist1_path', os.path.join(path,'terms/envo_termlist.tsv'))\n config.set('Main', 'termlist1_path', os.path.join(path,'terms/chebi_termlist.tsv'))\n #config.set('Main', 'termlist2_path', os.path.join(path,'terms/ecocore_termlist.tsv'))\n \n # This is how OGER prescribes in it's test file but above works too.\n '''config['Termlist1'] = {\n 'path' : os.path.join(path,'terms/envo_termlist.tsv')\n }\n\n config['Termlist2'] = {\n 'path' : os.path.join(path,'terms/chebi_termlist.tsv')\n }'''\n # Write\n with open(os.path.join(path, SETTINGS_FILENAME), 'w') as settings_file:\n config.write(settings_file)\n\n\ndef create_termlist(path: str, ont: str) -> None:\n \"\"\"\n Create termlist.tsv files from ontology JSON files for NLP\n\n TODO: Replace this code once runNER is installed and remove 'kg_microbe/utils/biohub_converter.py'\n \"\"\"\n ont_int = ont+'.json'\n \n json_input = 
os.path.join(path,ont_int)\n tsv_output = os.path.join(path,ont)\n\n transform(inputs=[json_input], input_format='obojson', output= tsv_output, output_format='tsv')\n\n ont_nodes = os.path.join(path, ont + '_nodes.tsv')\n ont_terms = os.path.abspath(os.path.join(os.path.dirname(json_input),'..','nlp/terms/', ont+'_termlist.tsv'))\n bc.parse(ont_nodes, ont_terms)\n\n\ndef prep_nlp_input(path: str, columns: list, dic: str)-> str:\n '''\n Creates a tsv which forms the input for OGER\n\n :param path: Path to the file which has text to be analyzed\n :param columns: The first column HAS to be an id column.\n :param dic: The Ontology to be used as a dictionary for NLP\n :return: Filename (str)\n '''\n df = pd.read_csv(path, low_memory=False, usecols=columns)\n sub_df = df.dropna()\n \n if 'pathways' in columns:\n sub_df['pathways'] = sub_df['pathways'].str.replace('_', ' ')\n\n # New way of doing this : PR submitted to Ontogene for merging code.\n fn = 'nlp'+dic\n nlp_input = os.path.abspath(os.path.join(os.path.dirname(path),'..','nlp/input/'+fn+'.tsv'))\n sub_df.to_csv(nlp_input, sep='\\t', index=False)\n return fn\n \n\n\ndef run_oger(path: str , input_file_name: str , n_workers :int = 1 ) -> pd.DataFrame:\n '''\n Runs OGER using the settings.ini file created previously.\n\n :param path: Path of the input file.\n :param input_file_name: Filename.\n :param n_workers: Number of threads to run (default: 1).\n :return: Pandas DataFrame containing the output of OGER analysis.\n\n '''\n config = configparser.ConfigParser()\n config.read(os.path.join(path, SETTINGS_FILENAME))\n sections = config._sections\n settings = sections['Main']\n settings['n_workers'] = n_workers\n og_run(**settings)\n df = process_oger_output(path, input_file_name)\n \n return df\n\ndef process_oger_output(path: str, input_file_name: str) -> pd.DataFrame:\n \"\"\"\n The OGER output is a TSV which is imported and only the terms that occurred in the text file\n are considered and a dataframe of relevant information is returned\n \n :param path: Path to the folder containing relevant files\n :param input_file_name: OGER output (tsv file)\n :return: Pandas Dataframe containing required data for further analyses.\n \"\"\"\n \n cols = ['TaxId', 'Biolink', 'BeginTerm', 'EndTerm', 'TokenizedTerm', 'PreferredTerm', \\\n 'CURIE', 'NaN1', 'SentenceID', 'NaN2', 'UMLS_CUI']\n df = pd.read_csv(os.path.join(path, 'output',input_file_name+'.tsv'), sep='\\t', names=cols)\n sub_df = df[['TaxId', 'Biolink','TokenizedTerm', 'PreferredTerm', 'CURIE']]\n\n sub_df['StringMatch'] = sub_df.apply(lambda row : assign_string_match_rating(row), axis=1) \n sub_df = sub_df.drop_duplicates()\n sub_df.to_csv(os.path.join(path, 'output',input_file_name +'Filtered.tsv'), sep='\\t', index=False)\n #interested_df = sub_df.loc[(df['TokenizedTerm'] == df['PreferredTerm'].str.replace(r\"\\(.*\\)\",\"\"))]\n #interested_df = interested_df.drop(columns = ['PreferredTerm']).drop_duplicates()\n #interested_df.to_csv(os.path.join(path, 'output',input_file_name +'Filtered.tsv'), sep='\\t', index=False)\n '''\n TODO: Figure out synonym categories amongst nodes using OBO Format\n - Synonyms are always scoped into one of four disjoint categories: EXACT, BROAD, NARROW, RELATED\n LOGIC:\n IF TokenizedTerm and PreferredTerm are used interchangeably: EXACT\n ELIF TokenizedTerm is parent of PreferredTerm: BROAD\n ELIF TokenizedTerm is child of PreferredTerm: NARROW\n ELSE: RELATED\n\n '''\n return sub_df\n\ndef assign_string_match_rating(dfRow):\n '''\n Assign another column 
categorizing the level of match between TokenizedTerm and PreferredTerm\n - Exact\n - Partial\n - Synonym\n\n :param dfRow: each row of the OGER output\n :returns: Same dataframe with an extra 'matchRating' column\n '''\n result = None\n if dfRow['TokenizedTerm'] == dfRow['PreferredTerm']:\n result = 'Exact'\n elif dfRow['TokenizedTerm'] in dfRow['PreferredTerm']:\n result = 'Partial'\n else:\n result = 'NoMatch'\n return result" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
shingte/Face-Expression-Transfer-by-Audio
[ "7aa93880bbc62b7f9fe25428d425ea3f0f73c8b0" ]
[ "code/models.py" ]
[ "import torch\nimport torch.nn as nn\n# from pts3d import *\nfrom ops import *\nimport torchvision.models as models\nimport functools\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport numpy as np\nfrom convolutional_rnn import Conv2dGRU\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\nclass AT_net(nn.Module):\n def __init__(self):\n super(AT_net, self).__init__()\n self.lmark_encoder = nn.Sequential(\n nn.Linear(6,256),\n nn.ReLU(True),\n nn.Linear(256,512),\n nn.ReLU(True),\n\n )\n self.audio_eocder = nn.Sequential(\n conv2d(1,64,3,1,1),\n conv2d(64,128,3,1,1),\n nn.MaxPool2d(3, stride=(1,2)),\n conv2d(128,256,3,1,1),\n conv2d(256,256,3,1,1),\n conv2d(256,512,3,1,1),\n nn.MaxPool2d(3, stride=(2,2))\n )\n self.audio_eocder_fc = nn.Sequential(\n nn.Linear(1024 *12,2048),\n nn.ReLU(True),\n nn.Linear(2048,256),\n nn.ReLU(True),\n \n )\n self.lstm = nn.LSTM(256*3,256,3,batch_first = True)\n self.lstm_fc = nn.Sequential(\n nn.Linear(256,6),\n )\n\n def forward(self, example_landmark, audio):\n hidden = ( torch.autograd.Variable(torch.zeros(3, audio.size(0), 256).cuda()),\n torch.autograd.Variable(torch.zeros(3, audio.size(0), 256).cuda()))\n example_landmark_f = self.lmark_encoder(example_landmark)\n lstm_input = []\n for step_t in range(audio.size(1)):\n current_audio = audio[ : ,step_t , :, :].unsqueeze(1)\n current_feature = self.audio_eocder(current_audio)\n current_feature = current_feature.view(current_feature.size(0), -1)\n current_feature = self.audio_eocder_fc(current_feature)\n features = torch.cat([example_landmark_f, current_feature], 1)\n lstm_input.append(features)\n lstm_input = torch.stack(lstm_input, dim = 1)\n lstm_out, _ = self.lstm(lstm_input, hidden)\n fc_out = []\n for step_t in range(audio.size(1)):\n fc_in = lstm_out[:,step_t,:]\n fc_out.append(self.lstm_fc(fc_in))\n return torch.stack(fc_out, dim = 1)\n\n\nclass AT_single(nn.Module):\n def __init__(self):\n super(AT_single, self).__init__()\n # self.lmark_encoder = nn.Sequential(\n # nn.Linear(6,256),\n # nn.ReLU(True),\n # nn.Linear(256,512),\n # nn.ReLU(True),\n\n # )\n self.audio_eocder = nn.Sequential(\n conv2d(1,64,3,1,1,normalizer = None),\n conv2d(64,128,3,1,1,normalizer = None),\n nn.MaxPool2d(3, stride=(1,2)),\n conv2d(128,256,3,1,1,normalizer = None),\n conv2d(256,256,3,1,1,normalizer = None),\n conv2d(256,512,3,1,1,normalizer = None),\n nn.MaxPool2d(3, stride=(2,2))\n )\n self.audio_eocder_fc = nn.Sequential(\n nn.Linear(1024 *12,2048),\n nn.ReLU(True),\n nn.Linear(2048,256),\n nn.ReLU(True),\n nn.Linear(256, 6)\n )\n # self.fusion = nn.Sequential(\n # nn.Linear(256 *3, 256),\n # nn.ReLU(True),\n # nn.Linear(256, 6)\n # )\n\n def forward(self, audio):\n current_audio = audio.unsqueeze(1)\n current_feature = self.audio_eocder(current_audio)\n current_feature = current_feature.view(current_feature.size(0), -1)\n\n output = self.audio_eocder_fc(current_feature)\n \n return output\n\n\nclass GL_Discriminator(nn.Module):\n\n\n def __init__(self):\n super(GL_Discriminator, self).__init__()\n\n self.image_encoder_dis = nn.Sequential(\n conv2d(3,64,3,2, 1,normalizer=None),\n # conv2d(64, 64, 4, 2, 1),\n conv2d(64, 128, 3, 2, 1),\n\n conv2d(128, 256, 3, 2, 1),\n\n conv2d(256, 512, 3, 2, 1),\n )\n self.encoder = nn.Sequential(\n nn.Linear(136, 256),\n nn.ReLU(True),\n nn.Linear(256, 512),\n nn.ReLU(True),\n )\n self.decoder = nn.Sequential(\n nn.Linear(1024, 512),\n nn.ReLU(True),\n nn.Linear(512, 136),\n 
nn.Tanh()\n )\n self.img_fc = nn.Sequential(\n nn.Linear(512*8*8, 512),\n nn.ReLU(True),\n )\n\n self.lstm = nn.LSTM(1024,256,3,batch_first = True)\n self.lstm_fc = nn.Sequential(\n nn.Linear(256,136),\n nn.Tanh())\n self.decision = nn.Sequential(\n nn.Linear(256,1),\n )\n self.aggregator = nn.AvgPool1d(kernel_size = 16)\n self.activate = nn.Sigmoid()\n def forward(self, xs, example_landmark):\n hidden = ( torch.autograd.Variable(torch.zeros(3, example_landmark.size(0), 256).cuda()),\n torch.autograd.Variable(torch.zeros(3, example_landmark.size(0), 256).cuda()))\n lstm_input = list()\n lmark_feature= self.encoder(example_landmark)\n for step_t in range(xs.size(1)):\n x = xs[:,step_t,:,:, :]\n x.data = x.data.contiguous()\n x = self.image_encoder_dis(x)\n x = x.view(x.size(0), -1)\n x = self.img_fc(x)\n new_feature = torch.cat([lmark_feature, x], 1)\n lstm_input.append(new_feature)\n lstm_input = torch.stack(lstm_input, dim = 1)\n lstm_out, _ = self.lstm(lstm_input, hidden)\n fc_out = []\n decision = []\n for step_t in range(xs.size(1)):\n fc_in = lstm_out[:,step_t,:]\n decision.append(self.decision(fc_in))\n fc_out.append(self.lstm_fc(fc_in)+ example_landmark)\n fc_out = torch.stack(fc_out, dim = 1)\n decision = torch.stack(decision, dim = 2)\n decision = self.aggregator(decision)\n decision = self.activate(decision)\n return decision.view(decision.size(0)), fc_out\n\n\n\nclass VG_net(nn.Module):\n def __init__(self,input_nc = 3, output_nc = 3,ngf = 64, use_dropout=True, use_bias=False,norm_layer=nn.BatchNorm2d,n_blocks = 9,padding_type='zero'):\n super(VG_net,self).__init__()\n dtype = torch.FloatTensor\n\n\n self.image_encoder1 = nn.Sequential(\n nn.ReflectionPad2d(3),\n conv2d(3, 64, 7,1, 0),\n\n # conv2d(64,16,3,1,1),\n conv2d(64,64,3,2,1),\n # conv2d(32,64,3,1,1),\n conv2d(64,128,3,2,1)\n )\n\n self.image_encoder2 = nn.Sequential(\n conv2d(128,256,3,2,1),\n conv2d(256,512,3,2,1)\n )\n\n self.landmark_encoder = nn.Sequential(\n nn.Linear(136, 64),\n nn.ReLU(True)\n )\n\n self.landmark_encoder_stage2 = nn.Sequential(\n conv2d(1,256,3),\n \n )\n self.lmark_att = nn.Sequential(\n nn.ConvTranspose2d(512, 256,kernel_size=3, stride=(2),padding=(1), output_padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.ConvTranspose2d(256, 128,kernel_size=3, stride=(2),padding=(1), output_padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n conv2d(128, 1,3, activation=nn.Sigmoid, normalizer=None)\n )\n self.lmark_feature = nn.Sequential(\n conv2d(256,512,3)) \n \n model = []\n n_downsampling = 4\n mult = 2**(n_downsampling -1 )\n for i in range(n_blocks):\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling ):\n mult = 2**(n_downsampling-i-1 ) \n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=(2),\n padding=(1), output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n if i == n_downsampling-3:\n self.generator1 = nn.Sequential(*model)\n model = []\n\n self.base = nn.Sequential(*model)\n model = []\n model += [nn.Conv2d(ngf//2, output_nc, kernel_size=7, padding=3)]\n model += [nn.Tanh()]\n self.generator_color = nn.Sequential(*model)\n\n model = []\n model += [nn.Conv2d(ngf//2, 1, kernel_size=7, padding=3)]\n model += [nn.Sigmoid()]\n self.generator_attention = nn.Sequential(*model)\n\n self.bottle_neck = nn.Sequential(conv2d(1024,128,3,1,1))\n\n \n self.convGRU = Conv2dGRU(in_channels = 128, out_channels = 512, 
kernel_size = (3), num_layers = 1, bidirectional = False, dilation = 2, stride = 1, dropout = 0.5 )\n \n def forward(self,image, landmarks, example_landmark ):\n # ex_landmark1 = self.landmark_encoder(example_landmark.unsqueeze(2).unsqueeze(3).repeat(1, 1, 128,128))\n image_feature1 = self.image_encoder1(image)\n image_feature = self.image_encoder2(image_feature1)\n ex_landmark1 = self.landmark_encoder(example_landmark.view(example_landmark.size(0), -1))\n ex_landmark1 = ex_landmark1.view(ex_landmark1.size(0), 1, image_feature.size(2), image_feature.size(3) )\n ex_landmark1 = self.landmark_encoder_stage2(ex_landmark1)\n ex_landmark = self.lmark_feature(ex_landmark1)\n \n lstm_input = list()\n lmark_atts = list()\n for step_t in range(landmarks.size(1)):\n landmark = landmarks[:,step_t,:]\n landmark.data = landmark.data.contiguous()\n landmark = self.landmark_encoder(landmark.view(landmark.size(0), -1))\n landmark = landmark.view(landmark.size(0), 1, image_feature.size(2), image_feature.size(3) )\n landmark = self.landmark_encoder_stage2(landmark)\n\n lmark_att = self.lmark_att( torch.cat([landmark, ex_landmark1], dim=1))\n landmark = self.lmark_feature(landmark)\n\n inputs = self.bottle_neck(torch.cat([image_feature, landmark - ex_landmark], dim=1))\n lstm_input.append(inputs)\n lmark_atts.append(lmark_att)\n lmark_atts =torch.stack(lmark_atts, dim = 1)\n lstm_input = torch.stack(lstm_input, dim = 1)\n lstm_output, _ = self.convGRU(lstm_input)\n\n outputs = []\n atts = []\n colors = []\n for step_t in range(landmarks.size(1)):\n input_t = lstm_output[:,step_t,:,:,:]\n v_feature1 = self.generator1(input_t)\n v_feature1_f = image_feature1 * (1- lmark_atts[:,step_t,:,:,:] ) + v_feature1 * lmark_atts[:,step_t,:,:,:] \n base = self.base(v_feature1_f)\n color = self.generator_color(base)\n att = self.generator_attention(base)\n atts.append(att)\n colors.append(color)\n output = att * color + (1 - att ) * image\n outputs.append(output)\n return torch.stack(outputs, dim = 1), torch.stack(atts, dim = 1), torch.stack(colors, dim = 1), lmark_atts\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim),\n nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReflectionPad2d", "torch.nn.ConvTranspose2d", "torch.nn.LSTM", "torch.cat", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.stack", "torch.nn.ReLU", "torch.nn.AvgPool1d", "torch.nn.ReplicationPad2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
elix-tech/infrag
[ "e5fa6b91659ed94e64ffbb3272b90fd3618e017e" ]
[ "egegl/models/handlers/explainer_handler.py" ]
[ "\"\"\"\nExplainer handler class\n\nCopyright (c) 2021 Elix, Inc.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch_geometric.data import Batch\n\nfrom egegl.models.attribution import (\n AbstractExplainer,\n DirectedMessagePassingNetwork,\n GraphConvNetwork,\n)\n\n\nclass ExplainerHandler:\n def __init__(\n self,\n model: AbstractExplainer,\n optimizer=Adam,\n ):\n self.model = model\n self.optimizer = optimizer\n self.criterion = nn.MSELoss()\n\n def train_on_graph_batch(self, batch: Batch, device=torch.device) -> float:\n pred = self.generate_preds(batch.to(device))\n loss = self.criterion(pred.squeeze(), batch.y.to(device))\n\n self.model.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)\n self.optimizer.step()\n\n return loss.item()\n\n def save(self, save_dir: str) -> None:\n self.model.save(save_dir)\n\n def generate_preds(\n self, batch: Batch, return_activations: bool = False\n ) -> torch.Tensor:\n \"\"\"\n Interface to generate predictions depending on the model instance from Batch data\n \"\"\"\n if isinstance(self.model, DirectedMessagePassingNetwork):\n preds = self.model(\n batch.x,\n batch.edge_attr,\n batch.edge_index,\n batch.batch,\n return_activations,\n )\n elif isinstance(self.model, GraphConvNetwork):\n preds = self.model(\n batch.x, batch.edge_index, batch.batch, return_activations\n )\n else:\n raise ValueError(f\"The explainer class {type(self.model)} is not supported\")\n return preds\n\n def generate_attributions(self, batch: Batch) -> torch.Tensor:\n with torch.no_grad():\n activations = self.generate_preds(batch, return_activations=True)\n cam = torch.matmul(\n activations, torch.transpose(self.model.final_dense.weight.data, 0, 1) # type: ignore\n )\n return cam\n" ]
[ [ "torch.transpose", "torch.no_grad", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonboh/virtual_worlds
[ "d9a96d51cfa9ef598b3f17f751ad3fdb516d7e23" ]
[ "tests/universe_tests.py" ]
[ "import pytest\nfrom unittest import mock\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom universe import *\nfrom rules import *\n\n\ndef default_universe_creation():\n np.random.seed(1)\n num_agents = 7\n num_foods = 25\n num_dims = 2\n np.random.seed(2)\n rules = Rules\n agents = [Agent(np.random.rand(1), np.random.rand(num_dims), np.random.rand(1) * 0.01)\n for i in range(0, num_agents)]\n foods = [Matter(np.random.rand(1), np.random.rand(num_foods)) for i in range(0, num_foods)]\n universe = Universe(num_dims, rules, agents, foods)\n return universe\n\n\ndef test_retrieve_info():\n position = [5,6]\n reach = 2\n agents_positions_hp = np.array([[1,2,1],\n [5,5,1],\n [5,6,1]])\n food_positions_hp = np.array([[1,1,1]])\n universe = default_universe_creation()\n universe.agent_positions_hp = agents_positions_hp\n universe.food_positions_hp = food_positions_hp\n info_array = universe.retrieve_info(np.array([position]),reach)\n\n assert np.all(info_array==np.array([[5,5,1],[5,6,1]]))\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Junyoungpark/CGS
[ "a0230d9898f13d1456f8d706b97a93d814b51e64" ]
[ "gvi_train.py" ]
[ "from time import perf_counter\n\nimport dgl\nimport hydra\nimport torch\nimport torch.optim as th_op\nfrom torch.optim.lr_scheduler import CosineAnnealingWarmRestarts\n\nfrom CGS.experiments.gvi.generate_graph import generate_graphs_seq\nfrom CGS.gnn.CGS.get_model import get_model\nfrom CGS.utils.test_utils import get_policy_acc, get_pred_mape, print_perf\n\n\[email protected](config_path=\"./CGS/configs/gvi\", config_name='cgs')\ndef main(config=None):\n device = config.train.device\n\n model = get_model(num_heads=config.model.num_heads,\n gamma=config.model.gamma,\n num_hidden_gn=config.model.num_hidden_gn,\n nf_dim=config.model.nf_dim,\n ef_dim=config.model.ef_dim,\n sol_dim=config.model.sol_dim,\n n_hidden_dim=config.model.n_hidden_dim,\n e_hidden_dim=config.model.e_hidden_dim,\n node_aggregator=config.model.node_aggregator,\n non_linear=config.model.non_linear,\n mlp_num_neurons=config.model.mlp_num_neurons,\n reg_num_neurons=config.model.reg_num_neurons,\n activation=config.model.activation).to(device)\n\n opt = getattr(th_op, config.opt.name)(model.parameters(), lr=config.opt.lr)\n scheduler = CosineAnnealingWarmRestarts(opt, T_0=32)\n loss_fn = torch.nn.MSELoss()\n\n for i in range(config.train.n_updates):\n if i % config.train.generate_g_every == 0:\n train_g = generate_graphs_seq(n_graphs=config.train.bs,\n nA_bd=config.train.na_range,\n nS_bd=config.train.ns_range)\n train_g = dgl.batch(train_g).to(device)\n\n start = perf_counter()\n train_nf, train_ef = train_g.ndata['feat'], train_g.edata['feat']\n train_y = train_g.ndata['value']\n train_pred = model(train_g, train_nf, train_ef)\n loss = loss_fn(train_pred, train_y)\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n scheduler.step()\n fit_time = perf_counter() - start\n\n # logging\n mean_mape, _, _, _ = get_pred_mape(train_g, train_pred)\n mean_acc, _, _ = get_policy_acc(train_g, train_pred)\n\n log_dict = {'iter': i,\n 'loss': loss.item(),\n 'mape': mean_mape,\n 'acc': mean_acc,\n 'fit_time': fit_time,\n 'forward_itr': model.fp_layer.frd_itr,\n 'lr': opt.param_groups[0]['lr']}\n\n print_perf(log_dict)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joelibaceta/backend-codigo-10
[ "75256580ce9975bcfa831fde884362787d82b71f" ]
[ "sesion41/fashion_tester/pre.py" ]
[ "from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport matplotlib.image as mpimg\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n\nim = mpimg.imread('sneaker_test.jpg')#.convert('L')\n#imr = im.resize((28, 28))\n\ngray = rgb2gray(im)\n\nplt.imshow(gray, cmap=plt.get_cmap('gray'), vmin=0, vmax=1)\nplt.show(\n\n)" ]
[ [ "matplotlib.image.imread", "numpy.dot", "matplotlib.pyplot.show", "matplotlib.pyplot.get_cmap" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
asappresearch/aum
[ "892e76eda4b6b85e21bda441d86134e0430ded7a" ]
[ "examples/paper_replication/runner.py" ]
[ "import datetime\nimport logging\nimport os\nimport random\nimport shutil\nimport sys\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nimport fire\nimport tqdm\nimport util\nfrom aum import AUMCalculator\nfrom losses import losses\nfrom models import models\nfrom torchvision import datasets\nfrom torchvision import models as tvmodels\nfrom torchvision import transforms\n\n\nclass _Dataset(torch.utils.data.Dataset):\n \"\"\"\n A wrapper around existing torch datasets to add purposefully mislabeled samplesa and threshold samples.\n\n :param :obj:`torch.utils.data.Dataset` base_dataset: Dataset to wrap\n :param :obj:`torch.LongTensor` indices: List of indices of base_dataset to include (used to create valid. sets)\n :param dict flip_dict: (optional) List mapping sample indices to their (incorrect) assigned label\n :param bool use_threshold_samples: (default False) Whether or not to add threshold samples to this datasets\n :param bool threshold_samples_set_idx: (default 1) Which set of threshold samples to use.\n \"\"\"\n def __init__(self,\n base_dataset,\n indices=None,\n flip_dict=None,\n use_threshold_samples=False,\n threshold_samples_set_idx=1):\n super().__init__()\n self.dataset = base_dataset\n self.flip_dict = flip_dict or {}\n self.indices = torch.arange(len(self.dataset)) if indices is None else indices\n\n # Create optional extra class (for threshold samples)\n self.use_threshold_samples = use_threshold_samples\n if use_threshold_samples:\n num_threshold_samples = len(self.indices) // (self.targets.max().item() + 1)\n start_index = (threshold_samples_set_idx - 1) * num_threshold_samples\n end_index = (threshold_samples_set_idx) * num_threshold_samples\n self.threshold_sample_indices = torch.randperm(len(self.indices))[start_index:end_index]\n\n @property\n def targets(self):\n \"\"\"\n (Hidden) ground-truth labels\n \"\"\"\n if not hasattr(self, \"_target_memo\"):\n try:\n self.__target_memo = torch.tensor(self.dataset.targets)[self.indices]\n except Exception:\n self.__target_memo = torch.tensor([target\n for _, target in self.dataset])[self.indices]\n if torch.is_tensor(self.__target_memo):\n return self.__target_memo\n else:\n return torch.tensor(self.__target_memo)\n\n @property\n def assigned_targets(self):\n \"\"\"\n (Potentially incorrect) assigned labels\n \"\"\"\n if not hasattr(self, \"_assigned_target_memo\"):\n self._assigned_target_memo = self.targets.clone()\n\n # Change labels of mislabeled samples\n if self.flip_dict is not None:\n for i, idx in enumerate(self.indices.tolist()):\n if idx in self.flip_dict.keys():\n self._assigned_target_memo[i] = self.flip_dict[idx]\n\n # Change labels of threshold samples\n if self.use_threshold_samples:\n extra_class = (self.targets.max().item() + 1)\n self._assigned_target_memo[self.threshold_sample_indices] = extra_class\n return self._assigned_target_memo\n\n def __len__(self):\n return len(self.indices)\n\n def __getitem__(self, index):\n input, _ = self.dataset[self.indices[index].item()]\n target = self.assigned_targets[index].item()\n res = input, target, index\n return res\n\n\nclass Runner(object):\n \"\"\"\n Main module for running experiments. 
Can call `load`, `save`, `train`, `test`, etc.\n\n :param str data: Directory to load data from\n :param str save: Directory to save model/results\n :param str dataset: (cifar10, cifar100, tiny_imagenet, webvision50, clothing100k)\n\n :param int num_valid: (default 5000) What size validation set to use (comes from train set, indices determined by seed)\n :param int seed: (default 0) Random seed\n :param int split_seed: (default 0) Which random seed to use for creating trian/val split and for flipping random labels.\n If this arg is not supplied, the split_seed will come from the `seed` arg.\n\n :param float perc_mislabeled: (default 0.) How many samples will be intentionally mislabeled.\n Default is 0. - i.e. regular training without flipping any labels.\n :param str noise_type: (uniform, flip) Mislabeling noise model to use.\n\n :param bool use_threshold_samples: (default False) Whether to add indictaor samples\n :param bool threshold_samples_set_idx: (default 1) Which set of threshold samples to use (based on index)\n\n :param str loss_type: (default cross-entropy) Loss type\n :param bool oracle_training: (default False) If true, the network will be trained only on clean data\n (i.e. all training points with flipped labels will be discarded).\n\n :param str net_type: (resnet, densenet, wide_resnet) Which network to use.\n :param **model_args: Additional argumets to pass to the model\n \"\"\"\n def __init__(self,\n data,\n save,\n dataset=\"cifar10\",\n num_valid=5000,\n seed=0,\n split_seed=None,\n noise_type=\"uniform\",\n perc_mislabeled=0.,\n use_threshold_samples=False,\n threshold_samples_set_idx=1,\n loss_type=\"cross-entropy\",\n oracle_training=False,\n net_type=\"resnet\",\n pretrained=False,\n **model_args):\n if not os.path.exists(save):\n os.makedirs(save)\n if not os.path.isdir(save):\n raise Exception('%s is not a dir' % save)\n self.data = data\n self.savedir = save\n self.perc_mislabeled = perc_mislabeled\n self.noise_type = noise_type\n self.dataset = dataset\n self.net_type = net_type\n self.num_valid = num_valid\n self.use_threshold_samples = use_threshold_samples\n self.threshold_samples_set_idx = threshold_samples_set_idx\n self.split_seed = split_seed if split_seed is not None else seed\n self.seed = seed\n self.loss_func = losses[loss_type]\n self.oracle_training = oracle_training\n self.pretrained = pretrained\n\n # Seed\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n random.seed(0)\n\n # Logging\n self.timestring = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n logging.basicConfig(\n format='%(message)s',\n handlers=[\n logging.StreamHandler(sys.stdout),\n logging.FileHandler(os.path.join(self.savedir, 'log-%s.log' % self.timestring)),\n ],\n level=logging.INFO,\n )\n logging.info('Data dir:\\t%s' % data)\n logging.info('Save dir:\\t%s\\n' % save)\n\n # Make model\n self.num_classes = self.test_set.targets.max().item() + 1\n if use_threshold_samples:\n self.num_classes += 1\n self.num_data = len(self.train_set)\n logging.info(f\"\\nDataset: {self.dataset}\")\n logging.info(f\"Num train: {self.num_data}\")\n logging.info(f\"Num valid: {self.num_valid}\")\n logging.info(f\"Extra class: {self.use_threshold_samples}\")\n logging.info(f\"Num classes: {self.num_classes}\")\n if self.perc_mislabeled:\n logging.info(f\"Noise type: {self.noise_type}\")\n logging.info(f\"Flip perc: {self.perc_mislabeled}\\n\")\n if self.oracle_training:\n logging.info(f\"Training with Oracle Only\")\n\n # Model\n if self.dataset == \"imagenet\" or \"webvision\" in 
self.dataset or \"clothing\" in self.dataset:\n big_models = dict((key, val) for key, val in tvmodels.__dict__.items())\n self.model = big_models[self.net_type](pretrained=False, num_classes=self.num_classes)\n if self.pretrained:\n try:\n self.model.load_state_dict(\n big_models[self.net_type](pretrained=True).state_dict(), strict=False)\n except RuntimeError:\n pass\n # Fix pooling issues\n if \"inception\" in self.net_type:\n self.avgpool_1a = torch.nn.AdaptiveAvgPool2d((1, 1))\n else:\n self.model = models[self.net_type](\n num_classes=self.num_classes,\n initial_stride=(2 if \"tiny\" in self.dataset.lower() else 1),\n **model_args)\n logging.info(f\"Model type: {self.net_type}\")\n logging.info(f\"Model args:\")\n for key, val in model_args.items():\n logging.info(f\" - {key}: {val}\")\n logging.info(f\"Loss type: {loss_type}\")\n logging.info(\"\")\n\n def _make_datasets(self):\n try:\n dataset_cls = getattr(datasets, self.dataset.upper())\n self.big_model = False\n except Exception:\n dataset_cls = datasets.ImageFolder\n if \"tiny\" in self.dataset.lower():\n self.big_model = False\n else:\n self.big_model = True\n\n # Get constants\n if dataset_cls == datasets.ImageFolder:\n tmp_set = dataset_cls(root=os.path.join(self.data, \"train\"))\n else:\n tmp_set = dataset_cls(root=self.data, train=True, download=True)\n if self.dataset.upper() == 'CIFAR10':\n tmp_set.targets = tmp_set.train_labels\n num_train = len(tmp_set) - self.num_valid\n num_valid = self.num_valid\n num_classes = int(max(tmp_set.targets)) + 1\n\n # Create train/valid split\n torch.manual_seed(self.split_seed)\n torch.cuda.manual_seed_all(self.split_seed)\n random.seed(self.split_seed)\n train_indices, valid_indices = torch.randperm(num_train + num_valid).split(\n [num_train, num_valid])\n\n # dataset indices flip\n flip_dict = {}\n if self.perc_mislabeled:\n # Generate noisy labels from random transitions\n transition_matrix = torch.eye(num_classes)\n if self.noise_type == \"uniform\":\n transition_matrix.mul_(1 - self.perc_mislabeled * (num_classes / (num_classes - 1)))\n transition_matrix.add_(self.perc_mislabeled / (num_classes - 1))\n elif self.noise_type == \"flip\":\n source_classes = torch.arange(num_classes)\n target_classes = (source_classes + 1).fmod(num_classes)\n transition_matrix.mul_(1 - self.perc_mislabeled)\n transition_matrix[source_classes, target_classes] = self.perc_mislabeled\n else:\n raise ValueError(f\"Unknonwn noise type {self.noise}\")\n true_targets = (torch.tensor(tmp_set.targets) if hasattr(tmp_set, \"targets\") else\n torch.tensor([target for _, target in self]))\n transition_targets = torch.distributions.Categorical(\n probs=transition_matrix[true_targets, :]).sample()\n # Create a dictionary of transitions\n if not self.oracle_training:\n flip_indices = torch.nonzero(transition_targets != true_targets).squeeze(-1)\n flip_targets = transition_targets[flip_indices]\n for index, target in zip(flip_indices, flip_targets):\n flip_dict[index.item()] = target.item()\n else:\n # In the oracle setting, don't add transitions\n oracle_indices = torch.nonzero(transition_targets == true_targets).squeeze(-1)\n train_indices = torch.from_numpy(\n np.intersect1d(oracle_indices.numpy(), train_indices.numpy())).long()\n\n # Reset the seed for dataset/initializations\n torch.manual_seed(self.split_seed)\n torch.cuda.manual_seed_all(self.split_seed)\n random.seed(self.split_seed)\n\n # Define trainsforms\n if self.big_model:\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 
0.225])\n test_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(227 if \"inception\" in self.net_type else 224),\n transforms.ToTensor(),\n normalize,\n ])\n train_transforms = transforms.Compose([\n transforms.RandomResizedCrop(227 if \"inception\" in self.net_type else 224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n elif self.dataset == \"tiny_imagenet\":\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n test_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n train_transforms = transforms.Compose([\n transforms.RandomCrop(64, padding=8),\n transforms.RandomHorizontalFlip(),\n test_transforms,\n ])\n elif self.dataset == \"cifar10\":\n normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],\n std=[0.2471, 0.2435, 0.2616])\n test_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n train_transforms = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n test_transforms,\n ])\n elif self.dataset == \"cifar100\":\n normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4467],\n std=[0.2471, 0.2435, 0.2616])\n test_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n train_transforms = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n test_transforms,\n ])\n elif self.dataset == \"mnist\":\n normalize = transforms.Normalize(mean=(0.1307, ), std=(0.3081, ))\n test_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n train_transforms = test_transforms\n else:\n raise ValueError(f\"Unknown dataset {self.dataset}\")\n\n # Get train set\n if dataset_cls == datasets.ImageFolder:\n self._train_set_memo = _Dataset(\n dataset_cls(\n root=os.path.join(self.data, \"train\"),\n transform=train_transforms,\n ),\n flip_dict=flip_dict,\n indices=train_indices,\n use_threshold_samples=self.use_threshold_samples,\n threshold_samples_set_idx=self.threshold_samples_set_idx,\n )\n if os.path.exists(os.path.join(self.data, \"test\")):\n self._valid_set_memo = _Dataset(\n dataset_cls(root=os.path.join(self.data, \"val\"), transform=test_transforms))\n self._test_set_memo = _Dataset(\n dataset_cls(root=os.path.join(self.data, \"test\"), transform=test_transforms))\n else:\n self._valid_set_memo = _Dataset(\n dataset_cls(root=os.path.join(self.data, \"train\"), transform=test_transforms),\n indices=valid_indices,\n ) if len(valid_indices) else None\n self._test_set_memo = _Dataset(\n dataset_cls(root=os.path.join(self.data, \"val\"), transform=test_transforms))\n else:\n self._train_set_memo = _Dataset(\n dataset_cls(root=self.data, train=True, transform=train_transforms),\n flip_dict=flip_dict,\n indices=train_indices,\n use_threshold_samples=self.use_threshold_samples,\n threshold_samples_set_idx=self.threshold_samples_set_idx,\n )\n self._valid_set_memo = _Dataset(dataset_cls(\n root=self.data, train=True, transform=test_transforms),\n indices=valid_indices) if len(valid_indices) else None\n self._test_set_memo = _Dataset(\n dataset_cls(root=self.data, train=False, transform=test_transforms))\n\n @property\n def test_set(self):\n if not hasattr(self, \"_test_set_memo\"):\n self._make_datasets()\n return self._test_set_memo\n\n @property\n def train_set(self):\n if not hasattr(self, \"_train_set_memo\"):\n self._make_datasets()\n return self._train_set_memo\n\n @property\n def 
valid_set(self):\n if not hasattr(self, \"_valid_set_memo\"):\n self._make_datasets()\n return self._valid_set_memo\n\n def generate_aum_details(self, load=None):\n \"\"\"\n Script for accumulating both aum values and other sample details at the end of training.\n It makes a dataframe that contains AUMs Clean for all samples\n The results are saved to the file `aum_details.csv` in the model folder.\n\n :param str load: (optional) If set to some value - it will assemble aum info from the model stored in the `load` folder.\n Otherwise - it will comptue aums from the runner's model.\n\n :return: self\n \"\"\"\n\n load = load or self.savedir\n train_data = torch.load(os.path.join(load, \"train_data.pth\"))\n aum_data = pd.read_csv(os.path.join(load, \"aum_values.csv\"))\n\n # HACK: fix for old version of the code\n if \"assigned_targets\" not in train_data:\n train_data[\"assigned_targets\"] = train_data[\"observed_targets\"]\n\n true_targets = train_data[\"true_targets\"]\n assigned_targets = train_data[\"assigned_targets\"]\n is_threshold_sample = assigned_targets.gt(true_targets.max())\n label_flipped = torch.ne(true_targets, assigned_targets)\n\n # Where to store result\n result = {}\n\n # Add index of samples\n result[\"Index\"] = torch.arange(train_data[\"assigned_targets\"].size(-1))\n\n # Add label flipped info\n result[\"True Target\"] = true_targets\n result[\"Observed Target\"] = assigned_targets\n result[\"Label Flipped\"] = label_flipped\n result[\"Is Threshold Sample\"] = is_threshold_sample\n\n # Add AUM\n aum_data = aum_data.set_index('sample_id')\n aum_data = aum_data.reindex(list(range(train_data[\"assigned_targets\"].size(-1))))\n aum_list = aum_data['aum'].to_list()\n result[\"AUM\"] = torch.tensor(aum_list)\n\n # Add AUM \"worse than random\" (AUM_WTR) score\n # i.e. - is the AUM worse than 99% of threshold samples?\n if is_threshold_sample.sum().item():\n aum_wtr = torch.lt(\n result[\"AUM\"].view(-1, 1),\n result[\"AUM\"][is_threshold_sample].view(1, -1),\n ).float().mean(dim=-1).gt(0.01).float()\n result[\"AUM_WTR\"] = aum_wtr\n else:\n result[\"AUM_WTR\"] = torch.ones_like(result[\"AUM\"])\n\n df = pd.DataFrame(result)\n df.set_index(\n [\"Index\", \"True Target\", \"Observed Target\", \"Label Flipped\", \"Is Threshold Sample\"],\n inplace=True)\n df.to_csv(os.path.join(load, \"aum_details.csv\"))\n return self\n\n def done(self):\n \"Break out of the runner\"\n return None\n\n def load(self, save=None, suffix=\"\"):\n \"\"\"\n Load a previously saved model state dict.\n\n :param str save: (optional) Which folder to load the saved model from.\n Will default to the current runner's save dir.\n :param str suffix: (optional) Which model file to load (e.g. 
\"model.pth.last\").\n By default will load \"model.pth\" which contains the early-stopped model.\n \"\"\"\n save = save or self.savedir\n state_dict = torch.load(os.path.join(save, f\"model.pth{suffix}\"),\n map_location=torch.device('cpu'))\n self.model.load_state_dict(state_dict, strict=False)\n return self\n\n def save(self, save=None, suffix=\"\"):\n \"\"\"\n Save the current state dict\n\n :param str save: (optional) Which folder to save the model to.\n Will default to the current runner's save dir.\n :param str suffix: (optional) A suffix to append to the save name.\n \"\"\"\n save = save or self.savedir\n torch.save(self.model.state_dict(), os.path.join(save, f\"model.pth{suffix}\"))\n return self\n\n def subset(self, perc, aum_files=None):\n \"\"\"\n Use only a subset of the training set\n If aum files are supplied, then drop samples with the lowest aum.\n Otherwise, drop samples at random.\n\n :param float perc: What percentage of the set to use\n :param str aum_files:\n \"\"\"\n if aum_files is None:\n torch.manual_seed(self.seed)\n torch.cuda.manual_seed_all(self.seed)\n random.seed(self.seed)\n order = torch.randperm(len(self.train_set))\n else:\n counts = torch.zeros(len(self.train_set))\n aums = torch.zeros(len(self.train_set))\n if isinstance(aum_files, str):\n aum_files = aum_files.split(\",\")\n for sub_aum_file in aum_files:\n aums_path = os.path.join(sub_aum_file, \"aum_details.csv\")\n if not os.path.exists(aums_path):\n self.compute_aums(load=sub_aum_file)\n aums_data = pd.read_csv(aums_path).drop(\n [\"True Target\", \"Observed Target\", \"Label Flipped\"], axis=1)\n counts += torch.tensor(~aums_data[\"Is Threshold Sample\"].values).float()\n aums += torch.tensor(aums_data[\"AUM\"].values *\n ~aums_data[\"Is Threshold Sample\"].values).float()\n counts.clamp_min_(1)\n aums = aums.div_(counts)\n order = aums.argsort(descending=True)\n\n num_samples = int(len(self.train_set) * perc)\n self.train_set.indices = self.train_set.indices[order[:num_samples]]\n logging.info(f\"Reducing training set from {len(order)} to {len(self.train_set)}\")\n if aum_files is not None:\n logging.info(\n f\"Average AUM: {aums[order[:num_samples]].mean().item()} (from {aums.mean().item()}\"\n )\n return self\n\n def test(self,\n model=None,\n split=\"test\",\n batch_size=512,\n dataset=None,\n epoch=None,\n num_workers=0):\n \"\"\"\n Testing script\n \"\"\"\n stats = ['error', 'top5_error', 'loss']\n meters = [util.AverageMeter() for _ in stats]\n result_class = util.result_class(stats)\n\n # Get model\n if model is None:\n model = self.model\n # Model on cuda\n if torch.cuda.is_available():\n model = model.cuda()\n if torch.cuda.is_available() and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model).cuda()\n\n # Get dataset/loader\n if dataset is None:\n try:\n dataset = getattr(self, f\"{split}_set\")\n except Exception:\n raise ValueError(f\"Invalid split '{split}'\")\n loader = tqdm.tqdm(torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers),\n desc=split.title())\n\n # For storing results\n all_losses = []\n all_confs = []\n all_preds = []\n all_targets = []\n\n # Model on train mode\n model.eval()\n with torch.no_grad():\n for inputs, targets, indices in loader:\n # Get types right\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Calculate loss\n outputs = model(inputs)\n losses = self.loss_func(outputs, targets, reduction=\"none\")\n confs, preds = outputs.topk(5, dim=-1, 
largest=True, sorted=True)\n is_correct = preds.eq(targets.unsqueeze(-1)).float()\n loss = losses.mean()\n error = 1 - is_correct[:, 0].mean()\n top5_error = 1 - is_correct.sum(dim=-1).mean()\n\n # measure and record stats\n batch_size = inputs.size(0)\n stat_vals = [error.item(), top5_error.item(), loss.item()]\n for stat_val, meter in zip(stat_vals, meters):\n meter.update(stat_val, batch_size)\n\n # Record losses\n all_losses.append(losses.cpu())\n all_confs.append(confs[:, 0].cpu())\n all_preds.append(preds[:, 0].cpu())\n all_targets.append(targets.cpu())\n\n # log stats\n res = dict((name, f\"{meter.val:.3f} ({meter.avg:.3f})\")\n for name, meter in zip(stats, meters))\n loader.set_postfix(**res)\n\n # Save the outputs\n pd.DataFrame({\n \"Loss\": torch.cat(all_losses).numpy(),\n \"Prediction\": torch.cat(all_preds).numpy(),\n \"Confidence\": torch.cat(all_confs).numpy(),\n \"Label\": torch.cat(all_targets).numpy(),\n }).to_csv(os.path.join(self.savedir, f\"results_{split}.csv\"), index_label=\"index\")\n\n # Return summary statistics and outputs\n return result_class(*[meter.avg for meter in meters])\n\n def train_for_aum_computation(self,\n num_epochs=150,\n batch_size=64,\n lr=0.1,\n wd=1e-4,\n momentum=0.9,\n **kwargs):\n \"\"\"\n Helper training script - this trains models that will be specifically used for AUL computations\n\n :param int num_epochs: (default 150) (This corresponds roughly to how\n many epochs a normal model is trained for before the lr drop.)\n :param int batch_size: (default 64) (The batch size is intentionally\n lower - this makes the network less likely to memorize.)\n :param float lr: Learning rate\n :param float wd: Weight decay\n :param float momentum: Momentum\n \"\"\"\n return self.train(num_epochs=num_epochs,\n batch_size=batch_size,\n test_at_end=False,\n lr=lr,\n wd=wd,\n momentum=momentum,\n lr_drops=[],\n **kwargs)\n\n def train(self,\n num_epochs=300,\n batch_size=256,\n test_at_end=True,\n lr=0.1,\n wd=1e-4,\n momentum=0.9,\n lr_drops=[0.5, 0.75],\n aum_wtr=False,\n rand_weight=False,\n **kwargs):\n \"\"\"\n Training script\n\n :param int num_epochs: (default 300)\n :param int batch_size: (default 256)\n :param float lr: Learning rate\n :param float wd: Weight decay\n :param float momentum: Momentum\n :param list lr_drops: When to drop the learning rate (by a factor of 10) as a percentage of total training time.\n\n :param str aum_wtr: (optional) The path of the model/results directory to load AUM_WTR weights from.\n :param bool rand_weight (optional, default false): uses rectified normal random weighting if True.\n \"\"\"\n # Model\n model = self.model\n if torch.cuda.is_available():\n model = model.cuda()\n if torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model).cuda()\n\n # Optimizer\n optimizer = torch.optim.SGD(model.parameters(),\n lr=lr,\n weight_decay=wd,\n momentum=momentum,\n nesterov=True)\n milestones = [int(lr_drop * num_epochs) for lr_drop in (lr_drops or [])]\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=milestones,\n gamma=0.1)\n logging.info(f\"\\nOPTIMIZER:\\n{optimizer}\")\n logging.info(f\"SCHEDULER:\\n{scheduler.milestones}\")\n\n # Initialize AUM caluclator object\n aum_calculator = AUMCalculator(save_dir=self.savedir, compressed=False)\n\n train_data = OrderedDict()\n train_data[\"train_indices\"] = self.train_set.indices\n train_data[\"valid_indices\"] = (self.valid_set.indices if self.valid_set is not None else\n torch.tensor([], dtype=torch.long))\n 
train_data[\"true_targets\"] = self.train_set.targets\n train_data[\"assigned_targets\"] = self.train_set.assigned_targets\n\n # Storage to log results\n results = []\n\n # Train model\n best_error = 1\n for epoch in range(num_epochs):\n train_results = self.train_epoch(model=model,\n optimizer=optimizer,\n epoch=epoch,\n num_epochs=num_epochs,\n batch_size=batch_size,\n aum_calculator=aum_calculator,\n aum_wtr=aum_wtr,\n rand_weight=rand_weight,\n **kwargs)\n if self.valid_set is not None:\n valid_results = self.test(model=model,\n split=\"valid\",\n batch_size=batch_size,\n epoch=epoch,\n **kwargs)\n else:\n valid_results = self.test(model,\n split=\"test\",\n batch_size=batch_size,\n epoch=epoch,\n **kwargs)\n scheduler.step()\n\n # Determine if model is the best\n if self.valid_set is not None:\n self.save()\n elif best_error > valid_results.error:\n best_error = valid_results.error\n logging.info('New best error: %.4f' % valid_results.error)\n self.save()\n\n # Log results\n logging.info(f\"\\nTraining {repr(train_results)}\")\n logging.info(f\"\\nValidation {repr(valid_results)}\")\n logging.info('')\n results.append(\n OrderedDict([(\"epoch\", f\"{epoch + 1:03d}\"),\n *[(f\"train_{field}\", val) for field, val in train_results.items()],\n *[(f\"valid_{field}\", val) for field, val in valid_results.items()]]))\n pd.DataFrame(results).set_index(\"epoch\").to_csv(\n os.path.join(self.savedir, \"train_log.csv\"))\n\n # Save metadata around train set (like which labels were flipped)\n torch.save(train_data, os.path.join(self.savedir, \"train_data.pth\"))\n\n # Once we're finished training calculate aum\n aum_calculator.finalize()\n\n # Maybe test (last epoch)\n if test_at_end and self.valid_set is not None:\n test_results = self.test(model=model, **kwargs)\n logging.info(f\"\\nTest (no early stopping) {repr(test_results)}\")\n shutil.copyfile(os.path.join(self.savedir, \"results_test.csv\"),\n os.path.join(self.savedir, \"results_test_noearlystop.csv\"))\n results.append(\n OrderedDict([(f\"test_{field}\", val) for field, val in test_results.items()]))\n pd.DataFrame(results).set_index(\"epoch\").to_csv(\n os.path.join(self.savedir, \"train_log.csv\"))\n\n # Load best model\n self.save(suffix=\".last\")\n self.load()\n\n # Maybe test (best epoch)\n if test_at_end and self.valid_set is not None:\n test_results = self.test(model=model, **kwargs)\n logging.info(f\"\\nEarly Stopped Model Test {repr(test_results)}\")\n results.append(\n OrderedDict([(f\"test_best_{field}\", val) for field, val in test_results.items()]))\n pd.DataFrame(results).set_index(\"epoch\").to_csv(os.path.join(self.savedir, \"train_log.csv\"))\n\n return self\n\n def train_epoch(self,\n model,\n optimizer,\n epoch,\n num_epochs,\n batch_size=256,\n num_workers=0,\n aum_calculator=None,\n aum_wtr=False,\n rand_weight=False):\n stats = [\"error\", \"loss\"]\n meters = [util.AverageMeter() for _ in stats]\n result_class = util.result_class(stats)\n\n # Weighting - set up from GMM\n # NOTE: This is only used when removing threshold samples\n # TODO: some of this probably needs to be changed?\n if aum_wtr:\n counts = torch.zeros(len(self.train_set))\n bad_probs = torch.zeros(len(self.train_set))\n if isinstance(aum_wtr, str):\n aum_wtr = aum_wtr.split(\",\")\n for sub_aum_wtr in aum_wtr:\n aums_path = os.path.join(sub_aum_wtr, \"aum_details.csv\")\n if not os.path.exists(aums_path):\n self.generate_aum_details(load=sub_aum_wtr)\n aums_data = pd.read_csv(aums_path).drop(\n [\"True Target\", \"Observed Target\", \"Label 
Flipped\"], axis=1)\n counts += torch.tensor(~aums_data[\"Is Threshold Sample\"].values).float()\n bad_probs += torch.tensor(aums_data[\"AUM_WTR\"].values *\n ~aums_data[\"Is Threshold Sample\"].values).float()\n counts.clamp_min_(1)\n good_probs = (1 - bad_probs / counts).to(next(model.parameters()).dtype).ceil()\n if torch.cuda.is_available():\n good_probs = good_probs.cuda()\n logging.info(f\"AUM WTR Score\")\n logging.info(f\"(Num samples removed: {good_probs.ne(1.).sum().item()})\")\n elif rand_weight:\n logging.info(\"Rectified Normal Random Weighting\")\n else:\n logging.info(\"Standard weighting\")\n\n # Setup loader\n train_set = self.train_set\n loader = tqdm.tqdm(torch.utils.data.DataLoader(train_set,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers),\n desc=f\"Train (Epoch {epoch + 1}/{num_epochs})\")\n\n # Model on train mode\n model.train()\n for inputs, targets, indices in loader:\n optimizer.zero_grad()\n\n # Get types right\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Compute output and losses\n outputs = model(inputs)\n losses = self.loss_func(outputs, targets, reduction=\"none\")\n preds = outputs.argmax(dim=-1)\n\n # Compute loss weights\n if aum_wtr:\n weights = good_probs[indices.to(good_probs.device)]\n weights = weights.div(weights.sum())\n elif rand_weight:\n weights = torch.randn(targets.size(), dtype=outputs.dtype,\n device=outputs.device).clamp_min_(0)\n weights = weights.div(weights.sum().clamp_min_(1e-10))\n else:\n weights = torch.ones(targets.size(), dtype=outputs.dtype,\n device=outputs.device).div_(targets.numel())\n\n # Backward through model\n loss = torch.dot(weights, losses)\n error = torch.ne(targets, preds).float().mean()\n loss.backward()\n\n # Update the model\n optimizer.step()\n\n # Update AUM values (after the first epoch due to variability of random initialization)\n if aum_calculator and epoch > 0:\n aum_calculator.update(logits=outputs.detach().cpu().half().float(),\n targets=targets.detach().cpu(),\n sample_ids=indices.tolist())\n\n # measure and record stats\n batch_size = outputs.size(0)\n stat_vals = [error.item(), loss.item()]\n for stat_val, meter in zip(stat_vals, meters):\n meter.update(stat_val, batch_size)\n\n # log stats\n res = dict(\n (name, f\"{meter.val:.3f} ({meter.avg:.3f})\") for name, meter in zip(stats, meters))\n loader.set_postfix(**res)\n\n # Return summary statistics\n return result_class(*[meter.avg for meter in meters])\n\n\nif __name__ == \"__main__\":\n fire.Fire(Runner)\n" ]
[ [ "torch.cat", "torch.randperm", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.optim.lr_scheduler.MultiStepLR", "pandas.read_csv", "torch.eye", "torch.tensor", "torch.nonzero", "torch.arange", "torch.dot", "torch.ones_like", "torch.is_tensor", "torch.cuda.device_count", "torch.ne", "torch.manual_seed", "torch.distributions.Categorical", "torch.nn.AdaptiveAvgPool2d", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
s-arora-1987/imitation
[ "17c7b512013eb44c22376a2c2991cde664f64b87" ]
[ "tests/algorithms/test_dagger.py" ]
[ "\"\"\"Tests for DAgger.\"\"\"\n\nimport contextlib\nimport glob\nimport os\nimport pickle\nfrom unittest import mock\n\nimport gym\nimport numpy as np\nimport pytest\nfrom stable_baselines3.common import policies\n\nfrom imitation.algorithms import bc, dagger\nfrom imitation.data import rollout\nfrom imitation.policies import base, serialize\nfrom imitation.util import util\n\nENV_NAME = \"CartPole-v1\"\nEXPERT_POLICY_PATH = \"tests/testdata/expert_models/cartpole_0/policies/final/\"\nEXPERT_ROLLOUTS_PATH = \"tests/testdata/expert_models/cartpole_0/rollouts/final.pkl\"\n\n\ndef test_beta_schedule():\n one_step_sched = dagger.LinearBetaSchedule(1)\n three_step_sched = dagger.LinearBetaSchedule(3)\n for i in range(10):\n assert np.allclose(one_step_sched(i), 1 if i == 0 else 0)\n assert np.allclose(three_step_sched(i), (3 - i) / 3 if i <= 2 else 0)\n\n\[email protected](params=[1, 4])\ndef num_envs(request):\n return request.param\n\n\[email protected]\ndef venv(num_envs):\n return util.make_vec_env(ENV_NAME, num_envs)\n\n\[email protected]\ndef expert_policy(venv):\n return serialize.load_policy(\"ppo\", EXPERT_POLICY_PATH, venv)\n\n\[email protected](params=[True, False])\ndef expert_trajs(request):\n keep_trajs = request.param\n if keep_trajs:\n with open(EXPERT_ROLLOUTS_PATH, \"rb\") as f:\n return pickle.load(f)\n else:\n return None\n\n\ndef test_traj_collector_seed(tmpdir, venv):\n collector = dagger.InteractiveTrajectoryCollector(\n venv=venv,\n get_robot_acts=lambda o: [venv.action_space.sample() for _ in range(len(o))],\n beta=0.5,\n save_dir=tmpdir,\n )\n seeds1 = collector.seed(42)\n obs1 = collector.reset()\n seeds2 = collector.seed(42)\n obs2 = collector.reset()\n\n np.testing.assert_array_equal(seeds1, seeds2)\n np.testing.assert_array_equal(obs1, obs2)\n\n\ndef test_traj_collector(tmpdir, venv):\n robot_calls = 0\n\n def get_random_acts(obs):\n nonlocal robot_calls\n robot_calls += len(obs)\n return [venv.action_space.sample() for _ in range(len(obs))]\n\n collector = dagger.InteractiveTrajectoryCollector(\n venv=venv, get_robot_acts=get_random_acts, beta=0.5, save_dir=tmpdir\n )\n collector.reset()\n zero_acts = np.zeros((venv.num_envs,), dtype=\"int\")\n obs, rews, dones, infos = collector.step(zero_acts)\n assert np.all(rews != 0)\n assert not np.any(dones)\n for info in infos:\n assert isinstance(info, dict)\n # roll out ~5 * venv.num_envs episodes\n for i in range(1000):\n collector.step(zero_acts)\n\n # there is a <10^(-12) probability this fails by chance; we should be calling\n # robot with 50% prob each time\n assert 388 * venv.num_envs <= robot_calls <= 612 * venv.num_envs\n\n # All user/expert actions are zero. Therefore, all collected actions should be\n # zero.\n file_paths = glob.glob(os.path.join(tmpdir, \"dagger-demo-*.npz\"))\n assert len(file_paths) >= 5\n trajs = map(dagger._load_trajectory, file_paths)\n nonzero_acts = sum(np.sum(traj.acts != 0) for traj in trajs)\n assert nonzero_acts == 0\n\n\ndef _build_dagger_trainer(\n tmpdir, venv, beta_schedule, expert_policy, expert_trajs, custom_logger\n):\n del expert_policy\n if expert_trajs is not None:\n pytest.skip(\n \"DAggerTrainer does not use trajectories. 
\"\n \"Skipping to avoid duplicate test.\"\n )\n return dagger.DAggerTrainer(\n venv=venv,\n scratch_dir=tmpdir,\n beta_schedule=beta_schedule,\n bc_kwargs=dict(optimizer_kwargs=dict(lr=1e-3)),\n custom_logger=custom_logger,\n )\n\n\ndef _build_simple_dagger_trainer(\n tmpdir,\n venv,\n beta_schedule,\n expert_policy,\n expert_trajs,\n custom_logger,\n):\n return dagger.SimpleDAggerTrainer(\n venv=venv,\n scratch_dir=tmpdir,\n beta_schedule=beta_schedule,\n bc_kwargs=dict(optimizer_kwargs=dict(lr=1e-3)),\n expert_policy=expert_policy,\n expert_trajs=expert_trajs,\n custom_logger=custom_logger,\n )\n\n\[email protected](params=[None, dagger.LinearBetaSchedule(1)])\ndef beta_schedule(request):\n return request.param\n\n\[email protected](params=[_build_dagger_trainer, _build_simple_dagger_trainer])\ndef init_trainer_fn(\n request, tmpdir, venv, beta_schedule, expert_policy, expert_trajs, custom_logger\n):\n # Provide a trainer initialization fixture in addition `trainer` fixture below\n # for tests that want to initialize multiple DAggerTrainer.\n trainer_fn = request.param\n return lambda: trainer_fn(\n tmpdir, venv, beta_schedule, expert_policy, expert_trajs, custom_logger\n )\n\n\[email protected]\ndef trainer(init_trainer_fn):\n return init_trainer_fn()\n\n\[email protected]\ndef simple_dagger_trainer(\n tmpdir, venv, beta_schedule, expert_policy, expert_trajs, custom_logger\n):\n return _build_simple_dagger_trainer(\n tmpdir, venv, beta_schedule, expert_policy, expert_trajs, custom_logger\n )\n\n\ndef test_trainer_needs_demos_exception_error(\n trainer,\n expert_trajs,\n):\n assert trainer.round_num == 0\n error_ctx = pytest.raises(dagger.NeedsDemosException)\n if expert_trajs is not None and isinstance(trainer, dagger.SimpleDAggerTrainer):\n # In this case, demos should be preloaded and we shouldn't experience\n # the NeedsDemoException error.\n ctx = contextlib.nullcontext()\n else:\n # In all cases except the one above, an error should be raised because\n # there are no demos to update on.\n ctx = error_ctx\n\n with ctx:\n trainer.extend_and_update(dict(n_epochs=1))\n\n # If ctx==nullcontext before, then we should fail on the second call\n # because there aren't any demos loaded into round 1 yet.\n # If ctx==error_ctx, then still should fail once again on the second call.\n with error_ctx:\n trainer.extend_and_update(dict(n_epochs=1))\n\n\ndef test_trainer_train_arguments(trainer, expert_policy):\n def add_samples():\n collector = trainer.get_trajectory_collector()\n rollout.generate_trajectories(\n expert_policy, collector, sample_until=rollout.make_min_timesteps(40)\n )\n\n # Lower default number of epochs for the no-arguments call that follows.\n add_samples()\n with mock.patch.object(trainer, \"DEFAULT_N_EPOCHS\", 1):\n trainer.extend_and_update()\n\n add_samples()\n trainer.extend_and_update(dict(n_batches=2))\n\n add_samples()\n trainer.extend_and_update(dict(n_epochs=1))\n\n\ndef test_trainer_makes_progress(trainer, venv, expert_policy):\n pre_train_rew_mean = rollout.mean_return(\n trainer.bc_trainer.policy,\n venv,\n sample_until=rollout.make_min_episodes(15),\n deterministic_policy=False,\n )\n # checking that the initial policy is poor can be flaky; sometimes the\n # randomly initialised policy performs very well, and it's not clear why\n # assert pre_train_rew_mean < 100\n for i in range(2):\n # roll out a few trajectories for dataset, then train for a few steps\n collector = trainer.get_trajectory_collector()\n for _ in range(5):\n obs = collector.reset()\n dones = 
[False] * venv.num_envs\n while not np.any(dones):\n expert_actions, _ = expert_policy.predict(obs, deterministic=True)\n obs, _, dones, _ = collector.step(expert_actions)\n trainer.extend_and_update(dict(n_epochs=1))\n # make sure we're doing better than a random policy would\n post_train_rew_mean = rollout.mean_return(\n trainer.bc_trainer.policy,\n venv,\n sample_until=rollout.make_min_episodes(15),\n deterministic_policy=True,\n )\n assert post_train_rew_mean - pre_train_rew_mean > 50, (\n f\"pre-train mean {pre_train_rew_mean}, post-train mean \"\n f\"{post_train_rew_mean}\"\n )\n\n\ndef test_trainer_save_reload(tmpdir, init_trainer_fn):\n trainer = init_trainer_fn()\n trainer.round_num = 3\n trainer.save_trainer()\n loaded_trainer = dagger.reconstruct_trainer(trainer.scratch_dir)\n assert loaded_trainer.round_num == trainer.round_num\n\n # old trainer and reloaded trainer should have same variable values\n old_vars = trainer.bc_trainer.policy.state_dict()\n new_vars = loaded_trainer.bc_trainer.policy.state_dict()\n assert len(new_vars) == len(old_vars)\n for var, values in new_vars.items():\n assert values.equal(old_vars[var])\n\n # also those values should be different from freshly initialized trainer\n third_trainer = init_trainer_fn()\n third_vars = third_trainer.bc_trainer.policy.state_dict()\n assert len(third_vars) == len(old_vars)\n assert not all(values.equal(old_vars[var]) for var, values in third_vars.items())\n\n\ndef test_simple_dagger_trainer_train(simple_dagger_trainer: dagger.SimpleDAggerTrainer):\n simple_dagger_trainer.train(total_timesteps=200, bc_train_kwargs=dict(n_batches=10))\n\n\ndef test_policy_save_reload(tmpdir, trainer):\n # just make sure the methods run; we already test them in test_bc.py\n policy_path = os.path.join(tmpdir, \"policy.pt\")\n trainer.save_policy(policy_path)\n pol = bc.reconstruct_policy(policy_path)\n assert isinstance(pol, policies.BasePolicy)\n\n\ndef test_simple_dagger_space_mismatch_error(\n tmpdir,\n venv,\n beta_schedule,\n expert_policy,\n expert_trajs,\n custom_logger,\n):\n class MismatchedSpace(gym.spaces.Space):\n \"\"\"Dummy space that is not equal to any other space.\"\"\"\n\n # Swap out expert_policy.{observation,action}_space with a bad space to\n # elicit space mismatch errors.\n space = MismatchedSpace()\n for space_name in [\"observation\", \"action\"]:\n with mock.patch.object(expert_policy, f\"{space_name}_space\", space):\n with pytest.raises(ValueError, match=f\"Mismatched {space_name}.*\"):\n _build_simple_dagger_trainer(\n tmpdir,\n venv,\n beta_schedule,\n expert_policy,\n expert_trajs,\n custom_logger,\n )\n\n\ndef test_dagger_not_enough_transitions_error(tmpdir, custom_logger):\n venv = util.make_vec_env(\"CartPole-v0\")\n # Initialize with large batch size to ensure error down the line.\n trainer = dagger.DAggerTrainer(\n venv, tmpdir, batch_size=100_000, custom_logger=custom_logger\n )\n collector = trainer.get_trajectory_collector()\n policy = base.RandomPolicy(venv.observation_space, venv.action_space)\n rollout.generate_trajectories(policy, collector, rollout.make_min_episodes(1))\n with pytest.raises(ValueError, match=\"Not enough transitions.*\"):\n trainer.extend_and_update()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.all", "numpy.any", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rdmolony/dublin-electricity-substations
[ "137573c68e4564c7c31dc3c60b6de2218704436a" ]
[ "notebooks/link_demands_to_clustered_stations.py" ]
[ "# %%\nfrom pathlib import Path\n\nimport pandas as pd\nimport geopandas as gpd\nimport mapclassify as mc\nimport matplotlib.patheffects as pe\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\nimport dublin_electricity_network as den\nfrom dublin_electricity_network.cluster import cluster_itm_coords\n\nsns.set()\ndata_dir = Path(\"../data\")\npower_factor = 0.95\n\n\ndef convert_to_gdf(df, x, y, crs, *args, **kwargs):\n return gpd.GeoDataFrame(\n df, geometry=gpd.points_from_xy(df[x], df[y], crs=crs)\n ).drop(columns=[x, y])\n\n\n# %% [markdown]\n# # Caveat: Only Data Centres with < 20MVA load link to the MV network\n# ... so most don't effect the substation capacities\n\n# %%\nesbmap_stations = gpd.read_file(\n data_dir / \"heatmap_stations.geojson\",\n driver=\"GeoJSON\",\n)\n\n# %%\nesbmap_capacity_columns = [\n \"slr_load_mva\",\n \"installed_capacity_mva\",\n \"planned_capacity_mva\",\n \"demand_available_mva\",\n \"gen_available_firm_mva\",\n]\n\n# %%\nesbmap_stations_clustered = gpd.read_file(\n data_dir / \"esbmap_stations_clustered.geojson\",\n driver=\"GeoJSON\",\n)\n\n# %%\ndublin_boundary = gpd.read_file(\n data_dir / \"dublin_boundary.geojson\",\n driver=\"GeoJSON\",\n)\n\n# %%\ndublin_small_area_boundaries = gpd.read_file(\n data_dir / \"dublin_small_area_boundaries.geojson\",\n driver=\"GeoJSON\",\n)\n\n# %%\ndublin_small_area_hh = pd.read_csv(data_dir / \"dublin_small_area_hh.csv\")\n\n# %% [markdown]\n# # Link Small Areas stations to Substation Cluster\ndublin_small_area_boundaries[\"cluster_id\"] = den.join_nearest_points(\n dublin_small_area_boundaries.assign(geometry=lambda gdf: gdf[\"geometry\"].centroid),\n esbmap_stations_clustered[[\"cluster_id\", \"geometry\"]],\n).loc[:, \"cluster_id\"]\n\n# %%\ndublin_small_area_boundaries[\"total_hh\"] = dublin_small_area_boundaries.merge(\n dublin_small_area_hh\n).loc[:, \"total_hh\"]\n\n# %%\nesbmap_stations_clustered[\"residential_buildings\"] = (\n dublin_small_area_boundaries.groupby(\"cluster_id\")[\"total_hh\"].sum().round()\n)\n\n# %%\npeak_demand_mva_lower = 1.5 * (10 ** -3) * power_factor\nesbmap_stations_clustered[\"resi_peak_mva_at_1_5kw\"] = esbmap_stations_clustered.eval(\n \"residential_buildings * @peak_demand_mva_lower\"\n).round()\n\npeak_demand_mva_upper = 2 * (10 ** -3) * power_factor\nesbmap_stations_clustered[\"resi_peak_mva_at_2kw\"] = esbmap_stations_clustered.eval(\n \"residential_buildings * @peak_demand_mva_upper\"\n).round()\n\n\n# %% [markdown]\n# # Get remaining Load at each cluster\nesbmap_stations_clustered[\"remaining_load_mva_lower\"] = esbmap_stations_clustered.eval(\n \"slr_load_mva - resi_peak_mva_at_2kw\"\n)\n\nesbmap_stations_clustered[\"remaining_load_mva_upper\"] = esbmap_stations_clustered.eval(\n \"slr_load_mva - resi_peak_mva_at_1_5kw\"\n)\n\n# %% [markdown]\n# # Link Small Areas to clustered stations\n\n# %%\nsmall_areas_clustered = dublin_small_area_boundaries[\n [\"cluster_id\", \"geometry\"]\n].dissolve(by=\"cluster_id\", as_index=True)\n\n# %%\nsmall_area_esbmap_stations = esbmap_stations_clustered.assign(\n geometry=small_areas_clustered[\"geometry\"]\n)\n\n# %%\nsmall_area_esbmap_stations.to_file(\n data_dir / \"small_area_esbmap_stations.geojson\", driver=\"GeoJSON\"\n)\n\n# %%\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
nkoep/geomstats
[ "e10f363b14fd4216825d3b44daf3ec38057a1d86" ]
[ "examples/plot_square_h2_poincare_half_plane.py" ]
[ "\"\"\"Plot a square on H2 with Poincare half-plane visualization.\"\"\"\n\nimport logging\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport geomstats.visualization as visualization\nfrom geomstats.geometry.hyperbolic import Hyperbolic\n\nH2 = Hyperbolic(dimension=2)\nMETRIC = H2.metric\n\nSQUARE_SIZE = 50\n\n\ndef main():\n top = SQUARE_SIZE / 2.0\n bot = - SQUARE_SIZE / 2.0\n left = - SQUARE_SIZE / 2.0\n right = SQUARE_SIZE / 2.0\n corners_int = [(bot, left), (bot, right), (top, right), (top, left)]\n corners_ext = H2.from_coordinates(corners_int, \"intrinsic\")\n n_steps = 20\n ax = plt.gca()\n for i, src in enumerate(corners_ext):\n dst_id = (i + 1) % len(corners_ext)\n dst = corners_ext[dst_id]\n tangent_vec = METRIC.log(point=dst, base_point=src)\n geodesic = METRIC.geodesic(initial_point=src,\n initial_tangent_vec=tangent_vec)\n t = np.linspace(0, 1, n_steps)\n edge_points = geodesic(t)\n\n visualization.plot(\n edge_points,\n ax=ax,\n space='H2_poincare_half_plane',\n marker='.',\n color='black')\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n if os.environ['GEOMSTATS_BACKEND'] == 'tensorflow':\n logging.info('Examples with visualizations are only implemented '\n 'with numpy backend.\\n'\n 'To change backend, write: '\n 'export GEOMSTATS_BACKEND = \\'numpy\\'.')\n else:\n main()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MayuriKalokhe/Data_Science_Covid-19
[ "e4bd99ddb2d6b2467991867bfa8a658804689d9f" ]
[ "src/tests/tests.py" ]
[ "\n\nimport build_features as bf\nimport pandas as pd\n\n\n\n\nif __name__ == '__main__':\n\n\n pd_JH_data=pd.read_csv('data/processed/COVID_relational_confirmed.csv',sep=';',parse_dates=[0])\n pd_JH_data=pd_JH_data.sort_values('date',ascending=True).copy()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
tjgran01/DotNIRSToolboxTools
[ "aa07f507095dc42d4af4c3d8616e826c401e6a54" ]
[ "utilities/eml/data_state_table_generator.py" ]
[ "import pandas as pd\nimport os\nimport re\n\nfrom pathlib import Path\n\nfrom utilities.generic.file_finder import FileFinder\n\ndef list_diff(li1, li2):\n \"\"\"Returns the subset of lists that are present in li1 but absent in li2.\n\n Args:\n li1: The first list (a superset of li2).\n li2: The second list (a subset of li1)\n\n Returns:\n list: a list of the elements present in li1, but not li2.\"\"\"\n return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))\n\n### NOTE THIS IS SPECIFIC TO EML STUDY CURRENTLY.\n\nclass DataTableGenerator(object):\n def __init__(self, running_fpath, file_finder, root_dir=\"\", export_fpath=\"\"):\n self._this_fpath = running_fpath\n self._file_finder = file_finder\n self.export_fpath = export_fpath\n\n if not root_dir:\n self.ROOT_DIR = Path(f\"{self._this_fpath}/data/unzipped/\")\n if not self.export_fpath:\n self.export_fpath = Path(f\"{self.ROOT_DIR}/data_summaries/eml_summary_nirs.csv\")\n if not os.path.exists(Path(f\"{self.ROOT_DIR}/data_summaries\")):\n os.makedirs(Path(f\"{self.ROOT_DIR}/data_summaries\"))\n\n\n def gen_data_table(self):\n\n # Get all the file paths needed in order to gather information about all of the sessions.\n self.nirs_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, \".nirs\")\n self.trigger_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, \".tri\")\n self.trial_sheet_fnames = self._file_finder.find_files_of_type(self.ROOT_DIR, \"Trials.txt\")\n self.nirs_dir_fpaths = self._file_finder.find_files_of_type(self.ROOT_DIR, \".nirs\", return_parent=True)\n\n # Start getting info about the data.\n self.valid_triggers_dict = self.validate_triggers(self.trigger_fnames)\n self.localizer_order_dict = self.get_localizer_order(self.trial_sheet_fnames)\n self.reading_order_dict = self.get_reading_order(self.trial_sheet_fnames)\n\n # Generate dataframe to collate all of that information and write it to file.\n df = pd.DataFrame([self.nirs_fnames, self.trigger_fnames, self.nirs_dir_fpaths,\n self.trial_sheet_fnames, self.valid_triggers_dict,\n self.localizer_order_dict, self.reading_order_dict]).transpose()\n df.index.name = 'participant'\n\n df.columns = [\"NIRS fPath\", \"Trigger fPath\", \"nirs_dir\",\n \"Trial Sheet fPath\", \"Trigger Notes\",\n \"Localizer Order\", \"Reading Order\"]\n\n df.to_csv(self.export_fpath)\n return self.export_fpath\n\n\n def validate_triggers(self, trigger_fnames):\n \"\"\"Reads the .lsl trigger file from each participant and makes a few judgements\n about the state of the triggers. 
These judgements are then written to the data state table later on.\n\n Args:\n trigger_fnames (dict): key is ID, val is filepath to lsl.tri file for that id.\n Returns:\n triggers_valid (dict): key is ID, val is string describing the state of the triggers.\n \"\"\"\n\n localizer_triggers = [25, 27, 24, 22, 23, 26]\n\n triggers_valid = {}\n\n for k, val in trigger_fnames.items():\n df = pd.DataFrame()\n for v in val:\n df = df.append(pd.read_csv(v, sep=\";\", names=[\"t\", \"sample\", \"val\"]), ignore_index=True)\n\n if df.shape[0] == 226:\n triggers_valid[k] = \"LSL Triggers Look Good For Whole Study\"\n elif df.shape[0] > 226:\n triggers_valid[k] = f\"There are {df.shape[0] - 226} more triggers than expected.\"\n elif df[df[\"val\"].isin(localizer_triggers)].shape[0] == 86:\n triggers_valid[k] = f\"LSL Triggers Look Good for Localizer / Resting State, but are {226 - df.shape[0]} triggers short of whole study.\"\n else:\n triggers_valid[k] = f\"Missing: {86 - df[df['val'].isin(localizer_triggers)].shape[0]} in the Localizer / Resting State Task.\"\n\n return triggers_valid\n\n\n def get_localizer_order(self, trial_fnames):\n \"\"\"Reads the trial file for each participant and determines the order\n the localizer tasks were presented in.\n\n Args:\n trial_fnames (dict): key is ID, val is filepath to Trials.txt file for that ID.\n\n Returns:\n loc_order (dict): key is ID, val is a list with localizer strings as values.\n \"\"\"\n\n loc_order = {}\n possible_vals = [\"4_jabwords\", \"3_jabsent\", \"2_words\", \"1_sent\"]\n\n for k, val in trial_fnames.items():\n df = pd.read_csv(val[0], skiprows=1, names=[\"time_info\", \"event\", \"val\"], sep=\"\\t\")\n condition_list = df[df['val'] == 23][\"event\"].tolist()\n condition_list = [elm.split(\" \")[-1] for elm in condition_list]\n\n missing = list_diff(possible_vals, condition_list)\n if len(missing) == 1:\n condition_list.insert(0, missing[0])\n loc_order[k] = condition_list\n else:\n loc_order[k] = []\n\n return loc_order\n\n\n def get_reading_order(self, trial_fnames):\n \"\"\"Reads the trial file for each participant and determines the order\n the readings were presented in.\n\n Args:\n trial_fnames (dict): key is ID, val is filepath to Trials.txt file for that ID.\n\n Returns:\n loc_order (dict): key is ID, val is a list with reading strings as values.\n \"\"\"\n\n\n read_order = {}\n\n for k, val in trial_fnames.items():\n df = pd.read_csv(val[0], skiprows=1, names=[\"time_info\", \"event\", \"val\"], sep=\"\\t\")\n condition_list = df[df['val'] == 7][\"event\"].tolist()\n condition_list = [elm[:-1] for elm in condition_list]\n\n # Preserve the order and get rid of repeats\n _cond_list = []\n for elm in condition_list:\n if elm not in _cond_list:\n _cond_list.append(elm)\n\n read_order[k] = _cond_list\n\n return read_order\n\n\nif __name__ == \"__main__\":\n _mDataTableGen = DataTableGenerator()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
DocOtak/gsw-xarray
[ "eb39f8712173c1b2d75caf37218174367f07023c" ]
[ "gsw_xarray/tests/conftest.py" ]
[ "import pytest\n\nimport xarray as xr\nimport numpy as np\n\n\[email protected]\ndef ds():\n ds = xr.Dataset()\n id = np.arange(3)\n ds[\"id\"] = xr.DataArray(id, coords={\"id\": id})\n ds[\"CT\"] = ds[\"id\"] * 10\n ds[\"CT\"].attrs = {\"standard_name\": \"sea_water_conservative_temperature\"}\n ds[\"SA\"] = ds[\"id\"] * 0.1 + 34\n ds[\"SA\"].attrs = {\"standard_name\": \"sea_water_absolute_salinity\"}\n return ds\n\n\[email protected](scope=\"session\")\ndef ureg():\n pint = pytest.importorskip(\"pint\")\n return pint.UnitRegistry()\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ngu-khoi/BRITS-Realtime-Alive
[ "5167070ee7cdf8ce74dcf8dd2d7bf8340a6bd186" ]
[ "neural_net.py" ]
[ "import xgboost as xgb\nimport numpy as np\nimport os\nimport csv\nmodel_name = 'brits'\n\nimpute = np.load('./{}_data.npy'.format(model_name)).reshape(-1, 48*35)\nprint(impute.shape)\n\nlabel = np.load('./{}_label.npy'.format(model_name)).reshape(-1,)\nprint(label.shape)\n\ndata = np.nan_to_num(impute)\nn_train = 3000\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import roc_auc_score\n\nauc = []\n\nfor i in range(10):\n model = MLPClassifier(random_state=1, max_iter=300).fit(data[:n_train], label[:n_train])\n pred = model.predict_proba(data[n_train:])\n\n auc.append(roc_auc_score(label[n_train:].reshape(-1,), pred[:, 1].reshape(-1, )))\nprint(pred)\nprint(np.mean(auc))\n#Create outfile\noutfile = \"./result/data2.csv\"\nprint(\"Processing data\")\nx = open(outfile, \"a\", newline='')\ncsv_writer = csv.writer(x)\n#Write header + patient specific classifiers\ncsv_writer.writerow([pred[7][0], pred[7][1], np.mean(auc)] )\nx.close()" ]
[ [ "sklearn.neural_network.MLPClassifier", "numpy.mean", "numpy.nan_to_num" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gauravpks/ml-repo
[ "4e5874aff2aef105367c8ac3ffd155a05a3abe11" ]
[ "Part 1 - Data Preprocessing/data_preprocessing_tools.py" ]
[ "# Data Preprocessing Tools\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('https://raw.githubusercontent.com/gauravpks/ml-repo/master/Part%201%20-%20Data%20Preprocessing/Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\nprint(X)\nprint(y)\n\n# Taking care of missing data\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values=np.nan, strategy='mean')\nimputer.fit(X[:, 1:3])\nX[:, 1:3] = imputer.transform(X[:, 1:3])\nprint(X)\n\n# Encoding categorical data\n# Encoding the Independent Variable\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))\nprint(X)\n# Encoding the Dependent Variable\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\nprint(y)\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1)\nprint(X_train)\nprint(X_test)\nprint(y_train)\nprint(y_test)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train[:, 3:] = sc.fit_transform(X_train[:, 3:])\nX_test[:, 3:] = sc.transform(X_test[:, 3:])\nprint(X_train)\nprint(X_test)\n" ]
[ [ "pandas.read_csv", "sklearn.preprocessing.OneHotEncoder", "sklearn.impute.SimpleImputer", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
LightTwist/RobustVideoMatting
[ "03096f23de1831b8181dadd5e165561c2759f9eb" ]
[ "evaluation/generate_videomatte_with_background_image.py" ]
[ "\"\"\"\npython generate_videomatte_with_background_image.py \\\n --videomatte-dir ../matting-data/VideoMatte240K_JPEG_HD/test \\\n --background-dir ../matting-data/Backgrounds/valid \\\n --num-samples 25 \\\n --resize 512 288 \\\n --out-dir ../matting-data/evaluation/vidematte_static_sd/\n\"\"\"\n\nimport argparse\nimport os\nimport pims\nimport numpy as np\nimport random\nfrom PIL import Image\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--videomatte-dir', type=str, required=True)\nparser.add_argument('--background-dir', type=str, required=True)\nparser.add_argument('--num-samples', type=int, default=20)\nparser.add_argument('--num-frames', type=int, default=100)\nparser.add_argument('--resize', type=int, default=None, nargs=2)\nparser.add_argument('--out-dir', type=str, required=True)\nparser.add_argument('--extension', type=str, default='.png')\nargs = parser.parse_args()\n \nrandom.seed(10)\n\nvideomatte_filenames = [(clipname, sorted(os.listdir(os.path.join(args.videomatte_dir, 'fgr', clipname)))) \n for clipname in sorted(os.listdir(os.path.join(args.videomatte_dir, 'fgr')))]\n\nbackground_filenames = os.listdir(args.background_dir)\nrandom.shuffle(background_filenames)\n\nfor i in range(args.num_samples):\n \n clipname, framenames = videomatte_filenames[i % len(videomatte_filenames)]\n \n out_path = os.path.join(args.out_dir, str(i).zfill(4))\n os.makedirs(os.path.join(out_path, 'fgr'), exist_ok=True)\n os.makedirs(os.path.join(out_path, 'pha'), exist_ok=True)\n os.makedirs(os.path.join(out_path, 'com'), exist_ok=True)\n os.makedirs(os.path.join(out_path, 'bgr'), exist_ok=True)\n \n with Image.open(os.path.join(args.background_dir, background_filenames[i])) as bgr:\n bgr = bgr.convert('RGB')\n\n \n base_t = random.choice(range(len(framenames) - args.num_frames))\n \n for t in tqdm(range(args.num_frames), desc=str(i).zfill(4)):\n with Image.open(os.path.join(args.videomatte_dir, 'fgr', clipname, framenames[base_t + t])) as fgr, \\\n Image.open(os.path.join(args.videomatte_dir, 'pha', clipname, framenames[base_t + t])) as pha:\n fgr = fgr.convert('RGB')\n pha = pha.convert('L')\n \n if args.resize is not None:\n fgr = fgr.resize(args.resize, Image.BILINEAR)\n pha = pha.resize(args.resize, Image.BILINEAR)\n \n \n if i // len(videomatte_filenames) % 2 == 1:\n fgr = fgr.transpose(Image.FLIP_LEFT_RIGHT)\n pha = pha.transpose(Image.FLIP_LEFT_RIGHT)\n \n fgr.save(os.path.join(out_path, 'fgr', str(t).zfill(4) + args.extension))\n pha.save(os.path.join(out_path, 'pha', str(t).zfill(4) + args.extension))\n \n if t == 0:\n bgr = bgr.resize(fgr.size, Image.BILINEAR)\n bgr.save(os.path.join(out_path, 'bgr', str(t).zfill(4) + args.extension))\n else:\n os.symlink(str(0).zfill(4) + args.extension, os.path.join(out_path, 'bgr', str(t).zfill(4) + args.extension))\n \n pha = np.asarray(pha).astype(float)[:, :, None] / 255\n com = Image.fromarray(np.uint8(np.asarray(fgr) * pha + np.asarray(bgr) * (1 - pha)))\n com.save(os.path.join(out_path, 'com', str(t).zfill(4) + args.extension))\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
laeen/kaggle_practice
[ "f12d4ab01800ee0f750067c16ef6cea473b168c5" ]
[ "house_prices/main.py" ]
[ "import pickle\nimport pandas as pd\n\nfrom sklearn.externals import joblib\nfrom sklearn import preprocessing\n\nclf = joblib.load(\"train_model.m\")\n\ndata_test = pd.read_csv(\"cleaning_test.csv\")\ndf = pd.read_csv(\"data/test.csv\")\n\nf_names = ['OverallQual', 'GrLivArea', 'TotalBsmtSF', 'GarageArea', '1stFlrSF', 'FullBath', 'TotRmsAbvGrd',\n 'YearRemodAdd', 'YearBuilt', 'CentralAir', 'Neighborhood', 'RoofMatl', 'HouseStyle', 'KitchenQual',\n 'SaleCondition', 'SaleType']\n\nfor key in f_names:\n data_test[key].fillna(data_test[key].mode()[0], inplace=True)\n\n\n# 读取模型参数,对测试进行再编码\n\nx = data_test.values\ny_te_pred = clf.predict(x)\n\ny_scaler = joblib.load('scalarY')\n\nprediction = pd.DataFrame(y_te_pred, columns=['SalePrice'])\n\np = y_scaler.inverse_transform(prediction)\n\nresult = pd.concat([df['Id'], pd.DataFrame(p, columns=['SalePrice'])], axis=1)\nprint(type(p), type(result), type(prediction))\n\nresult.to_csv('./Predictions.csv', index=False)\n" ]
[ [ "sklearn.externals.joblib.load", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
tetsuzawa/rpi_ahrs
[ "5db341073b7c711c33d8854a22535655170d82c0" ]
[ "server/attitude_estimation/server_thread.py" ]
[ "# -*- coding: utf-8 -*-\nimport socket\nimport threading\nimport time\n\nimport numpy as np\n\n\nclass ServerThreadUDP(threading.Thread):\n def __init__(self, server_ip='127.0.0.1', port=50009, ini_val=np.ones((3, 3))):\n threading.Thread.__init__(self)\n # initial value\n self.data = ini_val\n\n self.kill_flag = False\n # line information\n self.host = server_ip\n # self.host = socket.gethostname()\n self.port = port\n self.buffsize = 1024\n self.addr = (socket.gethostbyname(self.host), self.port)\n # bind\n self.udpServSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.udpServSock.bind(self.addr) # HOST, PORTでbinding\n\n def run(self):\n while True:\n try:\n data, self.addr = self.udpServSock.recvfrom(self.buffsize) # データ受信\n data = data.decode().split(',')\n self.data = np.array(data, dtype=np.float32).reshape((3, 3))\n\n except KeyboardInterrupt:\n break\n except:\n pass\n\n\nif __name__ == '__main__':\n sock = ServerThreadUDP(server_ip='127.0.0.1', port=50009, ini_val=np.zeros((3, 3)))\n sock.setDaemon(True)\n sock.start()\n # sock.run()\n\n while True:\n if not sock.data:\n break\n print(sock.data)\n time.sleep(0.1)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DickLiTQ/pyts
[ "374711385a48349b0a74db7f444acaf769ab49ee" ]
[ "tspy.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nThis pyfile is used to make our time series analysis more convenient. Although statsmodels are powerful enough, it can be quite tedious to remember a lot of modules with different name. If we spend so much time on searching which command should we use in statsmodels, we are just doing something wrong against efficiency. So I try to develop this pyfile based on statsmodels to make things easy.\r\n\r\nDickLi\r\n2018.3.22\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\nimport statsmodels.graphics as smg\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\r\nplt.style.use(\"seaborn\")\r\n\r\n\r\ndef __init__():\r\n\r\n return 0\r\n\r\ndef load_txt(path,header):\r\n file = open(path,'r')\r\n lines = file.readlines()\r\n firstline = lines[0]\r\n variable = firstline.split()\r\n if header == \"T\":\r\n data = pd.DataFrame(columns=variable,index=range(len(lines[1:])))\r\n for row, line in enumerate(lines[1:]):\r\n text = line.split()\r\n for var in range(len(variable)):\r\n data.iloc[row,var]=text[var]\r\n else:\r\n variable = np.arange(len(variable))\r\n variable = list(variable)\r\n data = pd.DataFrame(columns=variable,index=range(len(lines)))\r\n for row, line in enumerate(lines):\r\n text = line.split()\r\n for var in range(len(variable)):\r\n data.iloc[row,var]=text[var]\r\n return data.apply(pd.to_numeric,errors=\"ignore\")\r\n\r\ndef to_num(data):\r\n return data.apply(pd.to_numeric,errors=\"ignore\")\r\n\r\ndef dateindex_transfer(data,year,month,day):\r\n import datetime\r\n data['Date']=datetime.date(2018,1,1)\r\n for index in range(data.shape[0]):\r\n date = datetime.date(int(data[year][index]),int(data[month][index]),int(data[day][index]))\r\n data['Date'][index] = date\r\n data.set_index('Date',inplace=True)\r\n return data\r\n\r\ndef acf(data,lag):\r\n fig = plt.figure()\r\n fig = sm.graphics.tsa.plot_acf(data,lags=lag)\r\n fig.show()\r\n return fig\r\n\r\ndef pacf(data,lag):\r\n fig = plt.figure()\r\n fig = sm.graphics.tsa.plot_pacf(data,lags=lag)\r\n fig.show()\r\n\r\ndef acfpacf(data,lag):\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(211)\r\n fig = sm.graphics.tsa.plot_acf(data,lags=lag,ax=ax1)\r\n ax2 = fig.add_subplot(212)\r\n fig = sm.graphics.tsa.plot_pacf(data,lags=lag,ax=ax2)\r\n fig.show()\r\n\r\ndef select_ARMA_order(data,max_ar,max_ma,ic):\r\n return sm.tsa.arma_order_select_ic(data,max_ar=max_ar,max_ma=max_ma,ic=ic)\r\n \r\ndef ARMA(data,p,q):\r\n return sm.tsa.ARMA(data,(p,q)).fit()\r\n\r\ndef ADFuller(data,ic,ct):\r\n t = adfuller(data,autolag=ic,regression=ct)\r\n return {'ADF_statistics':t[0],\r\n 'p_value':t[1],\r\n 'lags':t[2],\r\n 'observation':t[3],\r\n 'critical_values':t[4],\r\n 'icbest':t[5]}\r\n\r\ndef LjungBox(residual,lag):\r\n t = acorr_ljungbox(residual,lags=lag)\r\n acfpacf(residual,lag)\r\n return {'LB_statistics': t[0],'p_value': t[1]}\r\n\r\ndef LjungBox_ARMA(data,p,q,lag):\r\n model = ARMA(data,p,q)\r\n residual = model.fittedvalues - data\r\n t = acorr_ljungbox(residual,lags=lag)\r\n acfpacf(residual,lag)\r\n return {'LB_statistics': t[0],'p_value': t[1]}\r\n\r\ndef backtest(data,p,q,startpoint,IC):\r\n MSFE = 0\r\n# predict = 0\r\n startpoint = startpoint - 1 # i th elements has an index of i-1\r\n test = np.zeros_like(data)\r\n train = np.zeros_like(data)\r\n test[startpoint:] = data[startpoint:]\r\n train[:startpoint] = data[:startpoint]\r\n for i in 
range(len(data)-startpoint):\r\n predict = ARMA(train[i:startpoint+i],p,q).forecast(1)[0]\r\n MSFE = MSFE + (predict-test[startpoint+i])**2\r\n train[startpoint+i] = predict\r\n return MSFE/(len(data)-startpoint)\r\n\r\npath = 'C:/Users/DickLi/OneDrive/文档/2017-2018大三下学期/时间序列分析/data/m-q-GNPC96.txt'\r\ndata = load_txt(path,\"T\")\r\n#data = to_num(data)\r\ndata.columns = ['data']\r\nindex = pd.date_range('1/1/1948',periods=766,freq='M')\r\ndata.index = index\r\n\r\nbacktest(data['data'],4,4,250,'aic')\r\n\r\nmodel = ARMA(data['data'],1,3)\r\nmodel.summary2()\r\nmodel.forecast(10)[0]\r\n\r\nADFuller(data['data'],'AIC','c')\r\n \r\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.zeros_like", "matplotlib.pyplot.style.use", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
VLOGroup/optox
[ "ae8bf1b4c1bfeb1e2fea24f549182d5610e09d82" ]
[ "tensorflow/optotf/keras/pad.py" ]
[ "import tensorflow as tf\nimport optotf.pad\nimport unittest\n\nclass Pad2d(tf.keras.layers.Layer):\n def __init__(self, padding, mode, channel_last=True):\n super().__init__()\n self.padding = padding\n self.mode = mode\n self.channel_last = channel_last\n self.op = optotf.pad._ext.pad2d\n\n def build(self, input_shape):\n shape = tf.unstack(input_shape)\n\n if self.channel_last:\n shape = [shape[0], shape[-1], *shape[1:-1]]\n\n new_shape = [-1, *shape[2:]]\n new_shape = tf.stack(new_shape)\n\n padded_shape = shape\n padded_shape[-2] += self.padding[2] + self.padding[3]\n padded_shape[-1] += self.padding[0] + self.padding[1]\n padded_shape = tf.stack(padded_shape)\n\n self.pre_pad_shape = new_shape\n self.post_pad_shape = padded_shape\n\n\n def call(self, x):\n # first reshape the input\n if self.channel_last:\n x = tf.transpose(x, [0, 3, 1, 2])\n\n x_r = tf.reshape(x, self.pre_pad_shape, self.post_pad_shape)\n\n if x.dtype == tf.complex64 or x.dtype == tf.complex128:\n x_r = tf.complex(self.op(tf.math.real(x_r), left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], mode=self.mode), \n self.op(tf.math.imag(x_r), left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], mode=self.mode))\n else:\n x_r = self.op(x_r, left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], mode=self.mode)\n\n if self.channel_last:\n return tf.transpose(tf.reshape(x_r, self.post_pad_shape), [0, 2, 3, 1])\n else:\n return tf.reshape(x_r, self.post_pad_shape)\n\nclass Pad2dTranspose(Pad2d):\n def __init__(self, padding, mode, channel_last=True):\n super().__init__(padding, mode, channel_last=channel_last)\n self.op = optotf.pad._ext.pad2d_transpose\n\n def build(self, input_shape):\n shape = tf.unstack(input_shape)\n\n if self.channel_last:\n shape = [shape[0], shape[-1], *shape[1:-1]]\n\n new_shape = [-1, *shape[2:]]\n new_shape = tf.stack(new_shape)\n\n padded_shape = shape\n padded_shape[-2] -= self.padding[2] + self.padding[3]\n padded_shape[-1] -= self.padding[0] + self.padding[1]\n padded_shape = tf.stack(padded_shape)\n\n self.pre_pad_shape = new_shape\n self.post_pad_shape = padded_shape\nclass Pad3d(tf.keras.layers.Layer):\n def __init__(self, padding, mode, channel_last=True):\n super().__init__()\n self.padding = padding\n self.mode = mode\n self.channel_last = channel_last\n self.op = optotf.pad._ext.pad3d\n\n def build(self, input_shape):\n shape = tf.unstack(input_shape)\n\n if self.channel_last:\n shape = [shape[0], shape[-1], *shape[1:-1]]\n\n new_shape = [-1, *shape[2:]]\n new_shape = tf.stack(new_shape)\n\n padded_shape = shape\n padded_shape[-3] += self.padding[4] + self.padding[5]\n padded_shape[-2] += self.padding[2] + self.padding[3]\n padded_shape[-1] += self.padding[0] + self.padding[1]\n padded_shape = tf.stack(padded_shape)\n\n self.pre_pad_shape = new_shape\n self.post_pad_shape = padded_shape\n\n\n def call(self, x):\n # first reshape the input\n if self.channel_last:\n x = tf.transpose(x, [0, 2, 3, 4, 1])\n\n x_r = tf.reshape(x, self.pre_pad_shape, self.post_pad_shape)\n\n if x.dtype == tf.complex64 or x.dtype == tf.complex128:\n x_r = tf.complex(self.op(tf.math.real(x_r), left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], front=self.padding[4], back=self.padding[5], mode=self.mode), \n self.op(tf.math.imag(x_r), left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], front=self.padding[4], 
back=self.padding[5], mode=self.mode))\n else:\n x_r = self.op(x_r, left=self.padding[0], right=self.padding[1], bottom=self.padding[2], top=self.padding[3], front=self.padding[4], back=self.padding[5], mode=self.mode)\n\n if self.channel_last:\n return tf.transpose(tf.reshape(x_r, self.post_pad_shape), [0, 2, 3, 4, 1])\n else:\n return tf.reshape(x_r, self.post_pad_shape)\n\nclass Pad3dTranspose(Pad3d):\n def __init__(self, padding, mode, channel_last=True):\n super().__init__(padding, mode, channel_last=channel_last)\n self.op = optotf.pad._ext.pad3d_transpose\n\n def build(self, input_shape):\n shape = tf.unstack(input_shape)\n\n if self.channel_last:\n shape = [shape[0], shape[-1], *shape[1:-1]]\n\n new_shape = [-1, *shape[2:]]\n new_shape = tf.stack(new_shape)\n\n padded_shape = shape\n padded_shape[-3] -= self.padding[4] + self.padding[5]\n padded_shape[-2] -= self.padding[2] + self.padding[3]\n padded_shape[-1] -= self.padding[0] + self.padding[1]\n padded_shape = tf.stack(padded_shape)\n\n self.pre_pad_shape = new_shape\n self.post_pad_shape = padded_shape\nclass TestPad(unittest.TestCase):\n def test2d(self):\n shape = (5, 2, 10, 10)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4]\n op = Pad2d(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] += padding[0] + padding[1]\n new_shape[-2] += padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test2d_complex(self):\n shape = (5, 2, 10, 10)\n x = tf.complex(tf.random.normal(shape), tf.random.normal(shape))\n padding = [2, 2, 4, 4]\n op = Pad2d(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] += padding[0] + padding[1]\n new_shape[-2] += padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test2d_channel_last(self):\n shape = (5, 10, 10, 2)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4]\n op = Pad2d(padding=padding, mode='symmetric', channel_last=True)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-2] += padding[0] + padding[1]\n new_shape[-3] += padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d(self):\n shape = (5, 2, 8, 10, 10)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4, 1, 1]\n op = Pad3d(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] += padding[0] + padding[1]\n new_shape[-2] += padding[2] + padding[3]\n new_shape[-3] += padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d_complex(self):\n shape = (5, 2, 8, 10, 10)\n x = tf.complex(tf.random.normal(shape), tf.random.normal(shape))\n padding = [2, 2, 4, 4, 1, 1]\n op = Pad3d(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] += padding[0] + padding[1]\n new_shape[-2] += padding[2] + padding[3]\n new_shape[-3] += padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d_channel_last(self):\n shape = (5, 8, 10, 10, 2)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4, 1, 2]\n op = Pad3d(padding=padding, mode='symmetric', channel_last=True)\n Kx = 
op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-2] += padding[0] + padding[1]\n new_shape[-3] += padding[2] + padding[3]\n new_shape[-4] += padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test2d_transpose(self):\n shape = (5, 2, 10, 10)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4]\n op = Pad2dTranspose(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] -= padding[0] + padding[1]\n new_shape[-2] -= padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test2d_complex_transpose(self):\n shape = (5, 2, 10, 10)\n x = tf.complex(tf.random.normal(shape), tf.random.normal(shape))\n padding = [2, 2, 4, 4]\n op = Pad2dTranspose(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] -= padding[0] + padding[1]\n new_shape[-2] -= padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test2d_channel_last_transpose(self):\n shape = (5, 10, 10, 2)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4]\n op = Pad2dTranspose(padding=padding, mode='symmetric', channel_last=True)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-2] -= padding[0] + padding[1]\n new_shape[-3] -= padding[2] + padding[3]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d_transpose(self):\n shape = (5, 2, 8, 10, 10)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4, 1, 1]\n op = Pad3dTranspose(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] -= padding[0] + padding[1]\n new_shape[-2] -= padding[2] + padding[3]\n new_shape[-3] -= padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d_complex_transpose(self):\n shape = (5, 2, 8, 10, 10)\n x = tf.complex(tf.random.normal(shape), tf.random.normal(shape))\n padding = [2, 2, 4, 4, 1, 1]\n op = Pad3dTranspose(padding=padding, mode='symmetric', channel_last=False)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-1] -= padding[0] + padding[1]\n new_shape[-2] -= padding[2] + padding[3]\n new_shape[-3] -= padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\n def test3d_channel_last_transpose(self):\n shape = (5, 8, 10, 10, 2)\n x = tf.random.normal(shape)\n padding = [2, 2, 4, 4, 1, 2]\n op = Pad3dTranspose(padding=padding, mode='symmetric', channel_last=True)\n Kx = op(x)\n\n # manually construct new shape\n new_shape = list(x.shape)\n new_shape[-2] -= padding[0] + padding[1]\n new_shape[-3] -= padding[2] + padding[3]\n new_shape[-4] -= padding[4] + padding[5]\n new_shape = tuple(new_shape)\n\n self.assertTrue(new_shape == Kx.shape)\n\nif __name__ == \"__main__\":\n unittest.test()" ]
[ [ "tensorflow.transpose", "tensorflow.unstack", "tensorflow.stack", "tensorflow.reshape", "tensorflow.math.imag", "tensorflow.math.real", "tensorflow.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
doutib/lobpredict
[ "3e443766ea97ec7385fdb6b3d86c37e58055065c" ]
[ "lobpredictrst/rf.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport pandas as pd\nimport json\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom multiprocessing import Pool, TimeoutError\nfrom multiprocessing import cpu_count\nfrom datetime import timedelta\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport sys\nimport csv\nimport itertools\nimport time\n\n\n# In[13]:\n\ndef rf(X_train_cols,\n X_train,\n Y_train,\n X_test,\n Y_test,\n n_estimators=10,\n criterion=\"gini\",\n max_features=\"auto\",\n max_depth=-1,\n n_jobs=1):\n \"\"\"\n Parameters\n ----------\n X_train_cols : list of feature column names\n from the training set\n X_train : pandas data frame\n data frame of features for the training set\n Y_train : pandas data frame\n data frame of labels for the training set\n X_test : pandas data frame\n data frame of features for the test set\n Y_test : pandas data frame\n data frame of labels for the test set\n n_estimators : integer, optional (default=10)\n The number of trees in the forest.\n criterion : string, optional (default=”gini”)\n The function to measure the quality of a split.\n Supported criteria are “gini” for the Gini impurity and “entropy”\n for the information gain.\n max_features : int, float, string or None, optional (default=”auto”)\n The number of features to consider when looking for the best split:\n If int, then consider max_features features at each split.\n If float, then max_features is a percentage and int(max_features * n_features)\n features are considered at each split.\n If “auto”, then max_features=sqrt(n_features).\n If “sqrt”, then max_features=sqrt(n_features) (same as “auto”).\n If “log2”, then max_features=log2(n_features).\n If None, then max_features=n_features.\n max_depth : integer or None, optional (default=None)\n The maximum depth of the tree.\n If None, then nodes are expanded until all leaves are pure or\n until all leaves contain less than min_samples_split samples.\n Ignored if max_leaf_nodes is not None.\n n_jobs : integer, optional (default=1)\n The number of jobs to run in parallel for both fit and predict.\n If -1, then the number of jobs is set to the number of cores.\n\n Result:\n -------\n numpy array\n logloss : averaged logarithmic loss\n miss_err : missclassification error rate\n prec : precision\n recall : recall\n f1 : f1 score\n parameters : previous parameters in the order previously specified\n \"\"\"\n if max_depth==-1:\n max_depth = None\n\n labels = np.unique(Y_train)\n\n ## # Run rf\n # Define classifier\n rf = RandomForestClassifier(n_estimators = n_estimators,\n criterion = criterion,\n max_features = max_features,\n max_depth = max_depth,\n n_jobs = n_jobs)\n # Fit\n rf.fit(X_train, Y_train)\n\n # Predict\n Y_hat = rf.predict(X_test)\n Y_probs = rf.predict_proba(X_test)\n\n ## # Misclassification error rate\n miss_err = 1-accuracy_score(Y_test, Y_hat)\n ## # Log Loss\n eps = 10^(-15)\n logloss = log_loss(Y_test, Y_probs, eps = eps)\n\n ##confusion_matrix\n confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat\n , labels=labels)\n\n # classification_report\n classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)\n\n # Variable importance\n importances = 
rf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in rf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n\n # Return tuple of (rank, feature name, variable importance)\n var_importance = [(f+1, X_train_cols[f], importances[indices[f]]) for f in range(X_train.shape[1])]\n\n # Output results in a list format\n result = []\n result.append(\"confusion_matrix\")\n result.append(confusion_matrix1)\n result.append(\"classification_report\")\n result.append(classification_report1)\n result.append(\"number of trees\")\n result.append(n_estimators)\n result.append(\"max depth\")\n result.append(max_depth)\n result.append(\"logloss\")\n result.append(logloss)\n result.append(\"miss_err\")\n result.append(miss_err)\n result.append(\"var_importance\")\n result.append(var_importance)\n return result\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "sklearn.metrics.confusion_matrix", "sklearn.metrics.log_loss", "numpy.std", "numpy.argsort", "sklearn.metrics.classification_report", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
haihabi/NormFlowPy
[ "a15ea6a704254a925f25dc94b22459ca2e0beaf5" ]
[ "normflowpy/flows/maf.py" ]
[ "import torch\nfrom normflowpy.base_nets.mlp_base_net import ARMLP\nfrom normflowpy.base_flow import UnconditionalBaseFlowLayer\n\n\nclass MAF(UnconditionalBaseFlowLayer):\n \"\"\" Masked Autoregressive Flow that uses a MADE-style network for fast forward \"\"\"\n\n def __init__(self, dim, parity, net_class=ARMLP, nh=24):\n super().__init__()\n self.dim = dim\n self.net = net_class(dim, dim * 2, nh)\n self.parity = parity\n\n def forward(self, x):\n # here we see that we are evaluating all of z in parallel, so density estimation will be fast\n st = self.net(x)\n s, t = st.split(self.dim, dim=1)\n z = x * torch.exp(s) + t\n # reverse order, so if we stack MAFs correct things happen\n z = z.flip(dims=(1,)) if self.parity else z\n log_det = torch.sum(s, dim=1)\n return z, log_det\n\n def backward(self, z):\n # we have to decode the x one at a time, sequentially\n x = torch.zeros_like(z)\n log_det = torch.zeros(z.size(0))\n z = z.flip(dims=(1,)) if self.parity else z\n for i in range(self.dim):\n st = self.net(x.clone()) # clone to avoid in-place op errors if using IAF\n s, t = st.split(self.dim, dim=1)\n x[:, i] = (z[:, i] - t[:, i]) * torch.exp(-s[:, i])\n log_det += -s[:, i]\n return x, log_det\n" ]
[ [ "torch.exp", "torch.zeros_like", "torch.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Huhanlin/MusicGenreClassification
[ "fa6e412b94630139e994d51d7963de711717d946" ]
[ "music_gen_lib.py" ]
[ "# store the function/object used in the project\n\n# import modules\nfrom __future__ import print_function\nimport numpy as np\nimport librosa\nimport keras\nfrom keras.models import Sequential, load_model, Model\nfrom keras.layers import Dense, Dropout, Flatten, Input, Reshape, Add\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, concatenate, LSTM\nfrom keras import backend as K\nfrom keras.utils import np_utils\nfrom keras import regularizers\nimport time\nfrom keras.engine.topology import Layer\nfrom hyperparams import HyperParams as hp\nfrom keras.callbacks import ModelCheckpoint\n\n\ndef split_data(T, split_idxes):\n \"\"\"\n give the indexes of training, validation, and testing data\n :param T: label of all data\n :param split_idxes: splitting points of the data\n :return:\n \"\"\"\n genres = np.unique(T)\n training_idxes = []\n validation_idxes = []\n testing_idxes = []\n for idx, music_genre in enumerate(genres):\n tmp_logidx = music_genre == T\n tmp_idx = np.flatnonzero(tmp_logidx)\n tmp_shuffled_idx = np.random.permutation(tmp_idx)\n tmp_num_examles = len(tmp_shuffled_idx)\n tmp_split_idxes = np.asarray(split_idxes * tmp_num_examles, dtype=np.int)\n training_idxes.append(tmp_shuffled_idx[tmp_split_idxes[0]: tmp_split_idxes[1]])\n validation_idxes.append(tmp_shuffled_idx[tmp_split_idxes[1]: tmp_split_idxes[2]])\n testing_idxes.append(tmp_shuffled_idx[tmp_split_idxes[2]: tmp_split_idxes[3]])\n return np.concatenate(training_idxes), np.concatenate(validation_idxes), np.concatenate(testing_idxes)\n\n\ndef load_original_data():\n \"\"\"\n load original audio files\n :return:\n \"\"\"\n import os\n\n # genre_folders = [x[0] for x in os.walk(data_folder)]\n genre_folders = os.listdir(hp.data_folder)\n X = []\n T = []\n SR = []\n min_length = 0\n for sub_folder in genre_folders:\n genre_path = hp.data_folder + \"/\" + sub_folder\n audio_files = os.listdir(genre_path)\n for audio_name in audio_files:\n audio_path = genre_path + \"/\" + audio_name\n x, sr = librosa.core.load(audio_path) # x = 661794\n if x.shape[0] < 30 * sr:\n x = np.append(x, np.zeros(30 * sr - x.shape[0])) # insure all files are exactly the same length\n if min_length < x.shape[0]:\n min_length = x.shape[0] # report the duration of the minimum audio clip\n print(\"This audio last %f seconds, zeros are padded at the end.\" % (x.shape[0] * 1.0 / sr))\n X.append(x[:30 * sr])\n SR.append(sr)\n T.append(sub_folder)\n return np.asarray(X), np.asarray(SR), np.asarray(T, dtype=str)\n\n\n# calculate mel-spectrogram\ndef mel_spectrogram(ys, sr, n_mels=hp.n_mels, hop_size=hp.hop_size, fmax=hp.fmax, pre_emphasis=hp.pre_emphasis):\n \"\"\"\n calculate the spectrogram in mel scale, refer to documentation of librosa and MFCC tutorial\n :param ys:\n :param sr:\n :param n_mels:\n :param hop_size:\n :param fmax:\n :param pre_emphasis:\n :return:\n \"\"\"\n if pre_emphasis:\n ys = np.append(ys[0], ys[1:] - pre_emphasis * ys[:-1])\n return librosa.feature.melspectrogram(ys, sr,\n n_fft=hp.n_fft,\n hop_length=hop_size, n_mels=n_mels,\n fmax=fmax)\n\n\n# batch convert waveform into spectrogram in mel-scale\ndef batch_mel_spectrogram(X, SR):\n \"\"\"\n convert all waveforms in R into time * 64 spectrogram in mel scale\n :param X:\n :param SR:\n :return:\n \"\"\"\n melspec_list = []\n for idx in range(X.shape[0]):\n tmp_melspec = mel_spectrogram(X[idx], SR[idx])\n melspec_list.append(tmp_melspec)\n return np.asarray(melspec_list)\n\n\n# def segment_spectrogram(input_spectrogram, num_fft_windows=num_fft_windows):\n# # given 
a spectrogram of a music that's longer than 3 seconds, segment it into relatively independent pieces\n# length_in_fft = input_spectrogram.shape[1]\n# num_segments = int(length_in_fft / num_fft_windows)\n# pass\n\n\ndef baseline_model_32(num_genres=hp.num_genres, input_shape=hp.input_shape):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu', kernel_regularizer=regularizers.l2(0.01),\n input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Conv2D(64, (3, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(Dropout(0.2))\n model.add(Dense(num_genres, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(decay=1e-5),\n metrics=['accuracy'])\n return model\n\n\ndef baseline_model_64(num_genres=hp.num_genres, input_shape=hp.input_shape):\n model = Sequential()\n model.add(Conv2D(64, kernel_size=(3, 3),\n activation='relu', kernel_regularizer=regularizers.l2(0.01),\n input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Conv2D(64, (3, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(Dropout(0.2))\n model.add(Dense(num_genres, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(1e-4),\n metrics=['accuracy'])\n return model\n\n\ndef baseline_model_96(num_genres=hp.num_genres, input_shape=hp.input_shape):\n model = Sequential()\n model.add(Conv2D(96, kernel_size=(3, 3),\n activation='relu', kernel_regularizer=regularizers.l2(0.01),\n input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Conv2D(64, (3, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(Dropout(0.2))\n model.add(Dense(num_genres, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(decay=1e-5),\n metrics=['accuracy'])\n return model\n\n\ndef baseline_model_128(num_genres=hp.num_genres, input_shape=hp.input_shape):\n model = Sequential()\n model.add(Conv2D(128, kernel_size=(3, 3),\n activation='relu', kernel_regularizer=regularizers.l2(0.01),\n input_shape=input_shape))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Conv2D(64, (3, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(Dropout(0.2))\n model.add(Dense(num_genres, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(decay=1e-5),\n metrics=['accuracy'])\n return model\n\n\ndef model_nnet1(num_genres=hp.num_genres, input_shape=hp.input_shape):\n inputs = Input(shape=input_shape, name='model_inputs') # (?, 256, 128, 1)\n # conv1\n conv1 = Conv2D(256, kernel_size=(256, 4), 
activation='relu',\n input_shape=input_shape, kernel_regularizer=regularizers.l2(0.02))\n conv1_temp = conv1(inputs)\n drop1 = Dropout(rate=0.2)\n conv1_outputs = drop1(conv1_temp) # (?, 1, 125, 256)\n # hidden_1\n mp = MaxPooling2D(pool_size=(1, 125))\n hidden_1 = mp(conv1_outputs) # (?, 1, 1, 256)\n # conv2\n conv2 = Conv2D(256, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n conv2_outputs = conv2(conv1_outputs) # (?, 1, 125, 256)\n # hidden_2\n ap = AveragePooling2D(pool_size=(1, 125))\n hidden_2 = ap(conv2_outputs) # (?, 1, 1, 256)\n # hidden_3\n conv3 = Conv2D(128, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n hidden_3 = mp(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # hidden_4\n hidden_4 = ap(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # concatenate\n concat = concatenate([hidden_1, hidden_2, hidden_3, hidden_4], axis=-1) # (?, 1, 1, 768)\n re = Reshape([768])\n concat = re(concat)\n # dense classifier\n d1 = Dense(256, activation='relu')\n d1_outputs = d1(concat) # (?, 256)\n d2 = Dense(64, activation='relu')\n d2_outputs = d2(d1_outputs) # (?, 64)\n d3 = Dense(num_genres, activation='softmax')\n drop2 = Dropout(rate=0.1)\n outputs = d3(drop2(d2_outputs)) # (?, 10)\n re2 = Reshape([10])\n outputs = re2(outputs)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n return model\n\n\ndef model_nnet2(num_genres=hp.num_genres, input_shape=hp.input_shape):\n inputs = Input(shape=input_shape, name='model_inputs') # (?, 256, 128, 1)\n # conv1\n conv1 = Conv2D(256, kernel_size=(128, 4), activation='relu',\n input_shape=input_shape, kernel_regularizer=regularizers.l2(0.02))\n conv1_temp = conv1(inputs)\n drop1 = Dropout(rate=0.2)\n conv1_outputs = drop1(conv1_temp) # (?, 1, 125, 256)\n # hidden_2\n conv2 = Conv2D(256, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n conv2_outputs = conv2(conv1_outputs) # (?, 1, 125, 256)\n # hidden_3\n conv3 = Conv2D(256, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n conv3_outputs = conv3(conv2_outputs) # (?, 1, 125, 256)\n # Add\n add = Add()\n add_outputs = add([conv1_outputs, conv2_outputs, conv3_outputs]) # (?, 1, 125, 256)\n # equivalent to\n # added = keras.layers.add([conv1_outputs, conv2_outputs, conv3_outputs])\n # MaxPooling\n mp = MaxPooling2D(pool_size=(1, 125))\n mp_outputs = mp(add_outputs) # (?, 1, 1, 256)\n # AveragePooling\n ap = AveragePooling2D(pool_size=(1, 125))\n ap_outputs = ap(add_outputs) # (?, 1, 1, 256)\n # concat\n concat = concatenate([mp_outputs, ap_outputs]) # (?, 1, 1, 512)\n re = Reshape([512])\n concat_outputs = re(concat) # (?, 512)\n # dense classifier\n d1 = Dense(128, activation='relu')\n d1_outputs = d1(concat_outputs)\n d2 = Dense(64, activation='relu')\n d2_outputs = d2(d1_outputs)\n d3 = Dense(num_genres, activation='softmax')\n drop2 = Dropout(rate=0.1)\n outputs = d3(drop2(d2_outputs))\n re2 = Reshape([10])\n outputs = re2(outputs)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n return model\n\n\ndef model_nnet1_128(num_genres=hp.num_genres, input_shape=hp.input_shape):\n inputs = Input(shape=input_shape, name='model_inputs') # (?, 128, 128, 1)\n # conv1\n 
conv1 = Conv2D(256, kernel_size=(128, 4), activation='relu',\n input_shape=input_shape, kernel_regularizer=regularizers.l2(0.02))\n conv1_temp = conv1(inputs)\n drop1 = Dropout(rate=0.2)\n conv1_outputs = drop1(conv1_temp) # (?, 1, 125, 256)\n # hidden_1\n mp = MaxPooling2D(pool_size=(1, 125))\n hidden_1 = mp(conv1_outputs) # (?, 1, 1, 256)\n # conv2\n conv2 = Conv2D(256, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n conv2_outputs = conv2(conv1_outputs) # (?, 1, 125, 256)\n # hidden_2\n ap = AveragePooling2D(pool_size=(1, 125))\n hidden_2 = ap(conv2_outputs) # (?, 1, 1, 256)\n # hidden_3\n conv3 = Conv2D(128, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n hidden_3 = mp(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # hidden_4\n hidden_4 = ap(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # concatenate\n concat = concatenate([hidden_1, hidden_2, hidden_3, hidden_4], axis=-1) # (?, 1, 1, 768)\n re = Reshape([768])\n concat = re(concat)\n # dense classifier\n d1 = Dense(256, activation='relu')\n d1_outputs = d1(concat) # (?, 256)\n d2 = Dense(64, activation='relu')\n d2_outputs = d2(d1_outputs) # (?, 64)\n d3 = Dense(num_genres, activation='softmax')\n drop2 = Dropout(rate=0.1)\n outputs = d3(drop2(d2_outputs)) # (?, 10)\n re2 = Reshape([10])\n outputs = re2(outputs)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n return model\n\n\ndef model_nnet3_128(num_genres=hp.num_genres, input_shape=hp.input_shape):\n inputs = Input(shape=input_shape, name='model_inputs') # (?, 128, 128, 1)\n # conv1\n conv1 = Conv2D(256, kernel_size=(128, 4), activation='relu',\n input_shape=input_shape, kernel_regularizer=regularizers.l2(0.02))\n conv1_temp = conv1(inputs)\n drop1 = Dropout(rate=0.2)\n conv1_outputs = drop1(conv1_temp) # (?, 1, 125, 256)\n # hidden_1\n mp = MaxPooling2D(pool_size=(1, 125))\n hidden_1 = mp(conv1_outputs) # (?, 1, 1, 256)\n # conv2\n conv2 = Conv2D(256, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n conv2_outputs = conv2(conv1_outputs) # (?, 1, 125, 256)\n # hidden_2\n ap = AveragePooling2D(pool_size=(1, 125))\n hidden_2 = ap(conv1_outputs) # (?, 1, 1, 256)\n # hidden_3\n conv3 = Conv2D(128, kernel_size=(1, 4), activation='relu',\n kernel_regularizer=regularizers.l2(0.01),\n padding='same')\n hidden_3 = mp(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # hidden_4\n hidden_4 = ap(conv3(conv2_outputs)) # (?, 1, 1, 128)\n # concatenate\n concat = concatenate([hidden_1, hidden_2, hidden_3, hidden_4], axis=-1) # (?, 1, 1, 768)\n re = Reshape([768])\n concat = re(concat)\n # dense classifier\n d1 = Dense(256, activation='relu')\n d1_outputs = d1(concat) # (?, 256)\n d2 = Dense(64, activation='relu')\n d2_outputs = d2(d1_outputs) # (?, 64)\n d3 = Dense(num_genres, activation='softmax')\n drop2 = Dropout(rate=0.1)\n outputs = d3(drop2(d2_outputs)) # (?, 10)\n re2 = Reshape([10])\n outputs = re2(outputs)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])\n return model\n\n\n\ndef model_lstm(num_genres=hp.num_genres, input_shape=hp.input_shape):\n model = Sequential()\n model.add(Reshape((128, 128), input_shape=input_shape))\n model.add(LSTM(units=128, return_sequences=True))\n model.add(Reshape((128, 128, 
1)))\n model.summary()\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Conv2D(64, (3, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01)))\n model.add(MaxPooling2D(pool_size=(2, 4)))\n model.add(Dropout(0.2))\n model.add(Flatten())\n model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.02)))\n model.add(Dropout(0.2))\n model.add(Dense(num_genres, activation='softmax'))\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n return model\n\ndef net1_correct():\n inputs = Input(shape=(256, 128, 1))\n conv1 = Conv2D(256, (249, 4), padding='valid', activation='relu', kernel_regularizer=regularizers.l2(0.02))(inputs)\n conv1_drop = Dropout(0.2)(conv1)\n conv2 = Conv2D(256, (1, 4), padding='same', kernel_regularizer=regularizers.l2(0.01))(conv1_drop)\n hidden1 = MaxPooling2D((1, 125))(conv1_drop)\n hidden2 = AveragePooling2D((1, 125))(conv2)\n conv3 = Conv2D(128, (1, 4), padding='same', kernel_regularizer=regularizers.l2(0.01))(conv2)\n hidden3 = MaxPooling2D((1, 125))(conv3)\n conv4 = Conv2D(128, (1, 4), padding='same', kernel_regularizer=regularizers.l2(0.01))(conv2)\n hidden4 = AveragePooling2D((1, 125))(conv4)\n concat = concatenate([hidden1, hidden2, hidden3, hidden4], axis=-1)\n flatten = Flatten()(concat)\n dense1 = Dense(256, activation='relu')(flatten)\n dense2 = Dense(64, activation='relu')(dense1)\n dense2_drop = Dropout(0.1)(dense2)\n outputs = Dense(10, activation='softmax')(dense2_drop)\n model = Model(inputs=inputs, outputs=outputs)\n model.compile(optimizer=keras.optimizers.Adadelta(),\n loss=keras.losses.categorical_crossentropy,\n metrics=['accuracy'])\n return model\n\n\nclass Music_Genre_CNN(object):\n def __init__(self, ann_model):\n self.model = ann_model()\n\n def load_model(self, model_path, custom_objects=None):\n self.model = load_model(model_path, custom_objects=custom_objects)\n\n def summary(self):\n print(self.model.summary())\n\n def train_model(self, input_spectrograms, labels, cv=False,\n validation_spectrograms=None, validation_labels=None,\n small_batch_size=hp.small_batch_size, max_iteration=hp.max_iteration,\n print_interval=hp.print_interval):\n \"\"\"\n train the CNN model\n :param print_interval:\n :param input_spectrograms: number of training examplex * num of mel bands * number of fft windows * 1\n type: 4D numpy array\n :param labels: vectorized class labels\n type:\n :param cv: whether do cross validation\n :param validation_spectrograms: data used for cross validation\n type: as input_spectrogram\n :param validation_labels: used for cross validation\n :param small_batch_size: size of each training batch\n :param max_iteration:\n maximum number of iterations allowed for one training\n :return:\n trained model\n \"\"\"\n validation_accuracy_list = []\n for iii in range(max_iteration):\n\n st_time = time.time()\n\n # split training data into even batches\n num_training_data = len(input_spectrograms)\n batch_idx = np.random.permutation(num_training_data)\n num_batches = int(num_training_data / small_batch_size)\n\n for jjj in range(num_batches - 1):\n\n sample_idx = np.random.randint(input_spectrograms.shape[2] - hp.num_fft_windows)\n training_idx = batch_idx[jjj * small_batch_size: (jjj + 1) * small_batch_size]\n training_data = input_spectrograms[training_idx, :, sample_idx:sample_idx + hp.num_fft_windows, :]\n # (150, 128, 128, 1)\n training_label = labels[training_idx] # (150, 10)\n # training_label = K.reshape(training_label, (150, 1, 1, 
10))\n # checkpoint = ModelCheckpoint(hp.best_model, monitor='val_acc', verbose=0, save_best_only=True,\n # mode='max', period=1)\n # checkpoint_list = [checkpoint]\n self.model.train_on_batch(training_data, training_label)\n # self.model.evaluate(training_data, training_label, verbose=0)\n # print(\"Training accuracy is: %f\" % (training_accuracy))\n\n # end_time = time.time()\n # elapsed_time = end_time - st_time\n\n if iii % print_interval == 0:\n sample_idx = np.random.randint(input_spectrograms.shape[2] - hp.num_fft_windows)\n # training_idx = batch_idx[jjj * small_batch_size: (jjj + 1) * small_batch_size] \n training_data = input_spectrograms[:, :, sample_idx:sample_idx + hp.num_fft_windows, :]\n training_label = labels[:]\n training_loss, _ = self.model.evaluate(training_data, training_label, verbose=0)\n training_accuracy, _ = self.test_model(input_spectrograms[:, :, :, :], training_label)\n \n validation_accuracy, _ = self.test_model(validation_spectrograms, validation_labels)\n print(\"\\nIteration:%d Loss: %f; Training accuracy: %f, Validation accuracy: %f\\n\" \n %(iii, training_loss, training_accuracy, validation_accuracy)) \n\n \"\"\"\n if cv:\n validation_accuracy = self.model.evaluate(\n validation_spectrograms[:, :, sample_idx:sample_idx + hp.num_fft_windows, :]\n , validation_labels, verbose=0)\n validation_accuracy_list.append(validation_accuracy[1])\n else:\n validation_accuracy = [-1.0, -1.0]\n\n if iii % print_interval == 0:\n with open(hp.loss_log, \"a\") as text_file:\n things2write = \"iter: \" + str(iii) + \"\\t\" + \"loss: \" + str(training_accuracy[0]) + \"\\t\" + str(training_accuracy[1]) + \"\\t\" + str(validation_accuracy[1]) + \"\\n\"\n text_file.write(things2write)\n print(\"\\nIteration:%d Loss: %f; Training accuracy: %f, Validation accuracy: %f\\n\" %\n (iii, training_accuracy[0], training_accuracy[1], validation_accuracy[1]))\n if cv:\n return np.asarray(validation_accuracy_list)\n \"\"\"\n\n def song_spectrogram_prediction(self, song_mel_spectrogram, overlap):\n \"\"\"\n give the predicted_probability for each class and each segment\n :param song_mel_spectrogram:\n 4D numpy array: num of time windows * mel bands * 1 (depth)\n :param overlap:\n overlap between segments, overlap = 0 means no overlap between segments\n :return:\n predictions: numpy array (number of segments * num classes)\n \"\"\"\n largest_idx = song_mel_spectrogram.shape[1] - hp.num_fft_windows - 1\n step_size = int((1 - overlap) * hp.num_fft_windows)\n num_segments = int(largest_idx / step_size)\n segment_edges = np.arange(num_segments) * step_size\n segment_list = []\n for idx in segment_edges:\n segment = song_mel_spectrogram[:, idx: idx + hp.num_fft_windows]\n segment_list.append(segment)\n segment_array = np.asarray(segment_list)[:, :, :, np.newaxis]\n # predictions = self.model.predict_proba(segment_array, batch_size=len(segment_array), verbose=0)\n predictions = self.model.predict(segment_array, batch_size=len(segment_array), verbose=0)\n summarized_prediction = np.argmax(predictions.sum(axis=0))\n return summarized_prediction, predictions\n\n def test_model(self, test_X, test_T, overlap=0.5):\n # test the accuracy of the model using testing data\n # test_T (100, 10), one_hot vector\n num_sample = len(test_T) # 1000 * 1/10 = 100\n correct_labels = np.argmax(test_T, axis=1)\n predicted_labels = np.zeros(num_sample)\n # test_X (100, 128, 1292)\n for iii in range(len(test_X)):\n song_mel_spectrogram = test_X[iii].squeeze()\n predicted_labels[iii], _ = 
self.song_spectrogram_prediction(song_mel_spectrogram, overlap=overlap)\n confusion_data = np.vstack((predicted_labels, correct_labels)).T\n accuracy = np.sum(correct_labels == predicted_labels) * 1.0 / num_sample\n return accuracy, confusion_data\n\n def backup_model(self, model_bk_name=False):\n if not model_bk_name:\n year, month, day, hour, minute = time.strftime(\"%Y,%m,%d,%H,%M\").split(',')\n model_type = hp.model_name\n model_bk_name = \"saved_model/mgcnn_\" + model_type + \"_\" + month + day + hour + minute + \".h5\"\n self.model.save(model_bk_name)\n\n def song_genre_prediction(self, audio_path):\n # resample the song into single channel, 22050 sampling frequency\n\n # convert into mel-scale spectrogram\n\n # predict using trained model\n\n x, sr = librosa.core.load(audio_path)\n\n xm = mel_spectrogram(ys=x, sr=sr)\n\n summarized_predictions, predictions = self.song_spectrogram_prediction(xm, 0.5)\n\n print(\"predict result: \" + str(np.argmax(predictions, axis=1)))\n\n genres_dict = {'blues': 0, 'classical': 1, 'country': 2, 'disco': 3, 'hiphop': 4,\n 'jazz': 5, 'metal': 6, 'pop': 7, 'reggae': 8, 'rock': 9}\n\n print(\"summarized_predictions: \" +\n str(list(genres_dict.keys())[list(genres_dict.values()).index(summarized_predictions)]))\n\n\n\n" ]
[ [ "numpy.unique", "numpy.asarray", "numpy.arange", "numpy.flatnonzero", "numpy.concatenate", "numpy.append", "numpy.random.permutation", "numpy.argmax", "numpy.zeros", "numpy.sum", "numpy.vstack", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FractalArt/chaos_exercises
[ "ce86858ceb887560a30f6fd313d920a18f2da5c7" ]
[ "ch2/ex2_8_6.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef f(x):\n return x + np.exp(-x)\n\n\ndef plot_gradient_field(ax, func, title=None):\n X = np.arange(-4, 4, 0.1)\n Y = np.arange(-4, 4, 0.1)\n U, V = np.meshgrid(np.ones(len(X)), func(X))\n ax.quiver(X, Y, U/3, V/3)\n\n ax.set_xlabel('x')\n ax.set_ylabel('x\\'')\n if title:\n ax.set_title(title)\n\ndef euler_method(f, t0: float, x0: float, timestep: float, end: float, exact_solution=None):\n \"\"\"\n Implementation of the euler method to numerically compute the solution\n to the differential equation\n\n x'=f(x)\n\n Parameters\n ----------\n f: function\n The implementation of the function `f` appearing in the differential\n equation.\n t0: float\n The initial time.\n x0: float\n The initial condition to the differential equation, i.e. the value\n of x(t=t0).\n timestep: float\n The timestep to employ for the numerical solution of the differential\n equation.\n end: float\n The maximal time step up to which to compute the the solution.\n exact_solution: function\n The exact solution. If the value is different from `None` the exact\n solution will\n be evaluated at each time step and the corresponding values will be\n returned in order\n to be able to check the convergence of the numerical solution.\n\n \"\"\"\n if end < t0:\n raise ValueError(\"Initial time is larger than the end time!\")\n\n # Store the time steps\n time_steps = [t0]\n # Store the value at each time step\n values = [x0]\n # Store the exact values of the solutions at each time step, if the exact\n # solution is provided\n if exact_solution:\n exact_values = [exact_solution(t0)]\n\n # Now start solving the differential equation numerically\n t = t0\n x = x0\n while t < end:\n t = t + timestep\n time_steps.append(t)\n x = x + f(x) * timestep\n values.append(x)\n if exact_solution:\n exact_values.append(exact_solution(t))\n\n return time_steps, values, None if not exact_solution else exact_values\n\n\ndef runge_kutta_method(f, t0: float, x0: float, timestep: float, end: float, exact_solution=None):\n \"\"\"\n Implementation of the Runge-Kutta method to numerically compute the solution\n to the differential equation\n\n x'=f(x)\n\n Parameters\n ----------\n f: function\n The implementation of the function `f` appearing in the differential\n equation.\n t0: float\n The initial time.\n x0: float\n The initial condition to the differential equation, i.e. the value\n of x(t=t0).\n timestep: float\n The timestep to employ for the numerical solution of the differential\n equation.\n end: float\n The maximal time step up to which to compute the the solution.\n exact_solution: function\n The exact solution. 
If the value is different from `None` the exact\n solution will\n be evaluated at each time step and the corresponding values will be\n returned in order\n to be able to check the convergence of the numerical solution.\n\n \"\"\"\n if end < t0:\n raise ValueError(\"Initial time is larger than the end time!\")\n\n # Store the time steps\n time_steps = [t0]\n # Store the value at each time step\n values = [x0]\n # Store the exact values of the solutions at each time step, if the exact\n # solution is provided\n if exact_solution:\n exact_values = [exact_solution(t0)]\n\n # Now start solving the differential equation numerically\n t = t0\n x = x0\n while t < end:\n t = t + timestep\n time_steps.append(t)\n \n k1 = f(x) * timestep\n k2 = f(x + 0.5 * k1) * timestep\n k3 = f(x + 0.5 * k2) * timestep\n k4 = f(x + k3) * timestep\n\n x = x + 1.0 / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)\n values.append(x)\n \n if exact_solution:\n exact_values.append(exact_solution(t))\n\n return time_steps, values, None if not exact_solution else exact_values\n\n\nif __name__ == \"__main__\":\n # a) Draw the gradient filed\n fig, ax = plt.subplots()\n plot_gradient_field(ax, f)\n plt.tight_layout()\n # plt.show()\n\n # b) Euler Method with stepsize 0.001\n time_steps, values, _ = euler_method(f, 0, 0, 0.001, 1)\n print(f\"Euler result with step size 0.001: {values[-1]}\")\n\n time_steps, values, _ = runge_kutta_method(f, 0, 0, 1, 1)\n print(f\"Runge-Kutta result with step size 1: {values[-1]}\")\n\n # This gives a weird result\n time_steps, values, _ = runge_kutta_method(f, 0, 0, 0.1, 1)\n print(f\"Runge-Kutta result with step size 0.1: {values[-1]}\")\n\n time_steps, values, _ = runge_kutta_method(f, 0, 0, 0.001, 1)\n print(f\"Runge-Kutta result with step size 0.001: {values[-1]}\")\n\n print(f(-1))\n\n" ]
[ [ "numpy.exp", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
biggates/wfdb-python
[ "5bb2e3c19a196a4efda0762f7596889a3eed5cee" ]
[ "tests/test_record.py" ]
[ "import os\nimport pdb\nimport shutil\nimport unittest\n\nimport numpy as np\nimport wfdb\n\n\nclass TestRecord(unittest.TestCase):\n \"\"\"\n Test read and write of single segment WFDB records, including\n PhysioNet streaming.\n\n Target files created using the original WFDB Software Package\n version 10.5.24\n\n \"\"\"\n\n # ----------------------- 1. Basic Tests -----------------------#\n\n def test_1a(self):\n \"\"\"\n Format 16, entire signal, digital.\n\n Target file created with:\n rdsamp -r sample-data/test01_00s | cut -f 2- > record-1a\n \"\"\"\n record = wfdb.rdrecord('sample-data/test01_00s', physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-1a')\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('test01_00s', physical=False,\n pn_dir='macecgdb')\n\n # Test file writing\n record_2 = wfdb.rdrecord('sample-data/test01_00s', physical=False)\n record_2.sig_name = ['ECG_1', 'ECG_2', 'ECG_3', 'ECG_4']\n record_2.wrsamp()\n record_write = wfdb.rdrecord('test01_00s', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record_2.__eq__(record_write)\n\n def test_1b(self):\n \"\"\"\n Format 16, byte offset, selected duration, selected channels,\n physical.\n\n Target file created with:\n rdsamp -r sample-data/a103l -f 50 -t 160 -s 2 0 -P | cut -f 2- > record-1b\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/a103l', sampfrom=12500,\n sampto=40000, channels=[2, 0])\n sig_round = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-1b')\n\n # Compare data streaming from Physionet\n sig_pn, fields_pn = wfdb.rdsamp('a103l',\n pn_dir='challenge-2015/training',\n sampfrom=12500, sampto=40000,\n channels=[2, 0])\n\n # Option of selecting channels by name\n sig_named, fields_named = wfdb.rdsamp('sample-data/a103l',\n sampfrom=12500, sampto=40000,\n channel_names=['PLETH', 'II'])\n\n assert np.array_equal(sig_round, sig_target)\n assert np.array_equal(sig, sig_pn) and fields == fields_pn\n assert np.array_equal(sig, sig_named) and fields == fields_named\n\n def test_1c(self):\n \"\"\"\n Format 16, byte offset, selected duration, selected channels,\n digital.\n\n Target file created with:\n rdsamp -r sample-data/a103l -f 80 -s 0 1 | cut -f 2- > record-1c\n \"\"\"\n record = wfdb.rdrecord('sample-data/a103l',\n sampfrom=20000, channels=[0, 1], physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-1c')\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('a103l', pn_dir='challenge-2015/training',\n sampfrom=20000, channels=[0, 1],\n physical=False)\n\n # Test file writing\n record.wrsamp()\n record_write = wfdb.rdrecord('a103l', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record.__eq__(record_write)\n\n def test_1d(self):\n \"\"\"\n Format 80, selected duration, selected channels, physical\n\n Target file created with:\n rdsamp -r sample-data/3000003_0003 -f 1 -t 8 -s 1 -P | cut -f 2- > record-1d\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/3000003_0003', sampfrom=125,\n sampto=1000, channels=[1])\n sig_round = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-1d')\n sig_target = sig_target.reshape(len(sig_target), 1)\n\n # Compare data streaming from Physionet\n sig_pn, fields_pn = wfdb.rdsamp('3000003_0003',\n pn_dir='mimic3wdb/30/3000003/',\n sampfrom=125, sampto=1000,\n channels=[1])\n\n 
assert np.array_equal(sig_round, sig_target)\n assert np.array_equal(sig, sig_pn) and fields == fields_pn\n\n def test_1e(self):\n \"\"\"\n Format 24, entire signal, digital.\n\n Target file created with:\n rdsamp -r sample-data/n8_evoked_raw_95_F1_R9 | cut -f 2- > record-1e\n \"\"\"\n record = wfdb.rdrecord('sample-data/n8_evoked_raw_95_F1_R9', physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-1e')\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('n8_evoked_raw_95_F1_R9', physical=False,\n pn_dir='earndb/raw/N8')\n\n # Test file writing\n record_2 = wfdb.rdrecord('sample-data/n8_evoked_raw_95_F1_R9', physical=False)\n record_2.wrsamp()\n record_write = wfdb.rdrecord('sample-data/n8_evoked_raw_95_F1_R9', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record_2.__eq__(record_write)\n\n # ------------------ 2. Special format records ------------------ #\n\n def test_2a(self):\n \"\"\"\n Format 212, entire signal, physical.\n\n Target file created with:\n rdsamp -r sample-data/100 -P | cut -f 2- > record-2a\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/100')\n sig_round = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-2a')\n\n # Compare data streaming from Physionet\n sig_pn, fields_pn = wfdb.rdsamp('100', pn_dir = 'mitdb')\n # This comment line was manually added and is not present in the\n # original PhysioNet record\n del(fields['comments'][0])\n\n assert np.array_equal(sig_round, sig_target)\n assert np.array_equal(sig, sig_pn) and fields == fields_pn\n\n def test_2b(self):\n \"\"\"\n Format 212, selected duration, selected channel, digital.\n\n Target file created with:\n rdsamp -r sample-data/100 -f 0.002 -t 30 -s 1 | cut -f 2- > record-2b\n \"\"\"\n record = wfdb.rdrecord('sample-data/100', sampfrom=1,\n sampto=10800, channels=[1], physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-2b')\n sig_target = sig_target.reshape(len(sig_target), 1)\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('100', sampfrom=1, sampto=10800,\n channels=[1], physical=False, pn_dir='mitdb')\n # This comment line was manually added and is not present in the\n # original PhysioNet record\n del(record.comments[0])\n\n # Option of selecting channels by name\n record_named = wfdb.rdrecord('sample-data/100', sampfrom=1,\n sampto=10800, channel_names=['V5'],\n physical=False)\n del(record_named.comments[0])\n\n # Test file writing\n record.wrsamp()\n record_write = wfdb.rdrecord('100', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record.__eq__(record_named)\n assert record.__eq__(record_write)\n\n def test_2c(self):\n \"\"\"\n Format 212, entire signal, physical, odd sampled record.\n\n Target file created with:\n rdsamp -r sample-data/100_3chan -P | cut -f 2- > record-2c\n \"\"\"\n record = wfdb.rdrecord('sample-data/100_3chan')\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-2c')\n\n # Test file writing\n record.d_signal = record.adc()\n record.wrsamp()\n record_write = wfdb.rdrecord('100_3chan')\n record.d_signal = None\n\n assert np.array_equal(sig_round, sig_target)\n assert record.__eq__(record_write)\n\n def test_2d(self):\n \"\"\"\n Format 310, selected duration, digital\n Target file created with:\n rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 | cut -f 2- | 
wrsamp -o 310derive -O 310\n rdsamp -r 310derive -f 0.007 | cut -f 2- > record-2d\n \"\"\"\n record = wfdb.rdrecord('sample-data/310derive', sampfrom=2,\n physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-2d')\n assert np.array_equal(sig, sig_target)\n\n def test_2e(self):\n \"\"\"\n Format 311, selected duration, physical.\n\n Target file created with:\n rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 -s 1 | cut -f 2- | wrsamp -o 311derive -O 311\n rdsamp -r 311derive -f 0.005 -t 3.91 -P | cut -f 2- > record-2e\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/311derive', sampfrom=1,\n sampto=978)\n sig = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-2e')\n sig_target = sig_target.reshape([977, 1])\n assert np.array_equal(sig, sig_target)\n\n def test_2f(self):\n \"\"\"\n EDF format conversion to MIT for uniform sample rates.\n\n \"\"\"\n # Uniform sample rates\n record_MIT = wfdb.rdrecord('sample-data/n16').__dict__\n record_EDF = wfdb.edf2mit('sample-data/n16.edf').__dict__\n\n fields = list(record_MIT.keys())\n # Original MIT format method of checksum is outdated, sometimes\n # the same value though\n fields.remove('checksum')\n # Original MIT format units are less comprehensive since they\n # default to mV if unknown.. therefore added more default labels\n fields.remove('units')\n\n test_results = []\n for field in fields:\n # Signal value will be slightly off due to C to Python type conversion\n if field == 'p_signal':\n true_array = np.array(record_MIT[field]).flatten()\n pred_array = np.array(record_EDF[field]).flatten()\n # Prevent divide by zero warning\n for i,v in enumerate(true_array):\n if v == 0:\n true_array[i] = 1\n pred_array[i] = 1\n sig_diff = np.abs((pred_array - true_array) / true_array)\n sig_diff[sig_diff == -np.inf] = 0\n sig_diff[sig_diff == np.inf] = 0\n sig_diff = np.nanmean(sig_diff,0)\n # 5% tolerance\n if np.max(sig_diff) <= 5:\n test_results.append(True)\n else:\n test_results.append(False)\n elif field == 'init_value':\n signal_diff = [abs(record_MIT[field][i] - record_EDF[field][i]) for i in range(len(record_MIT[field]))]\n if abs(max(min(signal_diff), max(signal_diff), key=abs)) <= 2:\n test_results.append(True)\n else:\n test_results.append(False)\n else:\n test_results.append(record_MIT[field] == record_MIT[field])\n\n target_results = len(fields) * [True]\n assert np.array_equal(test_results, target_results)\n\n def test_2g(self):\n \"\"\"\n EDF format conversion to MIT for non-uniform sample rates.\n\n \"\"\"\n # Non-uniform sample rates\n record_MIT = wfdb.rdrecord('sample-data/wave_4').__dict__\n record_EDF = wfdb.edf2mit('sample-data/wave_4.edf').__dict__\n\n fields = list(record_MIT.keys())\n # Original MIT format method of checksum is outdated, sometimes\n # the same value though\n fields.remove('checksum')\n # Original MIT format units are less comprehensive since they\n # default to mV if unknown.. 
therefore added more default labels\n fields.remove('units')\n\n test_results = []\n for field in fields:\n # Signal value will be slightly off due to C to Python type conversion\n if field == 'p_signal':\n true_array = np.array(record_MIT[field]).flatten()\n pred_array = np.array(record_EDF[field]).flatten()\n # Prevent divide by zero warning\n for i,v in enumerate(true_array):\n if v == 0:\n true_array[i] = 1\n pred_array[i] = 1\n sig_diff = np.abs((pred_array - true_array) / true_array)\n sig_diff[sig_diff == -np.inf] = 0\n sig_diff[sig_diff == np.inf] = 0\n sig_diff = np.nanmean(sig_diff,0)\n # 5% tolerance\n if np.max(sig_diff) <= 5:\n test_results.append(True)\n else:\n test_results.append(False)\n elif field == 'init_value':\n signal_diff = [abs(record_MIT[field][i] - record_EDF[field][i]) for i in range(len(record_MIT[field]))]\n if abs(max(min(signal_diff), max(signal_diff), key=abs)) <= 2:\n test_results.append(True)\n else:\n test_results.append(False)\n else:\n test_results.append(record_MIT[field] == record_MIT[field])\n\n target_results = len(fields) * [True]\n assert np.array_equal(test_results, target_results)\n\n # --------------------- 3. Multi-dat records --------------------- #\n\n def test_3a(self):\n \"\"\"\n Multi-dat, entire signal, digital\n Target file created with:\n rdsamp -r sample-data/s0010_re | cut -f 2- > record-3a\n \"\"\"\n record = wfdb.rdrecord('sample-data/s0010_re', physical=False)\n sig = record.d_signal\n sig_target = np.genfromtxt('tests/target-output/record-3a')\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('s0010_re', physical=False,\n pn_dir='ptbdb/patient001')\n\n # Test file writing\n record.wrsamp()\n record_write = wfdb.rdrecord('s0010_re', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record.__eq__(record_write)\n\n def test_3b(self):\n \"\"\"\n Multi-dat, selected duration, selected channels, physical.\n\n Target file created with:\n rdsamp -r sample-data/s0010_re -f 5 -t 38 -P -s 13 0 4 8 3 | cut -f 2- > record-3b\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/s0010_re', sampfrom=5000,\n sampto=38000, channels=[13, 0, 4, 8, 3])\n sig_round = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-3b')\n\n # Compare data streaming from Physionet\n sig_pn, fields_pn = wfdb.rdsamp('s0010_re', sampfrom=5000,\n pn_dir='ptbdb/patient001',\n sampto=38000,\n channels=[13, 0, 4, 8, 3])\n\n assert np.array_equal(sig_round, sig_target)\n assert np.array_equal(sig, sig_pn) and fields == fields_pn\n\n\n # -------------- 4. Skew and multiple samples/frame -------------- #\n\n def test_4a(self):\n \"\"\"\n Format 16, multi-samples per frame, skew, digital.\n\n Target file created with:\n rdsamp -r sample-data/test01_00s_skewframe | cut -f 2- > record-4a\n \"\"\"\n record = wfdb.rdrecord('sample-data/test01_00s_skewframe',\n physical=False)\n sig = record.d_signal\n # The WFDB library rdsamp does not return the final N samples for all\n # channels due to the skew. The WFDB python rdsamp does return the final\n # N samples, filling in NANs for end of skewed channels only.\n sig = sig[:-3, :]\n\n sig_target = np.genfromtxt('tests/target-output/record-4a')\n\n # Test file writing. 
Multiple samples per frame and skew.\n # Have to read all the samples in the record, ignoring skew\n record_no_skew = wfdb.rdrecord('sample-data/test01_00s_skewframe',\n physical=False,\n smooth_frames=False, ignore_skew=True)\n record_no_skew.wrsamp(expanded=True)\n # Read the written record\n record_write = wfdb.rdrecord('test01_00s_skewframe', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_write)\n\n def test_4b(self):\n \"\"\"\n Format 12, multi-samples per frame, skew, entire signal, digital.\n\n Target file created with:\n rdsamp -r sample-data/03700181 | cut -f 2- > record-4b\n \"\"\"\n record = wfdb.rdrecord('sample-data/03700181', physical=False)\n sig = record.d_signal\n # The WFDB library rdsamp does not return the final N samples for all\n # channels due to the skew.\n sig = sig[:-4, :]\n # The WFDB python rdsamp does return the final N samples, filling in\n # NANs for end of skewed channels only.\n sig_target = np.genfromtxt('tests/target-output/record-4b')\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('03700181', physical=False,\n pn_dir='mimicdb/037')\n\n # Test file writing. Multiple samples per frame and skew.\n # Have to read all the samples in the record, ignoring skew\n record_no_skew = wfdb.rdrecord('sample-data/03700181', physical=False,\n smooth_frames=False, ignore_skew=True)\n record_no_skew.wrsamp(expanded=True)\n # Read the written record\n record_write = wfdb.rdrecord('03700181', physical=False)\n\n assert np.array_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record.__eq__(record_write)\n\n def test_4c(self):\n \"\"\"\n Format 12, multi-samples per frame, skew, selected suration,\n selected channels, physical.\n\n Target file created with:\n rdsamp -r sample-data/03700181 -f 8 -t 128 -s 0 2 -P | cut -f 2- > record-4c\n \"\"\"\n sig, fields = wfdb.rdsamp('sample-data/03700181', channels=[0, 2],\n sampfrom=1000, sampto=16000)\n sig_round = np.round(sig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-4c')\n\n # Compare data streaming from Physionet\n sig_pn, fields_pn = wfdb.rdsamp('03700181', pn_dir='mimicdb/037',\n channels=[0, 2], sampfrom=1000,\n sampto=16000)\n\n # Test file writing. 
Multiple samples per frame and skew.\n # Have to read all the samples in the record, ignoring skew\n record_no_skew = wfdb.rdrecord('sample-data/03700181', physical=False,\n smooth_frames=False, ignore_skew=True)\n record_no_skew.wrsamp(expanded=True)\n # Read the written record\n writesig, writefields = wfdb.rdsamp('03700181', channels=[0, 2],\n sampfrom=1000, sampto=16000)\n\n assert np.array_equal(sig_round, sig_target)\n assert np.array_equal(sig, sig_pn) and fields == fields_pn\n assert np.array_equal(sig, writesig) and fields == writefields\n\n def test_4d(self):\n \"\"\"\n Format 16, multi-samples per frame, skew, read expanded signals\n\n Target file created with:\n rdsamp -r sample-data/test01_00s_skewframe -P -H | cut -f 2- > record-4d\n \"\"\"\n record = wfdb.rdrecord('sample-data/test01_00s_skewframe',\n smooth_frames=False)\n\n # Upsample the channels with lower samples/frame\n expandsig = np.zeros((7994, 3))\n expandsig[:,0] = np.repeat(record.e_p_signal[0][:-3],2)\n expandsig[:,1] = record.e_p_signal[1][:-6]\n expandsig[:,2] = np.repeat(record.e_p_signal[2][:-3],2)\n\n sig_round = np.round(expandsig, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-4d')\n\n assert np.array_equal(sig_round, sig_target)\n\n def test_header_with_non_utf8(self):\n \"\"\"\n Ignores non-utf8 characters in the header part.\n \"\"\"\n record = wfdb.rdrecord(\"sample-data/test_generator_2\")\n sig_units_target = ['uV', 'uV', 'uV', 'uV', 'uV', 'uV', 'uV', 'uV', 'mV', 'mV', 'uV', 'mV']\n assert record.units.__eq__(sig_units_target)\n\n @classmethod\n def tearDownClass(cls):\n \"Clean up written files\"\n writefiles = ['03700181.dat','03700181.hea','100.atr','100.dat',\n '100.hea','1003.atr','100_3chan.dat','100_3chan.hea',\n '12726.anI','a103l.hea','a103l.mat','s0010_re.dat',\n 's0010_re.hea','s0010_re.xyz','test01_00s.dat',\n 'test01_00s.hea','test01_00s_skewframe.hea',\n 'n8_evoked_raw_95_F1_R9.dat', 'n8_evoked_raw_95_F1_R9.hea']\n\n for file in writefiles:\n if os.path.isfile(file):\n os.remove(file)\n\n\nclass TestMultiRecord(unittest.TestCase):\n \"\"\"\n Test read and write of multi segment WFDB records, including\n PhysioNet streaming.\n\n Target files created using the original WFDB Software Package\n version 10.5.24\n\n \"\"\"\n\n def test_multi_fixed_a(self):\n \"\"\"\n Multi-segment, fixed layout, read entire signal.\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/fixed1/v102s -P | cut -f 2- > record-multi-fixed-a\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s')\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-multi-fixed-a')\n\n np.testing.assert_equal(sig_round, sig_target)\n\n def test_multi_fixed_b(self):\n \"\"\"\n Multi-segment, fixed layout, selected duration, samples read\n from one segment.\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/fixed1/v102s -t s75000 -P | cut -f 2- > record-multi-fixed-b\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s',\n sampto=75000)\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-multi-fixed-b')\n\n np.testing.assert_equal(sig_round, sig_target)\n\n def test_multi_fixed_c(self):\n \"\"\"\n Multi-segment, fixed layout, selected duration and channels,\n samples read from multiple segments\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/fixed1/v102s -f s70000 -t s80000 -s 1 0 3 -P | cut -f 2- > 
record-multi-fixed-c\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s',\n sampfrom=70000, sampto=80000, channels=[1, 0, 3])\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-multi-fixed-c')\n\n # Option of selecting channels by name\n record_named = wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s',\n sampfrom=70000, sampto=80000,\n channel_names=['V', 'II', 'RESP'])\n\n np.testing.assert_equal(sig_round, sig_target)\n assert record.__eq__(record_named)\n\n def test_multi_variable_a(self):\n \"\"\"\n Multi-segment, variable layout, selected duration, samples read\n from one segment only.\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428365 -t s14428375 -P | cut -f 2- > record-multi-variable-a\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/s00001/s00001-2896-10-10-00-31',\n sampfrom=14428365, sampto=14428375)\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-multi-variable-a')\n\n np.testing.assert_equal(sig_round, sig_target)\n\n def test_multi_variable_b(self):\n \"\"\"\n Multi-segment, variable layout, selected duration, samples read\n from several segments.\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428364 -t s14428375 -P | cut -f 2- > record-multi-variable-b\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/s00001/s00001-2896-10-10-00-31',\n sampfrom=14428364, sampto=14428375)\n sig_round = np.round(record.p_signal, decimals=8)\n sig_target = np.genfromtxt('tests/target-output/record-multi-variable-b')\n\n np.testing.assert_equal(sig_round, sig_target)\n\n def test_multi_variable_c(self):\n \"\"\"\n Multi-segment, variable layout, entire signal, physical\n\n The reference signal creation cannot be made with rdsamp\n directly because the WFDB c package (10.5.24) applies the single\n adcgain and baseline values from the layout specification\n header, which is undesired in multi-segment signals with\n different adcgain/baseline values across segments.\n\n Target file created with:\n ```\n for i in {01..18}\n do\n rdsamp -r sample-data/multi-segment/s25047/3234460_00$i -P | cut -f 2- >> record-multi-variable-c\n done\n ```\n\n Entire signal has 543240 samples.\n - 25740 length empty segment.\n - First 16 segments have same 2 channels, length 420000\n - Last 2 segments have same 3 channels, length 97500\n\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/s25047/s25047-2704-05-04-10-44')\n sig_round = np.round(record.p_signal, decimals=8)\n\n sig_target_a = np.full((25740,3), np.nan)\n sig_target_b = np.concatenate(\n (np.genfromtxt('tests/target-output/record-multi-variable-c', skip_footer=97500),\n np.full((420000, 1), np.nan)), axis=1)\n sig_target_c = np.genfromtxt('tests/target-output/record-multi-variable-c',\n skip_header=420000)\n sig_target = np.concatenate((sig_target_a, sig_target_b, sig_target_c))\n\n np.testing.assert_equal(sig_round, sig_target)\n\n def test_multi_variable_d(self):\n \"\"\"\n Multi-segment, variable layout, selected duration, selected\n channels, digital. There are two channels: PLETH, and II. 
Their\n fmt, adc_gain, and baseline do not change between the segments.\n\n Target file created with:\n rdsamp -r sample-data/multi-segment/p000878/p000878-2137-10-26-16-57 -f s3550 -t s7500 -s 0 1 | cut -f 2- | perl -p -e 's/-32768/ -128/g;' > record-multi-variable-d\n\n \"\"\"\n record = wfdb.rdrecord('sample-data/multi-segment/p000878/p000878-2137-10-26-16-57',\n sampfrom=3550, sampto=7500, channels=[0, 1],\n physical=False)\n sig = record.d_signal\n\n # Compare data streaming from Physionet\n record_pn = wfdb.rdrecord('p000878-2137-10-26-16-57',\n pn_dir='mimic3wdb/matched/p00/p000878/',\n sampfrom=3550, sampto=7500, channels=[0, 1],\n physical=False)\n sig_target = np.genfromtxt('tests/target-output/record-multi-variable-d')\n\n # Option of selecting channels by name\n record_named = wfdb.rdrecord('sample-data/multi-segment/p000878/p000878-2137-10-26-16-57',\n sampfrom=3550, sampto=7500, physical=False,\n channel_names=['PLETH', 'II'])\n\n\n np.testing.assert_equal(sig, sig_target)\n assert record.__eq__(record_pn)\n assert record.__eq__(record_named)\n\n\nclass TestSignal(unittest.TestCase):\n \"\"\"\n For lower level signal tests\n\n \"\"\"\n def test_infer_sig_len(self):\n \"\"\"\n Infer the signal length of a record without the sig_len header\n Read two headers. The records should be the same.\n \"\"\"\n\n record = wfdb.rdrecord('sample-data/100')\n record_2 = wfdb.rdrecord('sample-data/100-no-len')\n record_2.record_name = '100'\n\n assert record_2.__eq__(record)\n\n\nclass TestDownload(unittest.TestCase):\n # Test that we can download records with no \"dat\" file\n # Regression test for https://github.com/MIT-LCP/wfdb-python/issues/118\n def test_dl_database_no_dat_file(self):\n wfdb.dl_database('afdb', './download-tests/', ['00735'])\n\n # Test that we can download records that *do* have a \"dat\" file.\n def test_dl_database_with_dat_file(self):\n wfdb.dl_database('afdb', './download-tests/', ['04015'])\n\n # Cleanup written files\n @classmethod\n def tearDownClass(self):\n if os.path.isdir('./download-tests/'):\n shutil.rmtree('./download-tests/')\n\nif __name__ == \"__main__\":\n unittest.main()\n print(\"Everything passed!\")\n" ]
[ [ "numpy.testing.assert_equal", "numpy.abs", "numpy.array_equal", "numpy.genfromtxt", "numpy.round", "numpy.full", "numpy.concatenate", "numpy.max", "numpy.nanmean", "numpy.repeat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JoshJson/nummethod
[ "e9380f873a029205e2f843d4629e363a8f2f2f92" ]
[ "build/lib/NMisS/optimization.py" ]
[ "import pip\n\ntry:\n __import__('math')\nexcept ImportError:\n pip.main([ 'install', 'math' ])\n\ntry:\n __import__('pandas')\nexcept ImportError:\n pip.main([ 'install', 'pandas' ])\n\ntry:\n __import__('scipy')\nexcept ImportError:\n pip.main([ 'install', 'scipy' ])\n\ntry:\n __import__('matplotlib')\nexcept ImportError:\n pip.main([ 'install', 'matplotlib' ])\n\ntry:\n __import__('networkx')\nexcept ImportError:\n pip.main([ 'install', 'networkx' ])\n\ntry:\n __import__('numpy')\nexcept ImportError:\n pip.main([ 'install', 'numpy' ])\n\ntry:\n __import__('datetime')\nexcept ImportError:\n pip.main([ 'install', 'datetime' ])\n\n\nimport math\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import cauchy\nimport random\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom numpy.random import choice as np_choice\n\nrandom_matrix = pd.DataFrame([[int(random.random() * 100) for _ in range(100)]\n for _ in range(100)])\nrandom_matrix.to_csv('random_matrix.csv', header=True, index=False)\nrandom_matrix = pd.read_csv('random_matrix.csv')\nspisok = random_matrix.values.tolist()\n\n\ndef simulated_annealing(dist, n, t0):\n \"\"\"\n Функция, в которой реализован алгоритм имитации отжига\n :param dist: list -- матрица весов\n :param n: int -- длина пути\n :param t0: int -- оптимальная температура\n \"\"\"\n\n def temperatura(k, t):\n \"\"\"\n Функция расчета оптимальной температуры для алгоритма имитации отжига\n :param k: int -- количество городов\n :param t: int -- температура\n :return t/k: float -- коэффициент,\n который нужен для вычисления следующей температуры\n \"\"\"\n return t / k\n\n way = [element for element in range(n)]\n rand0 = [element for element in range(1, n)]\n tk = 1\n m = 1\n s = 0\n x0 = 0.1\n x = [x0]\n t = t0\n s_list = []\n while t > tk:\n sp = 0\n t = temperatura(m, t0)\n x.append(random.uniform(0, 1))\n way_p = [way[j] for j in range(n)]\n rand = random.sample(rand0, 2)\n way_p[rand[0]], way_p[rand[1]] = way_p[rand[1]], way_p[rand[0]]\n for j in range(n - 1):\n sp = sp + dist[way_p[j]][way_p[j + 1]]\n sp = sp + dist[way_p[0]][way_p[-1]]\n if m == 1 or sp < s:\n s = sp\n way = [way_p[j] for j in range(n)]\n else:\n p = math.exp(-(sp - s) / t)\n if x[m - 1] < p:\n x[m - 1], x[m] = x[m], x[m - 1]\n s = sp\n way = [way_p[j] for j in range(n)]\n m += 1\n s_list.append(s)\n way.append(way[0])\n return way, s, m, s_list\n\n\ndef inlet():\n \"\"\"\n Функция ввода и выбора, каким путем мы хотим задать матрицу весов\n :return dist: list -- матрица весов\n \"\"\"\n\n def file():\n \"\"\"\n Функция, которая считывает файл csv и заполняет матрицу\n значениями, взятыми оттуда\n :return matrix_1: list -- матрица, считываемая с csv файла\n \"\"\"\n import csv\n matrix_1 = []\n name = input(\"Введите названи файла. 
Например, city.csv: \")\n with open(name) as file:\n reader = csv.reader(file, delimiter=';', quotechar=',')\n for row in reader:\n matrix_1.append(row)\n matrix_1 = [[float(matrix_1[i][j]) for j in range(len(matrix_1))]\n for i in range(len(matrix_1))]\n return matrix_1\n\n def random_dist(k):\n \"\"\"\n Функция, которая герерирует матрицу\n :param k: int -- количество городов\n :return d: list -- сгенерируемая матрица\n \"\"\"\n d = [[0 if elem == j else random.uniform(0, 10) for j in range(k)]\n for elem in range(k)]\n for elem in range(k):\n print(d[elem])\n return d\n\n def matr(m, n):\n \"\"\"\n Функция заполнения матрицы элементов.\n :param m: int -- количество строк в матрице\n :param n: int -- количество столбцов в матрице\n :return matrix: list -- заполненная элементами матрица\n \"\"\"\n\n def el_int(el):\n \"\"\"\n Функция на проверку типа введенного элемента в матрице (целое).\n Она возвращает True, если число целое, False - если нет.\n :param el: элемент матрицы\n \"\"\"\n try:\n int(el)\n return True\n except ValueError:\n return False\n\n def el_float(el):\n \"\"\"\n Функция на проверку типа введенного элемента в матрице (вещественное).\n Она возвращает True, если число вещественное, False - если нет.\n :param el: элемент матрицы\n \"\"\"\n try:\n float(el)\n return True\n except ValueError:\n return False\n\n def el_complex(el):\n \"\"\"\n Функция на проверку типа введенного элемента в матрице (комплексное).\n Она возвращает True, если число комплексное, False - если нет.\n :param el: элемент матрицы\n \"\"\"\n try:\n complex(el)\n return True\n except ValueError:\n return False\n\n def rev_complex(h):\n \"\"\"\n Функция преобразует комплексное число в нормальный вид, т. е. в вид a + i*b\n Пример: если вы ввели -j + 1, функция преобразует это в 1 - j\n :param h: str -- элемент матрицы\n :return h_rev: str -- преобразованный элемент\n \"\"\"\n h_rev = ''\n sep = 0\n if h[0] == '+' or h[0] == '-':\n for element_matr in range(1, len(h)):\n if h[element_matr] == '+' or h[element_matr] == '-':\n sep = element_matr\n break\n h_rev = h[sep:len(h)] + h[0:sep]\n else:\n for element_matr in range(0, len(h)):\n if h[element_matr] == '+' or h[element_matr] == '-':\n sep = element_matr\n break\n h_rev = h[sep:len(h)] + '+' + h[0:sep]\n return (h_rev)\n\n matrix = []\n print('Введите элементы строки матрицы через пробел:')\n for elem_matr in range(0, m):\n a = []\n row = input()\n row = row.split(' ')\n matrix.append(row)\n if len(row) != n:\n print('Некорректное количество элементов в строке матрицы.')\n exit()\n for j in range(0, n):\n el = matrix[elem_matr][j]\n k = 0\n while k == 0:\n if el_int(el) is True:\n matrix[elem_matr][j] = int(el)\n k = 1\n else:\n if el_float(el) is True:\n matrix[elem_matr][j] = float(el)\n k = 1\n else:\n if el_complex(el) is True:\n matrix[elem_matr][j] = complex(el)\n k = 1\n else:\n if el_complex(rev_complex(el)) is True:\n matrix[elem_matr][j] = complex(\n rev_complex(el))\n k = 1\n else:\n el = input('Неверный формат ввода. 
'\n 'Повторите ввод '\n 'элемента [{}, '\n '{}]: '.format(elem_matr, j))\n return (matrix)\n\n print(\"Ввод данных\")\n length = int(input(\"Введите: 1 - для считывания файла с устройства, \"\n \"2 - для случайной генерации, \"\n \"3 - для ввода матрицы с клавиатуры\\n\"))\n if length == 1:\n dist = file()\n if length == 2:\n k = int(input(\"Введите количество городов: \"))\n dist = random_dist(k)\n if length == 3:\n k = int(input(\"Введите количество городов: \"))\n dist = matr(k, k)\n return dist\n\n\nclass AntColony(object):\n \"\"\"\n Класс для нахождения оптимального пути алгоритмом Муравьиной колонии.\n \"\"\"\n\n def __init__(self, distances, n_ants, n_best, n_iterations,\n decay, alpha=1, beta=1):\n \"\"\"\n Функция для замены 0 на inf\n :param distances: list -- матрица весов\n :param n_ants: int -- количество муравьев\n :param n_best: int\n :param n_iterations: int -- количество итераций\n :param decay: float\n :param alpha: int -- значение ориентации феромонов\n :param beta: int -- значение ориентации на длину пути\n \"\"\"\n i = 0\n j = 0\n while i < len(distances):\n while j < len(distances):\n if distances[i][j] == 0:\n distances[i][j] = np.inf\n i += 1\n j += 1\n else:\n continue\n\n self.distances = np.array(distances)\n self.pheromone = np.ones(self.distances.shape) / len(self.distances)\n self.all_inds = range(len(self.distances))\n self.n_ants = n_ants\n self.n_best = n_best\n self.n_iterations = n_iterations\n self.decay = decay\n self.alpha = alpha\n self.beta = beta\n\n def run(self):\n \"\"\"\n Функция для нахождения лучшего пути и его стоимости\n :return all_time_shortest_path: tuple -- кортеж, в котором список\n корттежей лучшего пути и его стоимость\n \"\"\"\n shortest_path = None\n all_time_shortest_path = (\"placeholder\", np.inf)\n for elem in range(self.n_iterations):\n all_paths = self.gen_all_paths()\n self.spread_pheronome(all_paths, self.n_best,\n shortest_path=shortest_path)\n shortest_path = min(all_paths, key=lambda x: x[1])\n if shortest_path[1] < all_time_shortest_path[1]:\n all_time_shortest_path = shortest_path\n self.pheromone * self.decay\n return all_time_shortest_path\n\n def spread_pheronome(self, all_paths, n_best, shortest_path):\n \"\"\"\n Функция для нахождения оптимального значения феромона\n :param all_paths: list -- список кортежей пути и их стоимости\n :param n_best: int\n :param shortest_path: tuple -- кортеж, в котором список кортежей\n пути и их стоимость\n \"\"\"\n sorted_paths = sorted(all_paths, key=lambda x: x[1])\n for path, dist in sorted_paths[:n_best]:\n for move in path:\n self.pheromone[move] += 1.0 / self.distances[move]\n\n def gen_path_dist(self, path):\n \"\"\"\n Функция для расчета стоимости пути\n :param path: list -- список кортежей пути\n :return total_dist: numpy.float64 -- стоимость пути\n \"\"\"\n total_dist = 0\n for ele in path:\n total_dist += self.distances[ele]\n return total_dist\n\n def gen_all_paths(self):\n \"\"\"\n Функция, в которой в список добавляются кортежи путей и их стоимость\n :return all_path: list -- список кортежей пути и их стоимости\n \"\"\"\n all_paths = []\n for elem in range(self.n_ants):\n path = self.gen_path(0)\n all_paths.append((path, self.gen_path_dist(path)))\n return all_paths\n\n def gen_all_cost(self):\n \"\"\"\n Функция для расчета стоимости каждого пути\n :return cost: list -- список стоимости каждого пути\n \"\"\"\n cost = []\n for elem in range(self.n_ants):\n path = self.gen_path(0)\n cost_1 = self.gen_path_dist(path)\n cost.append(cost_1.tolist())\n return cost\n\n def 
gen_path(self, start):\n \"\"\"\n Функция для расчета пути\n :param start: int -- начальная вершина\n :return path: list -- список кортежей пути\n \"\"\"\n\n path = []\n visited = set()\n visited.add(start)\n prev = start\n for elem in range(len(self.distances) - 1):\n move = self.pick_move(self.pheromone[prev], self.distances[prev],\n visited)\n path.append((prev, move))\n prev = move\n visited.add(move)\n path.append((prev, start))\n return path\n\n def pick_move(self, pheromone, dist, visited):\n \"\"\"\n Функция для нахождения вершин, в которых путь оптимален\n :param pheromone: numpy.ndarray -- феромон, который необходим для\n поиска лучшего пути\n :param dist: list -- матрица весов\n :param visited: set -- множество посещенных вершин\n :return move: numpy.int64 -- вершины пути\n \"\"\"\n pheromone = np.copy(pheromone)\n pheromone[list(visited)] = 0\n row = pheromone ** self.alpha * ((1.0 / dist) ** self.beta)\n norm_row = row / row.sum()\n move = np_choice(self.all_inds, 1, p=norm_row)[0]\n return move\n\n\ndef route_conversion(lst):\n \"\"\"\n Функция для получения лучшего пути в формате 0-2-1-0\n :param lst: list -- список кортежей лучшего пути\n :return '-'.join(result): numpy.float64 -- лучший путь в формате 0-1-2-0\n \"\"\"\n result = []\n for elem in range(len(lst)):\n if elem == 0:\n result.append('-'.join([str(lst[elem][0]), str(lst[elem][1])]))\n else:\n result.append(str(lst[elem][1]))\n return '-'.join(result)\n\n\ndef route_con(lst):\n \"\"\"\n Функция для получения списка лучшего пути\n :param lst: list -- список кортежей лучшего пути\n :return result: list -- список лучшего пути\n \"\"\"\n result = []\n for elem in range(len(lst)):\n if elem == 0:\n result.append(lst[elem][0])\n result.append(lst[elem][1])\n else:\n result.append(lst[elem][1])\n return result\n\n\ndef graph(n, way, dist):\n \"\"\"\n Функция для построения графа алгоритма Имитации отжига\n :param n: int -- длина пути\n :param way: list -- полученный самый оптимальный путь\n :param dist: list -- матрица весов\n \"\"\"\n rand = [i for i in range(n)]\n g = nx.Graph()\n g.add_nodes_from(rand)\n for elem in range(n):\n for j in range(elem + 1, n):\n if dist[elem][j] != 0:\n g.add_edge(rand[elem], rand[j])\n comb = []\n for elem in range(n):\n if rand.index(way[elem]) > rand.index(way[elem + 1]):\n comb.append(tuple([way[elem + 1], way[elem]]))\n else:\n comb.append(tuple([way[elem], way[elem + 1]]))\n edge_colors = [\"red\" if elem in comb else \"blue\" for elem in g.edges()]\n plt.figure(figsize=(10, 10))\n pos = nx.spring_layout(g)\n nx.draw_networkx(g, pos, edge_color=edge_colors)\n plt.title(\"Алгоритм Отжига\")\n plt.show()\n\n\ndef graph_1(n, way, dist):\n \"\"\"\n Функция для построения графа алгоритма Муравьиной колонии\n :param n: int -- длина пути\n :param way: list -- полученный самый оптимальный путь\n :param dist: list -- матрица весов\n \"\"\"\n rand = [_ for _ in range(n)]\n g = nx.Graph()\n g.add_nodes_from(rand)\n for elem in range(n):\n for j in range(elem + 1, n):\n if dist[elem][j] != 0:\n g.add_edge(rand[elem], rand[j])\n comb = []\n for elem in range(n):\n if rand.index(way[elem]) > rand.index(way[elem + 1]):\n comb.append(tuple([way[elem + 1], way[elem]]))\n else:\n comb.append(tuple([way[elem], way[elem + 1]]))\n edge_colors = [\"red\" if elem in comb else \"blue\" for elem in g.edges()]\n plt.figure(figsize=(10, 10))\n pos = nx.spring_layout(g)\n nx.draw_networkx(g, pos, edge_color=edge_colors)\n plt.title(\"Алгоритм Муравьиной Колонии\")\n plt.show()\n\ndef runoptimisationscript():\n 
\"\"\"\n Функция для запуска итерационного цикла (показа работы самих программ оптимищации)\n :return:\n \"\"\"\n distant = inlet()\n len_m = len(distant)\n temper = len_m ** 2\n w, s, q, s_list = simulated_annealing(distant, len_m, temper)\n print(\"Длина маршрута: \", s)\n print(\"Маршрут алгоритма имитации отжига: \", w)\n print(\"Количество итераций в маршруте имитации отжига: \", q)\n graph(len_m, w, distant)\n\n distance = distant\n ant_colony = AntColony(distance, len(distance) * 2, 5, len(distance) * 4,\n 0.95, alpha=1, beta=1)\n shortest_path = ant_colony.run()\n c = ant_colony.gen_all_cost()\n route = shortest_path[0]\n len_m = len(distance)\n results = route_con(shortest_path[0])\n print(\"Полученный путь алгоритмом муравьиной колонии:\",\n route_conversion(shortest_path[0]))\n print(\"Стоимость пути муравьиной колонии:\", shortest_path[1])\n graph_1(len_m, results, distance)\n\n plt.subplot(2, 1, 1)\n plt.plot(s_list)\n plt.title('Алгоритм отжига')\n plt.xlabel('Номер итерации')\n plt.ylabel('Длина маршрута')\n plt.subplot(2, 1, 2)\n plt.plot(c)\n plt.title('Алгоритм Муравьиной колонии')\n plt.xlabel('Номер итерации')\n plt.ylabel('Длина маршрута')\n plt.show()" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "numpy.random.choice", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "numpy.copy", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
armand33/recommender_system
[ "6b133fa903150a5e022d44123935b78af8436ac9" ]
[ "src/utils.py" ]
[ "# !/usr/bin/env python\n\nimport numpy as np\nimport scipy.sparse as sp\n\n\ndef split_data(ratings, min_num_ratings, p_test=0.1, verbose=False, seed=988):\n \"\"\"\n Splits the data set (ratings) to training data and test data\n :param ratings: initial data set (sparse matrix of dimensions n items and p users)\n :param min_num_ratings: all users and items must have at least min_num_ratings per user and per item to be kept\n :param p_test: proportion of the data dedicated to test\n :param verbose: True if user wants to print details of computation\n :param seed: random seed\n :return: - valid_ratings (initial data set where some items and users where dropped)\n - train train data (same shape as valid_ratings but with 1-p_test non_zero values)\n - test data (same shape as valid_ratings but with p_test non zero values\n \"\"\"\n\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n\n # set seed\n np.random.seed(seed)\n\n # select user and item based on the condition.\n valid_users = np.where(num_items_per_user >= min_num_ratings)[0]\n valid_items = np.where(num_users_per_item >= min_num_ratings)[0]\n valid_ratings = ratings[valid_items, :][:, valid_users]\n\n # define the sparse matrix that will contain train and test data\n train = sp.lil_matrix(valid_ratings.shape)\n test = sp.lil_matrix(valid_ratings.shape)\n\n # get the index of non zero elements of the valid_ratings\n non_zero_item, non_zero_users = valid_ratings.nonzero()\n\n # for each item, select p_test percent of users to put in test and put the rest in train\n for item in set(non_zero_item):\n\n _, indexes = valid_ratings[item].nonzero()\n test_ind = np.random.choice(indexes, size=int(len(indexes) * p_test))\n train_ind = list(set(indexes) - set(test_ind))\n\n train[item, train_ind] = valid_ratings[item, train_ind]\n test[item, test_ind] = valid_ratings[item, test_ind]\n\n if verbose:\n print('Shape of original ratings : {}'.format(ratings.shape))\n print('Shape of valid ratings (and of train and test data) : {}'.format(valid_ratings.shape))\n print(\"Total number of nonzero elements in original data : {v}\".format(v=ratings.nnz))\n print(\"Total number of nonzero elements in train data : {v}\".format(v=train.nnz))\n print(\"Total number of nonzero elements in test data : {v}\".format(v=test.nnz))\n return valid_ratings, train, test\n\n\ndef compute_error(data, user_features, item_features, nz):\n \"\"\"\n Returns the error of the prediction using matrix factorization data = transpose(user_features) x item_features.\n The error is only computed on non zero elements.\n :param data: sparse matrix of shape (num_items, num_users)\n :param user_features: matrix of shape (num_features, num_users)\n :param item_features: matrix of shape (num_features, num_items)\n :param nz: list of non zero entries of matrix data\n :return: the RMSE corresponding to the approximation of data by transpose(user_features) x item_features\n \"\"\"\n mse = 0\n for row, col in nz:\n current_item = item_features[:, row]\n current_user = user_features[:, col]\n prediction = current_user.T.dot(current_item)\n prediction = min(5, prediction)\n prediction = max(1, prediction)\n mse += (data[row, col] - prediction) ** 2\n return np.sqrt(1.0 * mse / len(nz))\n\n\ndef init_mf(train, num_features):\n \"\"\"\n Initialize the empty matrices for matrix factorization.\n As indicated in lab 10, the item_features matrix is initialized by assigning the average rating for that movie\n 
as the \ffirst row, and small random numbers for the remaining entries.\n :param train: training data set (sparse matrix of size nxp, n items and p users)\n :param num_features: number of latent features wanted in the factorization\n :return: two randomly initialized matrices of shapes (num_features, num_users) and (num_features, num_items)\n \"\"\"\n num_items, num_users = train.shape\n\n user_features = np.random.rand(num_features, num_users) / num_users\n user_features[0, :] = np.ones((num_users,))\n\n item_features = np.random.rand(num_features, num_items) / num_items\n item_features[0, :] = sp.csr_matrix.mean(train, axis=1).reshape(num_items, )\n\n return user_features, item_features\n\n\ndef build_prediction_factorization(item_features, user_features, test):\n \"\"\"\n Build the prediction using matrix factorization\n :param item_features:\n :param user_features:\n :param test:\n :return:\n \"\"\"\n nnz_row, nnz_col = test.nonzero()\n nnz_test = list(zip(nnz_row, nnz_col))\n for row, col in nnz_test:\n current_item = item_features[:, row]\n current_user = user_features[:, col]\n prediction = current_user.T.dot(current_item)\n prediction = min(5, prediction)\n prediction = max(1, prediction)\n test[row, col] = prediction\n\n return test\n\n\ndef write_matrix_to_file(matrix, path):\n with open(path, 'w') as output:\n output.write('Id,Prediction\\n')\n nnz_row, nnz_col = matrix.nonzero()\n nnz = list(zip(nnz_row, nnz_col))\n for row, col in nnz:\n output.write('r{}_c{},{}\\n'.format(row + 1, col + 1, matrix[row, col]))\n\n\ndef get_indices(matrix):\n \"\"\"\n :param matrix: sparse matrix of any shape\n :return: list of (i, j) such as matrix[i, j] is non zero\n \"\"\"\n nnz_row, nnz_col = matrix.nonzero()\n nnz = list(zip(nnz_row, nnz_col))\n return nnz\n" ]
[ [ "numpy.random.seed", "numpy.ones", "numpy.random.rand", "numpy.where", "scipy.sparse.csr_matrix.mean", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
eyalk11/investpy
[ "621e272687d341da0a8a173fe55628a0bc2fde76" ]
[ "investpy/etfs.py" ]
[ "# Copyright 2018-2021 Alvaro Bartolome, alvarobartt @ GitHub\n# See LICENSE for details.\n\nfrom datetime import datetime, date, timedelta\nimport pytz\n\nimport json\nfrom random import randint\n\nimport warnings\n\nimport pandas as pd\nimport pkg_resources\nimport requests\nfrom unidecode import unidecode\nfrom lxml.html import fromstring\n\nfrom .utils.extra import random_user_agent\nfrom .utils.data import Data\n\nfrom .data.etfs_data import etfs_as_df, etfs_as_list, etfs_as_dict\nfrom .data.etfs_data import etf_countries_as_list\n\n\ndef get_etfs(country=None):\n \"\"\"\n This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.\n This function also allows the users to specify which country do they want to retrieve data from or if they\n want to retrieve it from every listed country; so on, all the indexed etfs will be returned.\n\n Args:\n country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.\n\n Returns:\n :obj:`pandas.DataFrame` - etfs:\n The resulting :obj:`pandas.DataFrame` contains all the etfs basic information stored on `etfs.csv`, since it\n was previously retrieved by investpy. Unless the country is specified, all the available etfs indexed on \n Investing.com is returned, but if it is specified, just the etfs from that country are returned.\n\n In the case that the file reading of `etfs.csv` or the retrieval process from Investing.com was\n successfully completed, the resulting :obj:`pandas.DataFrame` will look like::\n\n country | name | full_name | symbol | isin | asset_class | currency | stock_exchange | def_stock_exchange \n --------|------|-----------|--------|------|-------------|----------|----------------|--------------------\n xxxxxxx | xxxx | xxxxxxxxx | xxxxxx | xxxx | xxxxxxxxxxx | xxxxxxxx | xxxxxxxxxxxxxx | xxxxxxxxxxxxxxxxxx \n\n Raises:\n ValueError: raised when any of the input arguments is not valid.\n FileNotFoundError: raised when `etfs.csv` file was not found.\n IOError: raised when `etfs.csv` file is missing.\n \n \"\"\"\n\n return etfs_as_df(country=country)\n\n\ndef get_etfs_list(country=None):\n \"\"\"\n This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.\n This function also allows the users to specify which country do they want to retrieve data from or if they\n want to retrieve it from every listed country; so on, a listing of etfs will be returned. This function\n helps the user to get to know which etfs are available on Investing.com.\n\n Args:\n country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.\n\n Returns:\n :obj:`list` - etfs_list:\n The resulting :obj:`list` contains the retrieved data from the `etfs.csv` file, which is\n a listing of the names of the etfs listed on Investing.com, which is the input for data\n retrieval functions as the name of the etf to retrieve data from needs to be specified.\n\n In case the listing was successfully retrieved, the :obj:`list` will look like::\n\n etfs_list = [\n 'Betashares U.S. 
Equities Strong Bear Currency Hedg',\n 'Betashares Active Australian Hybrids',\n 'Australian High Interest Cash', ...\n ]\n\n Raises:\n ValueError: raised when any of the input arguments is not valid.\n FileNotFoundError: raised when `etfs.csv` file was not found.\n IOError: raised when `etfs.csv` file is missing.\n \n \"\"\"\n\n return etfs_as_list(country=country)\n\n\ndef get_etfs_dict(country=None, columns=None, as_json=False):\n \"\"\"\n This function retrieves all the available etfs indexed on Investing.com, already stored on `etfs.csv`.\n This function also allows the user to specify which country do they want to retrieve data from,\n or from every listed country; the columns which the user wants to be included on the resulting\n :obj:`dict`; and the output of the function will either be a :obj:`dict` or a :obj:`json`.\n\n Args:\n country (:obj:`str`, optional): name of the country to retrieve all its available etfs from.\n columns (:obj:`list`, optional):\n names of the columns of the etf data to retrieve <country, name, full_name, symbol, isin, asset_class, \n currency, stock_exchange>\n as_json (:obj:`bool`, optional):\n value to determine the format of the output data which can either be a :obj:`dict` or a :obj:`json`.\n\n Returns:\n :obj:`dict` or :obj:`json` - etfs_dict:\n The resulting :obj:`dict` contains the retrieved data if found, if not, the corresponding\n fields are filled with `None` values.\n\n In case the information was successfully retrieved, the :obj:`dict` will look like::\n\n etfs_dict = {\n \"country\": country,\n \"name\": name,\n \"full_name\": full_name,\n \"symbol\": symbol,\n \"isin\": isin,\n \"asset_class\": asset_class,\n \"currency\": currency,\n \"stock_exchange\": stock_exchange,\n \"def_stock_exchange\": def_stock_exchange\n }\n\n Raises:\n ValueError: raised when any of the input arguments is not valid.\n FileNotFoundError: raised when `etfs.csv` file was not found.\n IOError: raised when `etfs.csv` file is missing.\n \n \"\"\"\n\n return etfs_as_dict(country=country, columns=columns, as_json=as_json)\n\n\ndef get_etf_countries():\n \"\"\"\n This function retrieves all the available countries to retrieve etfs from, as the listed\n countries are the ones indexed on Investing.com. The purpose of this function is to list\n the countries which have available etfs according to Investing.com data, so to ease the\n etf retrieval process of a particular country.\n\n Returns:\n :obj:`list` - countries:\n The resulting :obj:`list` contains all the countries listed on Investing.com with\n etfs available to retrieve data from.\n\n In the case that the file reading of `etf_countries.csv` which contains the names and codes of the countries\n with etfs was successfully completed, the resulting :obj:`list` will look like::\n\n countries = ['australia', 'austria', 'belgium', 'brazil', ...]\n\n Raises:\n FileNotFoundError: raised when `etf_countries.csv` file was not found.\n \n \"\"\"\n\n return etf_countries_as_list()\n\n\ndef get_etf_recent_data(etf, country, stock_exchange=None, as_json=False, order='ascending', interval='Daily'):\n \"\"\"\n This function retrieves recent historical data from the introduced `etf` from Investing\n via Web Scraping. 
The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a\n :obj:`json` file, with `ascending` or `descending` order.\n\n Args:\n etf (:obj:`str`): name of the etf to retrieve recent historical data from.\n country (:obj:`str`): name of the country from where the etf is.\n as_json (:obj:`bool`, optional):\n optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).\n order (:obj:`str`, optional):\n optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).\n interval (:obj:`str`, optional):\n value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.\n\n Returns:\n :obj:`pandas.DataFrame` or :obj:`json`:\n The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved\n recent data from the specified etf via argument. The dataset contains the open, high, low and close\n values for the selected etf on market days.\n\n The returned data is case we use default arguments will look like::\n\n Date || Open | High | Low | Close | Volume | Currency | Exchange\n -----||------|------|-----|-------|--------|----------|---------\n xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx | xxxxxxxx\n\n but if we define `as_json=True`, then the output will be::\n\n {\n name: name,\n recent: [\n {\n date: dd/mm/yyyy,\n open: x,\n high: x,\n low: x,\n close: x,\n volume: x,\n currency: x,\n exchange: x\n },\n ...\n ]\n }\n\n Raises:\n ValueError: raised whenever any of the arguments is not valid or errored.\n IOError: raised if etfs object/file not found or unable to retrieve.\n RuntimeError:raised if the introduced etf does not match any of the indexed ones.\n ConnectionError: raised if GET requests does not return 200 status code.\n IndexError: raised if etf information was unavailable or not found.\n\n Examples:\n >>> data = investpy.get_etf_recent_data(etf='bbva accion dj eurostoxx 50', country='spain')\n >>> data.head()\n Open High Low Close Volume Currency Exchange\n Date \n 2020-04-09 28.890 29.155 28.40 28.945 20651 EUR Madrid\n 2020-04-14 29.345 30.235 28.94 29.280 14709 EUR Madrid\n 2020-04-15 29.125 29.125 28.11 28.130 14344 EUR Madrid\n 2020-04-16 28.505 28.590 28.08 28.225 17662 EUR Madrid\n 2020-04-17 29.000 29.325 28.80 28.895 19578 EUR Madrid\n\n \"\"\"\n\n if not etf:\n raise ValueError(\"ERR#0031: etf parameter is mandatory and must be a valid etf name.\")\n\n if not isinstance(etf, str):\n raise ValueError(\"ERR#0030: etf argument needs to be a str.\")\n\n if country is None:\n raise ValueError(\"ERR#0039: country can not be None, it should be a str.\")\n\n if country is not None and not isinstance(country, str):\n raise ValueError(\"ERR#0025: specified country value not valid.\")\n\n if stock_exchange is not None and not isinstance(stock_exchange, str):\n raise ValueError(\"ERR#0125: specified stock_exchange value is not valid, it should be a str.\")\n\n if not isinstance(as_json, bool):\n raise ValueError(\"ERR#0002: as_json argument can just be True or False, bool type.\")\n\n if order not in ['ascending', 'asc', 'descending', 'desc']:\n raise ValueError(\"ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.\")\n\n if not interval:\n raise ValueError(\"ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n\n if not isinstance(interval, str):\n raise ValueError(\"ERR#0073: interval value should 
be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n\n interval = interval.lower()\n\n if interval not in ['daily', 'weekly', 'monthly']:\n raise ValueError(\"ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n\n resource_package = 'investpy'\n resource_path = '/'.join(('resources', 'etfs.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)\n else:\n raise FileNotFoundError(\"ERR#0058: etfs file not found or errored.\")\n\n if etfs is None:\n raise IOError(\"ERR#0009: etfs object not found or unable to retrieve.\")\n\n country = unidecode(country.strip().lower())\n\n if country not in get_etf_countries():\n raise RuntimeError(\"ERR#0034: country \" + country + \" not found, check if it is correct.\")\n\n etf = unidecode(etf.strip().lower())\n\n def_exchange = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['def_stock_exchange'] == True)).idxmax()]\n \n etfs = etfs[etfs['country'].str.lower() == country]\n\n if etf not in list(etfs['name'].apply(unidecode).str.lower()):\n raise RuntimeError(\"ERR#0019: etf \" + etf + \" not found, check if it is correct.\")\n\n etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]\n\n if def_exchange['country'] != country:\n warnings.warn(\n 'Selected country does not contain the default stock exchange of the introduced ETF. ' + \\\n 'Default country is: \\\"' + def_exchange['country'] + '\\\" and default stock_exchange: \\\"' + \\\n def_exchange['stock_exchange'] + '\\\".', \n Warning\n )\n \n if stock_exchange:\n if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():\n raise ValueError(\"ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.\")\n \n etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']\n else:\n found_etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]\n \n if len(found_etfs) > 1:\n warnings.warn(\n 'Note that the displayed information can differ depending on the stock exchange. Available stock_exchange' + \\\n ' values for \\\"' + country + '\\\" are: \\\"' + '\\\", \\\"'.join(found_etfs['stock_exchange']) + '\\\".',\n Warning\n )\n\n del found_etfs\n\n etf_exchange = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'stock_exchange']\n else:\n if stock_exchange:\n if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():\n raise ValueError(\"ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.\")\n\n if def_exchange['stock_exchange'].lower() != stock_exchange.lower():\n warnings.warn(\n 'Selected stock_exchange is not the default one of the introduced ETF. 
' + \\\n 'Default country is: \\\"' + def_exchange['country'] + '\\\" and default stock_exchange: \\\"' + \\\n def_exchange['stock_exchange'].lower() + '\\\".', \n Warning\n )\n\n etf_exchange = etfs.loc[(etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']\n else:\n etf_exchange = def_exchange['stock_exchange']\n\n symbol = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'symbol']\n id_ = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'id']\n name = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'name']\n\n etf_currency = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'currency']\n\n header = symbol + ' Historical Data'\n\n head = {\n \"User-Agent\": random_user_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"keep-alive\",\n }\n\n params = {\n \"curr_id\": id_,\n \"smlID\": str(randint(1000000, 99999999)),\n \"header\": header,\n \"interval_sec\": interval.capitalize(),\n \"sort_col\": \"date\",\n \"sort_ord\": \"DESC\",\n \"action\": \"historical_data\"\n }\n\n url = \"https://www.investing.com/instruments/HistoricalDataAjax\"\n\n req = requests.post(url, headers=head, data=params)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n path_ = root_.xpath(\".//table[@id='curr_table']/tbody/tr\")\n \n result = list()\n\n if path_:\n for elements_ in path_:\n if elements_.xpath(\".//td\")[0].text_content() == 'No results found':\n raise IndexError(\"ERR#0010: etf information unavailable or not found.\")\n \n info = []\n \n for nested_ in elements_.xpath(\".//td\"):\n info.append(nested_.get('data-real-value'))\n\n etf_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')\n\n etf_close = float(info[1].replace(',', ''))\n etf_open = float(info[2].replace(',', ''))\n etf_high = float(info[3].replace(',', ''))\n etf_low = float(info[4].replace(',', ''))\n\n etf_volume = int(info[5])\n\n result.insert(len(result),\n Data(etf_date, etf_open, etf_high, etf_low, etf_close, etf_volume, etf_currency, etf_exchange))\n\n if order in ['ascending', 'asc']:\n result = result[::-1]\n elif order in ['descending', 'desc']:\n result = result\n\n if as_json is True:\n json_ = {\n 'name': name,\n 'recent':\n [value.etf_as_json() for value in result]\n }\n\n return json.dumps(json_, sort_keys=False)\n elif as_json is False:\n df = pd.DataFrame.from_records([value.etf_to_dict() for value in result])\n df.set_index('Date', inplace=True)\n\n return df\n else:\n raise RuntimeError(\"ERR#0004: data retrieval error while scraping.\")\n\n\ndef get_etf_historical_data(etf, country, from_date, to_date, stock_exchange=None, as_json=False, order='ascending', interval='Daily',id=None):\n \"\"\"\n This function retrieves historical data from the introduced `etf` from Investing.com via Web Scraping on the \n introduced date range. 
The resulting data can it either be stored in a :obj:`pandas.DataFrame` or in a \n :obj:`json` object with `ascending` or `descending` order.\n\n Args:\n etf (:obj:`str`): name of the etf to retrieve recent historical data from.\n country (:obj:`str`): name of the country from where the etf is.\n from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved.\n to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved.\n as_json (:obj:`bool`, optional):\n to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).\n order (:obj:`str`, optional):\n optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).\n interval (:obj:`str`, optional):\n value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.\n\n Returns:\n :obj:`pandas.DataFrame` or :obj:`json`:\n The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved\n recent data from the specified etf via argument. The dataset contains the open, high, low and close\n values for the selected etf on market days.\n\n The returned data is case we use default arguments will look like::\n\n Date || Open | High | Low | Close | Volume | Currency | Exchange\n -----||------|------|-----|-------|--------|----------|---------\n xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxx | xxxxxxxx | xxxxxxxx\n\n but if we define `as_json=True`, then the output will be::\n\n {\n name: name,\n historical: [\n {\n date: dd/mm/yyyy,\n open: x,\n high: x,\n low: x,\n close: x,\n volume: x,\n currency: x,\n exchange: x\n },\n ...\n ]\n }\n\n Raises:\n ValueError: raised whenever any of the arguments is not valid or errored.\n IOError: raised if etfs object/file not found or unable to retrieve.\n RuntimeError:raised if the introduced etf does not match any of the indexed ones.\n ConnectionError: raised if GET requests does not return 200 status code.\n IndexError: raised if etf information was unavailable or not found.\n\n Examples:\n >>> data = investpy.get_etf_historical_data(etf='bbva accion dj eurostoxx 50', country='spain', from_date='01/01/2010', to_date='01/01/2019')\n >>> data.head()\n Open High Low Close Volume Currency Exchange\n Date \n 2011-12-07 23.70 23.70 23.70 23.62 2000 EUR Madrid\n 2011-12-08 23.53 23.60 23.15 23.04 599 EUR Madrid\n 2011-12-09 23.36 23.60 23.36 23.62 2379 EUR Madrid\n 2011-12-12 23.15 23.26 23.00 22.88 10695 EUR Madrid\n 2011-12-13 22.88 22.88 22.88 22.80 15 EUR Madrid\n\n \"\"\"\n if not id:\n interval = stupid_checks(as_json, country, etf, from_date, interval, order, stock_exchange, to_date)\n\n if type(to_date)==str:\n end_date = datetime.strptime(to_date, '%d/%m/%Y')\n start_date = datetime.strptime(from_date, '%d/%m/%Y')\n else:\n end_date= to_date\n start_date = from_date\n\n\n\n if start_date >= end_date:\n raise ValueError(\"ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.\")\n\n date_interval = {\n 'intervals': [],\n }\n\n flag = True\n\n while flag is True:\n diff = end_date.year - start_date.year\n\n if diff > 19:\n obj = {\n 'start': start_date.strftime('%m/%d/%Y'),\n 'end': start_date.replace(year=start_date.year + 19).strftime('%m/%d/%Y'),\n }\n\n date_interval['intervals'].append(obj)\n\n start_date = start_date.replace(year=start_date.year + 19) + timedelta(days=1)\n else:\n obj = {\n 'start': start_date.strftime('%m/%d/%Y'),\n 
'end': end_date.strftime('%m/%d/%Y'),\n }\n\n date_interval['intervals'].append(obj)\n\n flag = False\n\n interval_limit = len(date_interval['intervals'])\n interval_counter = 0\n\n data_flag = False\n\n resource_package = 'investpy'\n resource_path = '/'.join(('resources', 'etfs.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)\n else:\n raise FileNotFoundError(\"ERR#0058: etfs file not found or errored.\")\n\n if not id:\n etf_currency, etf_exchange, id_, name, symbol = dostuff(etf, etfs)\n else:\n id_=id\n name=symbol=etf\n etf_currency, etf_exchange = 'unk','unk'\n\n final = list()\n\n header = symbol + ' Historical Data'\n\n for index in range(len(date_interval['intervals'])):\n interval_counter += 1\n\n params = {\n \"curr_id\": id_,\n \"smlID\": str(randint(1000000, 99999999)),\n \"header\": header,\n \"st_date\": date_interval['intervals'][index]['start'],\n \"end_date\": date_interval['intervals'][index]['end'],\n \"interval_sec\": interval.capitalize(),\n \"sort_col\": \"date\",\n \"sort_ord\": \"DESC\",\n \"action\": \"historical_data\"\n }\n\n head = {\n \"User-Agent\": random_user_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"keep-alive\",\n }\n\n url = \"https://www.investing.com/instruments/HistoricalDataAjax\"\n\n req = requests.post(url, headers=head, data=params)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n if not req.text:\n continue\n\n root_ = fromstring(req.text)\n path_ = root_.xpath(\".//table[@id='curr_table']/tbody/tr\")\n \n result = list()\n\n if path_:\n for elements_ in path_:\n if elements_.xpath(\".//td\")[0].text_content() == 'No results found':\n if interval_counter < interval_limit:\n data_flag = False\n else:\n raise IndexError(\"ERR#0010: etf information unavailable or not found.\")\n else:\n data_flag = True\n \n info = []\n\n for nested_ in elements_.xpath(\".//td\"):\n info.append(nested_.get('data-real-value'))\n\n if data_flag is True:\n etf_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')\n \n etf_close = float(info[1].replace(',', ''))\n etf_open = float(info[2].replace(',', ''))\n etf_high = float(info[3].replace(',', ''))\n etf_low = float(info[4].replace(',', ''))\n\n etf_volume = int(info[5])\n\n result.insert(len(result),\n Data(etf_date, etf_open, etf_high, etf_low, etf_close, etf_volume, etf_currency, etf_exchange))\n\n if data_flag is True:\n if order in ['ascending', 'asc']:\n result = result[::-1]\n elif order in ['descending', 'desc']:\n result = result\n\n if as_json is True:\n json_list = [value.etf_as_json() for value in result]\n\n final.append(json_list)\n elif as_json is False:\n df = pd.DataFrame.from_records([value.etf_to_dict() for value in result])\n df.set_index('Date', inplace=True)\n\n final.append(df)\n else:\n raise RuntimeError(\"ERR#0004: data retrieval error while scraping.\")\n\n if order in ['descending', 'desc']:\n final.reverse()\n\n if as_json is True:\n json_ = {\n 'name': name,\n 'historical': [value for json_list in final for value in json_list]\n }\n return json.dumps(json_, sort_keys=False)\n elif as_json is False:\n return pd.concat(final)\n\n\ndef dostuff(etf, etfs):\n if etfs is None:\n raise IOError(\"ERR#0009: etfs object not 
found or unable to retrieve.\")\n\n country = unidecode(country.strip().lower())\n\n if country not in get_etf_countries():\n raise RuntimeError(\"ERR#0034: country \" + country + \" not found, check if it is correct.\")\n\n etf = unidecode(etf.strip().lower())\n\n def_exchange = etfs.loc[\n ((etfs['name'].apply(unidecode).str.lower() == etf) & (etfs['def_stock_exchange'] == True)).idxmax()]\n\n etfs = etfs[etfs['country'].str.lower() == country]\n\n if etf not in list(etfs['name'].apply(unidecode).str.lower()):\n raise RuntimeError(\"ERR#0019: etf \" + etf + \" not found, check if it is correct.\")\n\n etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]\n\n if def_exchange['country'] != country:\n warnings.warn(\n 'Selected country does not contain the default stock exchange of the introduced ETF. ' + \\\n 'Default country is: \\\"' + def_exchange['country'] + '\\\" and default stock_exchange: \\\"' + \\\n def_exchange['stock_exchange'] + '\\\".',\n Warning\n )\n\n if stock_exchange:\n if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():\n raise ValueError(\n \"ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.\")\n\n etf_exchange = etfs.loc[\n (etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']\n else:\n found_etfs = etfs[etfs['name'].apply(unidecode).str.lower() == etf]\n\n if len(found_etfs) > 1:\n warnings.warn(\n 'Note that the displayed information can differ depending on the stock exchange. Available stock_exchange' + \\\n ' values for \\\"' + country + '\\\" are: \\\"' + '\\\", \\\"'.join(found_etfs['stock_exchange']) + '\\\".',\n Warning\n )\n\n del found_etfs\n\n etf_exchange = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'stock_exchange']\n else:\n if stock_exchange:\n if stock_exchange.lower() not in etfs['stock_exchange'].str.lower():\n raise ValueError(\n \"ERR#0126: introduced stock_exchange value does not exists, leave this parameter to None to use default stock_exchange.\")\n\n if def_exchange['stock_exchange'].lower() != stock_exchange.lower():\n warnings.warn(\n 'Selected stock_exchange is not the default one of the introduced ETF. 
' + \\\n 'Default country is: \\\"' + def_exchange['country'] + '\\\" and default stock_exchange: \\\"' + \\\n def_exchange['stock_exchange'].lower() + '\\\".',\n Warning\n )\n\n etf_exchange = etfs.loc[\n (etfs['stock_exchange'].str.lower() == stock_exchange.lower()).idxmax(), 'stock_exchange']\n else:\n etf_exchange = def_exchange['stock_exchange']\n symbol = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (\n etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'symbol']\n id_ = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (\n etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'id']\n name = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (\n etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'name']\n etf_currency = etfs.loc[((etfs['name'].apply(unidecode).str.lower() == etf) & (\n etfs['stock_exchange'].str.lower() == etf_exchange.lower())).idxmax(), 'currency']\n return etf_currency, etf_exchange, id_, name, symbol\n\n\ndef stupid_checks(as_json, country, etf, from_date, interval, order, stock_exchange, to_date):\n if not etf:\n raise ValueError(\"ERR#0031: etf parameter is mandatory and must be a valid etf name.\")\n if not isinstance(etf, str):\n raise ValueError(\"ERR#0030: etf argument needs to be a str.\")\n if country is None:\n raise ValueError(\"ERR#0039: country can not be None, it should be a str.\")\n if country is not None and not isinstance(country, str):\n raise ValueError(\"ERR#0025: specified country value not valid.\")\n if stock_exchange is not None and not isinstance(stock_exchange, str):\n raise ValueError(\"ERR#0125: specified stock_exchange value is not valid, it should be a str.\")\n if not isinstance(as_json, bool):\n raise ValueError(\"ERR#0002: as_json argument can just be True or False, bool type.\")\n if order not in ['ascending', 'asc', 'descending', 'desc']:\n raise ValueError(\"ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.\")\n if not interval:\n raise ValueError(\n \"ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n if not isinstance(interval, str):\n raise ValueError(\n \"ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n interval = interval.lower()\n if interval not in ['daily', 'weekly', 'monthly']:\n raise ValueError(\n \"ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.\")\n try:\n datetime.strptime(from_date, '%d/%m/%Y')\n except ValueError:\n raise ValueError(\"ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.\")\n try:\n datetime.strptime(to_date, '%d/%m/%Y')\n except ValueError:\n raise ValueError(\"ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.\")\n return interval\n\n\ndef get_etf_information(etf, country, as_json=False):\n \"\"\"\n This function retrieves fundamental financial information from the specified ETF. 
The retrieved \n information from the ETF can be valuable as it is additional information that can be used combined \n with OHLC values, so to determine financial insights from the company which holds the specified ETF.\n\n Args:\n etf (:obj:`str`): name of the ETF to retrieve recent historical data from.\n country (:obj:`str`): name of the country from where the ETF is.\n as_json (:obj:`bool`, optional):\n optional argument to determine the format of the output data (:obj:`dict` or :obj:`json`).\n\n Returns:\n :obj:`pandas.DataFrame` or :obj:`dict`- etf_information:\n The resulting :obj:`pandas.DataFrame` contains the information fields retrieved from Investing.com\n from the specified ETF; it can also be returned as a :obj:`dict`, if argument `as_json=True`.\n\n If any of the information fields could not be retrieved, that field/s will be filled with\n None values. If the retrieval process succeeded, the resulting :obj:`dict` will look like::\n\n etf_information = {\n \"1-Year Change\": \"21.83%\",\n \"52 wk Range\": \"233.76 - 320.06\",\n \"Asset Class\": \"Equity\",\n \"Average Vol. (3m)\": 59658771.0,\n \"Beta\": 1.01,\n \"Dividend Yield\": \"1.73%\",\n \"Dividends (TTM)\": 4.03,\n \"ETF Name\": \"SPDR S&P 500\",\n \"Market Cap\": 296440000000.0,\n \"Open\": 319.25,\n \"Prev. Close\": 317.27,\n \"ROI (TTM)\": \"- 0.46%\",\n \"Shares Outstanding\": 934132116.0,\n \"Todays Range\": \"319.18 - 320.06\",\n \"Total Assets\": 167650000000.0,\n \"Volume\": 27928710.0\n }\n\n Raises:\n ValueError: raised if any of the introduced arguments is not valid or errored.\n FileNotFoundError: raised if `etfs.csv` file was not found or errored.\n IOError: raised if `etfs.csv` file is empty or errored.\n RuntimeError: raised if scraping process failed while running.\n ConnectionError: raised if the connection to Investing.com errored (did not return HTTP 200)\n\n \"\"\"\n\n if not etf:\n raise ValueError(\"ERR#0031: etf parameter is mandatory and must be a valid etf name.\")\n\n if not isinstance(etf, str):\n raise ValueError(\"ERR#0030: etf argument needs to be a str.\")\n\n if country is None:\n raise ValueError(\"ERR#0039: country can not be None, it should be a str.\")\n\n if country is not None and not isinstance(country, str):\n raise ValueError(\"ERR#0025: specified country value not valid.\")\n\n if not isinstance(as_json, bool):\n raise ValueError(\"ERR#0002: as_json argument can just be True or False, bool type.\")\n\n resource_package = 'investpy'\n resource_path = '/'.join(('resources', 'etfs.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)\n else:\n raise FileNotFoundError(\"ERR#0058: etfs file not found or errored.\")\n\n if etfs is None:\n raise IOError(\"ERR#0009: etfs object not found or unable to retrieve.\")\n\n country = unidecode(country.strip().lower())\n\n if country not in get_etf_countries():\n raise RuntimeError(\"ERR#0034: country \" + country + \" not found, check if it is correct.\")\n\n etfs = etfs[etfs['country'] == country]\n\n etf = unidecode(etf.strip().lower())\n\n if etf not in list(etfs['name'].apply(unidecode).str.lower()):\n raise RuntimeError(\"ERR#0019: etf \" + etf + \" not found, check if it is correct.\")\n\n name = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'name']\n tag = etfs.loc[(etfs['name'].apply(unidecode).str.lower() == etf).idxmax(), 'tag']\n\n url = \"https://www.investing.com/etfs/\" + 
tag\n\n head = {\n \"User-Agent\": random_user_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"keep-alive\",\n }\n\n req = requests.get(url, headers=head)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n path_ = root_.xpath(\"//div[contains(@class, 'overviewDataTable')]/div\")\n\n result = pd.DataFrame(columns=['ETF Name', 'Prev. Close', 'Todays Range', 'ROI (TTM)',\n 'Open', '52 wk Range', 'Dividends (TTM)', 'Volume',\n 'Market Cap', 'Dividend Yield', 'Average Vol. (3m)',\n 'Total Assets', 'Beta', '1-Year Change', 'Shares Outstanding',\n 'Asset Class'])\n result.at[0, 'ETF Name'] = name\n\n if path_:\n for elements_ in path_:\n element = elements_.xpath(\".//span[@class='float_lang_base_1']\")[0]\n title_ = element.text_content()\n if title_ == \"Day's Range\":\n title_ = 'Todays Range'\n if title_ in result.columns.tolist():\n try:\n result.at[0, title_] = float(element.getnext().text_content().replace(',', ''))\n continue\n except:\n pass\n try:\n text = element.getnext().text_content().strip()\n result.at[0, title_] = datetime.strptime(text, \"%b %d, %Y\").strftime(\"%d/%m/%Y\")\n continue\n except:\n pass\n try:\n value = element.getnext().text_content().strip()\n if value.__contains__('K'):\n value = float(value.replace('K', '').replace(',', '')) * 1e3\n elif value.__contains__('M'):\n value = float(value.replace('M', '').replace(',', '')) * 1e6\n elif value.__contains__('B'):\n value = float(value.replace('B', '').replace(',', '')) * 1e9\n elif value.__contains__('T'):\n value = float(value.replace('T', '').replace(',', '')) * 1e12\n result.at[0, title_] = value\n continue\n except:\n pass\n\n result.replace({'N/A': None}, inplace=True)\n\n if as_json is True:\n json_ = result.iloc[0].to_dict()\n return json_\n elif as_json is False:\n return result\n else:\n raise RuntimeError(\"ERR#0004: data retrieval error while scraping.\")\n\n\ndef get_etfs_overview(country, as_json=False, n_results=100):\n \"\"\"\n This function retrieves an overview containing all the real time data available for the main ETFs from a country,\n such as the ETF names, symbols, current value, etc. as indexed in Investing.com. So on, the main usage of this\n function is to get an overview on the main ETFs from a country, so to get a general view. 
Note that since \n this function is retrieving a lot of information at once, by default just the overview of the Top 100 ETFs \n is being retrieved, but an additional parameter called n_results can be specified so to retrieve N results.\n\n Args:\n country (:obj:`str`): name of the country to retrieve the ETFs overview from.\n as_json (:obj:`bool`, optional):\n optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).\n n_results (:obj:`int`, optional): number of results to be displayed on the overview table (0-1000).\n\n Returns:\n :obj:`pandas.DataFrame` - etfs_overview:\n The resulting :obj:`pandas.DataFrame` contains all the data available in Investing.com of the main ETFs\n from a country in order to get an overview of it.\n\n If the retrieval process succeeded, the resulting :obj:`pandas.DataFrame` should look like::\n\n country | name | full_name | symbol | last | change | turnover\n --------|------|-----------|--------|------|--------|----------\n xxxxxxx | xxxx | xxxxxxxxx | xxxxxx | xxxx | xxxxxx | xxxxxxxx\n \n Raises:\n ValueError: raised if there was any argument error.\n FileNotFoundError: raised when `etfs.csv` file is missing.\n IOError: raised if data could not be retrieved due to file error.\n RuntimeError: \n raised either if the introduced country does not match any of the listed ones or if no overview results could be \n retrieved from Investing.com.\n ConnectionError: raised if GET requests does not return 200 status code.\n \n \"\"\"\n\n if country is None:\n raise ValueError(\"ERR#0039: country can not be None, it should be a str.\")\n\n if country is not None and not isinstance(country, str):\n raise ValueError(\"ERR#0025: specified country value not valid.\")\n\n if not isinstance(as_json, bool):\n raise ValueError(\"ERR#0002: as_json argument can just be True or False, bool type.\")\n\n if not isinstance(n_results, int):\n raise ValueError(\"ERR#0089: n_results argument should be an integer between 1 and 1000.\")\n\n if 1 > n_results or n_results > 1000:\n raise ValueError(\"ERR#0089: n_results argument should be an integer between 1 and 1000.\")\n\n resource_package = 'investpy'\n resource_path = '/'.join(('resources', 'etfs.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)\n else:\n raise FileNotFoundError(\"ERR#0058: etfs file not found or errored.\")\n\n if etfs is None:\n raise IOError(\"ERR#0009: etfs object not found or unable to retrieve.\")\n\n country = unidecode(country.strip().lower())\n\n if country not in get_etf_countries():\n raise RuntimeError('ERR#0025: specified country value is not valid.')\n\n etfs = etfs[etfs['country'] == country]\n\n if country.lower() == 'united states':\n country= 'usa'\n elif country.lower() == 'united kingdom':\n country = 'uk'\n\n head = {\n \"User-Agent\": random_user_agent(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Connection\": \"keep-alive\",\n }\n\n url = \"https://www.investing.com/etfs/\" + country.replace(' ', '-') + \"-etfs?&issuer_filter=0\"\n\n req = requests.get(url, headers=head)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n table = root_.xpath(\".//table[@id='etfs']/tbody/tr\")\n\n results = list()\n\n if len(table) > 0:\n for row in 
table[:n_results]:\n id_ = row.get('id').replace('pair_', '')\n symbol = row.xpath(\".//td[contains(@class, 'symbol')]\")[0].get('title')\n\n nested = row.xpath(\".//a\")[0]\n name = nested.text.strip()\n full_name = nested.get('title').rstrip()\n\n # In Euro Zone the ETFs are from different countries so the country is specified\n country_flag = row.xpath(\".//td[@class='flag']/span\")[0].get('title')\n country_flag = unidecode(country_flag.lower())\n\n last_path = \".//td[@class='\" + 'pid-' + str(id_) + '-last' + \"']\"\n last = row.xpath(last_path)[0].text_content()\n\n change_path = \".//td[contains(@class, '\" + 'pid-' + str(id_) + '-pcp' + \"')]\"\n change = row.xpath(change_path)[0].text_content()\n\n turnover_path = \".//td[contains(@class, '\" + 'pid-' + str(id_) + '-turnover' + \"')]\"\n turnover = row.xpath(turnover_path)[0].text_content()\n\n if turnover == '':\n continue\n\n if turnover.__contains__('K'):\n turnover = float(turnover.replace('K', '').replace(',', '')) * 1e3\n elif turnover.__contains__('M'):\n turnover = float(turnover.replace('M', '').replace(',', '')) * 1e6\n elif turnover.__contains__('B'):\n turnover = float(turnover.replace('B', '').replace(',', '')) * 1e9\n else:\n turnover = float(turnover.replace(',', ''))\n\n data = {\n \"country\": country_flag,\n \"name\": name,\n \"full_name\": full_name,\n \"symbol\": symbol,\n \"last\": float(last.replace(',', '')),\n \"change\": change,\n \"turnover\": int(turnover),\n \"currency\": etfs.loc[(etfs['name'] == name).idxmax(), 'currency']\n }\n\n results.append(data)\n else:\n raise RuntimeError(\"ERR#0092: no data found while retrieving the overview from Investing.com\")\n\n df = pd.DataFrame(results)\n\n if as_json:\n return json.loads(df.to_json(orient='records'))\n else:\n return df\n\n\ndef search_etfs(by, value):\n \"\"\"\n This function searches etfs by the introduced value for the specified field. This means that this function\n is going to search if there is a value that matches the introduced value for the specified field which is the\n `etfs.csv` column name to search in. Available fields to search etfs are 'name', 'full_name' and 'symbol'.\n\n Args:\n by (:obj:`str`): name of the field to search for, which is the column name ('name', 'full_name' or 'symbol').\n value (:obj:`str`): value of the field to search for, which is the str that is going to be searched.\n\n Returns:\n :obj:`pandas.DataFrame` - search_result:\n The resulting `pandas.DataFrame` contains the search results from the given query (the specified value\n in the specified field). 
If there are no results and error will be raised, but otherwise this\n `pandas.DataFrame` will contain all the available field values that match the introduced query.\n\n Raises:\n ValueError: raised if any of the introduced params is not valid or errored.\n FileNotFoundError: raised if `etfs.csv` file is missing.\n IOError: raised if data could not be retrieved due to file error.\n RuntimeError: raised if no results were found for the introduced value in the introduced field.\n \n \"\"\"\n\n if not by:\n raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')\n\n if not isinstance(by, str):\n raise ValueError('ERR#0006: the introduced field to search is mandatory and should be a str.')\n\n if not value:\n raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')\n\n if not isinstance(value, str):\n raise ValueError('ERR#0017: the introduced value to search is mandatory and should be a str.')\n\n resource_package = 'investpy'\n resource_path = '/'.join(('resources', 'etfs.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n etfs = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path), keep_default_na=False)\n else:\n raise FileNotFoundError(\"ERR#0058: etfs file not found or errored.\")\n\n if etfs is None:\n raise IOError(\"ERR#0009: etfs object not found or unable to retrieve.\")\n\n etfs.drop(columns=['tag', 'id'], inplace=True)\n\n available_search_fields = etfs.columns.tolist()\n\n if isinstance(by, str) and by not in available_search_fields:\n raise ValueError('ERR#0026: the introduced field to search can either just be '\n + ' or '.join(available_search_fields))\n\n etfs['matches'] = etfs[by].str.contains(value, case=False)\n\n search_result = etfs.loc[etfs['matches'] == True].copy()\n\n if len(search_result) == 0:\n raise RuntimeError('ERR#0043: no results were found for the introduced ' + str(by) + '.')\n\n search_result.drop(columns=['matches'], inplace=True)\n search_result.reset_index(drop=True, inplace=True)\n\n return search_result\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
HubBucket-Team/graphics
[ "dc22b266c225b7d667cc3836f6fc5a1d4bb47152" ]
[ "tensorflow_graphics/geometry/convolution/tests/graph_convolution_test.py" ]
[ "#Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for graph convolution ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nimport tensorflow_graphics.geometry.convolution.graph_convolution as gc\nfrom tensorflow_graphics.util import test_case\n\n\ndef _dense_to_sparse(data):\n \"\"\"Convert a numpy array to a tf.SparseTensor.\"\"\"\n indices = np.where(data)\n return tf.SparseTensor(\n np.stack(indices, axis=-1), data[indices], dense_shape=data.shape)\n\n\ndef _dummy_data(batch_size, num_vertices, num_channels):\n \"\"\"Create inputs for feature_steered_convolution.\"\"\"\n if batch_size > 0:\n data = np.zeros(\n shape=(batch_size, num_vertices, num_channels), dtype=np.float32)\n neighbors = _dense_to_sparse(\n np.tile(np.eye(num_vertices, dtype=np.float32), (batch_size, 1, 1)))\n else:\n data = np.zeros(shape=(num_vertices, num_channels), dtype=np.float32)\n neighbors = _dense_to_sparse(np.eye(num_vertices, dtype=np.float32))\n return data, neighbors\n\n\ndef _dummy_variables(in_channels, out_channels, num_weight_matrices):\n \"\"\"Create variable substitutes for feature_steered_convolution.\"\"\"\n var_u = tf.zeros(shape=(in_channels, num_weight_matrices))\n var_v = tf.zeros(shape=(in_channels, num_weight_matrices))\n var_c = tf.zeros(shape=(num_weight_matrices))\n var_w = tf.zeros(shape=(num_weight_matrices, in_channels, out_channels))\n var_b = tf.zeros(shape=(out_channels))\n return var_u, var_v, var_c, var_w, var_b\n\n\ndef _random_data(batch_size,\n num_vertices,\n num_channels,\n padding,\n only_self_edges,\n data_type=np.float32,\n neighbors_type=np.float32,\n sizes_type=np.int32):\n \"\"\"Create random inputs for feature_steered_convolution.\"\"\"\n\n def _random_data_2d(padding):\n size = num_vertices if not padding else np.random.randint(\n low=1, high=num_vertices + 1)\n data = np.random.uniform(size=(size, num_channels)).astype(data_type)\n if only_self_edges:\n neighbors = np.eye(size, dtype=neighbors_type)\n else:\n random = np.random.uniform(size=(size, size)).astype(neighbors_type)\n neighbors = np.maximum(\n np.where(random > 0.75, np.ones_like(random), np.zeros_like(random)),\n np.eye(size, dtype=neighbors_type))\n neighbors = neighbors / np.sum(neighbors, axis=1, keepdims=True)\n if padding:\n data = np.pad(data, ((0, num_vertices - size), (0, 0)), \"constant\")\n neighbors = np.pad(neighbors,\n ((0, num_vertices - size), (0, num_vertices - size)),\n \"constant\")\n return data, neighbors, size\n else:\n return data, neighbors\n\n if batch_size > 0:\n list_2d = [_random_data_2d(padding=padding) for _ in range(batch_size)]\n data = np.stack([i[0] for i in list_2d], 0).astype(data_type)\n neighbors = np.stack([i[1] for i in list_2d], 0).astype(neighbors_type)\n if padding:\n sizes = np.stack([i[2] for i in list_2d], 0).astype(sizes_type)\n return 
data, _dense_to_sparse(neighbors), sizes\n else:\n return data, _dense_to_sparse(neighbors)\n else:\n if padding:\n raise ValueError(\"Padding only allowed with batched data.\")\n data, neighbors = _random_data_2d(padding=False)\n return data.astype(data_type), _dense_to_sparse(\n neighbors.astype(neighbors_type))\n\n\ndef _random_variables(in_channels,\n out_channels,\n num_weight_matrices,\n dtype=np.float32):\n \"\"\"Create random variables for feature_steered_convolution.\"\"\"\n\n def _random_constant(shape, dtype):\n return tf.constant(np.random.uniform(size=shape).astype(dtype))\n\n var_u = _random_constant([in_channels, num_weight_matrices], dtype)\n var_v = _random_constant([in_channels, num_weight_matrices], dtype)\n var_c = _random_constant([num_weight_matrices], dtype)\n var_w = _random_constant([num_weight_matrices, in_channels, out_channels],\n dtype)\n var_b = _random_constant([out_channels], dtype)\n return var_u, var_v, var_c, var_w, var_b\n\n\nclass GraphConvolutionTestFeatureSteeredConvolutionTests(test_case.TestCase):\n\n @parameterized.parameters(\n (\"'sizes' must have an integer type.\", np.float32, np.float32, np.float32,\n np.float32),\n (\"'data' must have a float type.\", np.int32, np.float32, np.int32,\n np.float32),\n (\"'neighbors' and 'data' must have the same type.\", np.float32,\n np.float64, np.int32, np.float32),\n )\n def test_feature_steered_convolution_exception_raised_types(\n self, err_msg, data_type, neighbors_type, sizes_type, var_type):\n \"\"\"Check the type errors for invalid input types.\"\"\"\n data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,\n neighbors_type, sizes_type)\n u, v, c, w, b = _random_variables(3, 3, 1, var_type)\n with self.assertRaisesRegexp(TypeError, err_msg):\n _ = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n @parameterized.parameters(\n (np.float32, np.float32, np.int32, np.float32),\n (np.float64, np.float64, np.int32, np.float64),\n (np.float32, np.float32, np.int64, np.float32),\n (np.float64, np.float64, np.int64, np.float64),\n )\n def test_feature_steered_convolution_exception_not_raised_types(\n self, data_type, neighbors_type, sizes_type, var_type):\n \"\"\"Check there are no exceptions for valid input types.\"\"\"\n data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,\n neighbors_type, sizes_type)\n u, v, c, w, b = _random_variables(3, 3, 1, var_type)\n try:\n gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n except Exception as e: # pylint: disable=broad-except\n self.fail(\"Exception raised: %s\" % str(e))\n\n def test_feature_steered_convolution_exception_raised_shapes(self):\n \"\"\"Check that invalid input shapes trigger the right exceptions.\"\"\"\n with self.assertRaisesRegexp(ValueError, \"must have a rank of 2\"):\n data, neighbors = _dummy_data(1, 5, 2)\n u, v, c, w, b = _dummy_variables(2, 2, 1)\n data = data[0, :]\n _ = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n with self.assertRaisesRegexp(ValueError, \"must have a rank greater than 1\"):\n u, v, c, w, b = _dummy_variables(2, 2, 1)\n data = np.ones(shape=(5), dtype=np.float32)\n neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))\n _ = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n 
var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n with self.assertRaisesRegexp(ValueError,\n \"Not all batch dimensions are identical.\"):\n data, neighbors = _dummy_data(1, 5, 2)\n u, v, c, w, b = _dummy_variables(2, 2, 1)\n _ = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=(1, 1),\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n @parameterized.parameters(\n (1, 1, 1, 1, 1),\n (4, 2, 3, 6, 5),\n (0, 1, 1, 1, 1),\n (0, 2, 3, 6, 5),\n )\n def test_feature_steered_convolution_output_shape(self, batch_size,\n num_vertices, in_channels,\n out_channels,\n num_weight_matrices):\n \"\"\"Check that the output of convolution has the correct shape.\"\"\"\n data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)\n u, v, c, w, b = _dummy_variables(in_channels, out_channels,\n num_weight_matrices)\n\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n y_shape = y.shape.as_list()\n\n self.assertEqual(y_shape[-1], out_channels)\n self.assertAllEqual(y_shape[:-1], data.shape[:-1])\n\n @parameterized.parameters(\n (1, 1, 1, 1, 1),\n (4, 2, 3, 6, 5),\n (0, 1, 1, 1, 1),\n (0, 2, 3, 6, 5),\n )\n def test_feature_steered_convolution_only_self_edges(self, batch_size,\n num_vertices,\n in_channels,\n out_channels,\n num_weight_matrices):\n \"\"\"Test convolution when the graph only has self edges.\"\"\"\n data, neighbors = _random_data(\n batch_size,\n num_vertices,\n in_channels,\n padding=False,\n only_self_edges=True)\n u, v, c, w, b = _random_variables(in_channels, out_channels,\n num_weight_matrices)\n\n with self.subTest(name=\"w=0_expect_output=b\"):\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=tf.zeros_like(w),\n var_b=b)\n y_expected = tf.broadcast_to(b, y.shape)\n\n self.assertAllEqual(y, y_expected)\n\n with self.subTest(name=\"translation_invariant_self_edges\"):\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=-u,\n var_c=c,\n var_w=w,\n var_b=b)\n q = tf.reshape(\n tf.exp(c) / tf.reduce_sum(input_tensor=tf.exp(c)),\n (num_weight_matrices, 1, 1))\n if batch_size > 0:\n q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0, keepdims=True)\n q_times_w = tf.tile(q_times_w, (batch_size, 1, 1))\n else:\n q_times_w = tf.reduce_sum(input_tensor=q * w, axis=0)\n y_expected = tf.matmul(data, q_times_w) + tf.broadcast_to(b, y.shape)\n\n self.assertAllClose(y, y_expected)\n\n with self.subTest(name=\"constant_signal\"):\n if batch_size > 0:\n constant_data = np.tile(\n np.random.uniform(size=(batch_size, 1,\n in_channels)).astype(np.float32),\n (1, num_vertices, 1))\n else:\n constant_data = np.tile(\n np.random.uniform(size=(1, in_channels)).astype(np.float32),\n (num_vertices, 1))\n y = gc.feature_steered_convolution(\n data=constant_data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n if batch_size > 0:\n y_expected = tf.tile(y[:, :1, :], (1, num_vertices, 1))\n else:\n y_expected = tf.tile(y[:1, :], (num_vertices, 1))\n\n self.assertAllClose(y, y_expected)\n\n @parameterized.parameters(\n (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5,),),\n ((1.3,),), (-0.7,), (((0.8,),),), (3.0,), ((4.6,), (4.6,), (4.6,))),\n (((1.0,), (2.0,), (3.0,)), np.ones(shape=(3, 3)) / 3.0, ((0.5, 0.2),),\n ((0.3, 0.4),), (-0.7, 0.15), (((0.8,),), ((1.1,),)), (3.0,),\n 
((5.011706928844621,), (4.971030281984818,), (4.927388658982911,))),\n )\n def test_feature_steered_convolution_padding_preset(self, data, neighbors, u,\n v, c, w, b, expected):\n \"\"\"Test expected result for preset data and filter values.\"\"\"\n array = (np.array(i) for i in (data, neighbors, expected))\n data, neighbors, expected = array\n tensors = (tf.convert_to_tensor(value=np.array(i).astype(data.dtype)) \\\n for i in (u, v, c, w, b))\n u, v, c, w, b = tensors\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=_dense_to_sparse(neighbors),\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n self.assertAllClose(y, expected)\n\n @parameterized.parameters(\n (1, 5, 1, 1, 1),\n (2, 6, 3, 6, 5),\n (5, 15, 6, 12, 8),\n )\n def test_feature_steered_convolution_padding_random(self, batch_size,\n num_vertices, in_channels,\n out_channels,\n num_weight_matrices):\n \"\"\"Test mixed topology batches (random vertices and neighbors).\"\"\"\n data, neighbors, sizes = _random_data(\n batch_size,\n num_vertices,\n in_channels,\n padding=True,\n only_self_edges=False)\n u, v, c, w, b = _random_variables(in_channels, out_channels,\n num_weight_matrices)\n\n with self.subTest(name=\"if_w_is_0_then_y_is_b\"):\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=tf.zeros_like(w),\n var_b=b)\n for k in range(batch_size):\n y_crop = y[k, :sizes[k], :]\n y_expected = tf.broadcast_to(b, y_crop.shape)\n\n self.assertAllEqual(y_crop, y_expected)\n # Check for zeros in the padded region.\n self.assertAllEqual(y[k, sizes[k]:, :],\n tf.zeros((num_vertices - sizes[k], out_channels)))\n\n with self.subTest(name=\"convolve_with_constant\"):\n constant_data = data\n for k in range(batch_size):\n constant_data[k, :sizes[k], :] = np.tile(data[k, 0, :], (sizes[k], 1))\n\n y = gc.feature_steered_convolution(\n data=constant_data,\n neighbors=neighbors,\n sizes=sizes,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n for k in range(batch_size):\n y_crop = y[k, :sizes[k], :]\n y_const = tf.broadcast_to(y_crop[0, :], y_crop.shape)\n\n self.assertAllClose(y_crop, y_const)\n # Check for zeros in the padded region.\n self.assertAllEqual(y[k, sizes[k]:, :],\n tf.zeros([num_vertices - sizes[k], out_channels]))\n\n @parameterized.parameters(\n (1, 10, 3, 1, True),\n (3, 6, 1, 4, True),\n (0, 10, 5, 2, False),\n (1, 10, 3, 1, False),\n (3, 6, 1, 4, False),\n (0, 10, 5, 2, False),\n )\n def test_feature_steered_convolution_jacobian_random(self, batch_size,\n num_vertices,\n in_channels,\n num_weight_matrices,\n padding):\n \"\"\"Test the jacobian for random input data.\"\"\"\n random_data = _random_data(\n batch_size,\n num_vertices,\n in_channels,\n padding,\n only_self_edges=False,\n data_type=np.float64,\n neighbors_type=np.float64)\n data_init = random_data[0]\n neighbors = random_data[1]\n sizes = None if not padding else random_data[2]\n u, v, c, w, b = _random_variables(\n in_channels, in_channels, num_weight_matrices, dtype=np.float64)\n data = tf.convert_to_tensor(value=data_init)\n\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n self.assert_jacobian_is_correct(data, data_init, y)\n\n @parameterized.parameters(\n (1, 1, 0.0),\n (5, 1, 0.0),\n (1, 3, 0.0),\n (5, 3, 0.0),\n (1, 1, 1.0),\n (5, 1, 1.0),\n (1, 3, 1.0),\n (5, 3, 1.0),\n )\n def test_feature_steered_convolution_jacobian_preset(self, num_vertices,\n 
num_channels,\n data_multiplier):\n \"\"\"Test the jacobian is correct for preset inputs.\"\"\"\n # Corner cases include one vertex, one channel, and all-zero features.\n data_init = data_multiplier * np.random.uniform(\n size=(num_vertices, num_channels)).astype(np.float64)\n neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)\n u, v, c, w, b = _random_variables(\n num_channels, num_channels, 1, dtype=np.float64)\n data = tf.convert_to_tensor(value=data_init)\n\n y = gc.feature_steered_convolution(\n data=data,\n neighbors=neighbors,\n sizes=None,\n var_u=u,\n var_v=v,\n var_c=c,\n var_w=w,\n var_b=b)\n\n self.assert_jacobian_is_correct(data, data_init, y)\n\n\nclass EdgeConvolutionTemplateTests(test_case.TestCase):\n\n def _zeros(self, vertex_features, neighbor_features, out_dimensions=None):\n \"\"\"A callable for `edge_convolution_template`.\"\"\"\n if out_dimensions is None:\n return tf.zeros_like(vertex_features)\n else:\n return tf.zeros(\n shape=(vertex_features.shape.as_list()[0], out_dimensions),\n dtype=vertex_features.dtype)\n\n def _pass_through(self, vertex_features, neighbor_features):\n \"\"\"A callable for `edge_convolution_template`.\"\"\"\n return neighbor_features\n\n def _circular_2d_data(self, num_vertices, include_normals=False):\n \"\"\"Create data for a circle graph.\"\"\"\n # Vertices are points distributed uniformly on a circle, with each point\n # connected to its closest neighbor on either side.\n theta = np.linspace(0.0, np.pi * 2.0, num=num_vertices, endpoint=False)\n data = np.stack((np.cos(theta), np.sin(theta)), axis=-1)\n if include_normals:\n data = np.concatenate((data, data), axis=-1)\n eye = np.eye(num_vertices)\n neighbors = np.maximum(np.roll(eye, 1, axis=1), np.roll(eye, -1,\n axis=1)) * 0.5\n return data, _dense_to_sparse(neighbors)\n\n def _edge_curvature_2d(self, vertex_features, neighbor_features):\n \"\"\"A callable for `edge_convolution_template` that computes curvature.\"\"\"\n x_position, x_normal = tf.split(\n value=vertex_features, num_or_size_splits=2, axis=-1)\n y_position, y_normal = tf.split(\n value=neighbor_features, num_or_size_splits=2, axis=-1)\n yx_diff = x_position - y_position\n curvature_unscaled = tf.abs(\n tf.reduce_sum(\n input_tensor=(y_normal - x_normal) * yx_diff,\n axis=-1,\n keepdims=True))\n edge_length_squared = tf.reduce_sum(\n input_tensor=yx_diff * yx_diff, axis=-1, keepdims=True)\n return tf.compat.v1.where(\n tf.less(edge_length_squared, 1e-7), tf.zeros_like(edge_length_squared),\n curvature_unscaled / edge_length_squared)\n\n @parameterized.parameters(\n (\"'sizes' must have an integer type.\", np.float32, np.float32,\n np.float32),\n (\"'data' must have a float type.\", np.int32, np.float32, np.int32),\n (\"'neighbors' and 'data' must have the same type.\", np.float32,\n np.float64, np.int32),\n )\n def test_edge_convolution_template_exception_raised_types(\n self, err_msg, data_type, neighbors_type, sizes_type):\n \"\"\"Check the type errors for invalid input types.\"\"\"\n data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,\n neighbors_type, sizes_type)\n with self.assertRaisesRegexp(TypeError, err_msg):\n gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n edge_function=self._zeros,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n @parameterized.parameters(\n (np.float32, np.float32, np.int32),\n (np.float64, np.float64, np.int32),\n (np.float32, np.float32, np.int64),\n (np.float64, np.float64, np.int64),\n (np.float64, np.float64, 
np.int8),\n (np.float64, np.float64, np.uint8),\n (np.float64, np.float64, np.int16),\n (np.float64, np.float64, np.uint16),\n )\n def test_edge_convolution_template_exception_not_raised_types(\n self, data_type, neighbors_type, sizes_type):\n \"\"\"Check there are no exceptions for valid input types.\"\"\"\n data, neighbors, sizes = _random_data(1, 5, 3, True, False, data_type,\n neighbors_type, sizes_type)\n try:\n gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n edge_function=self._zeros,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n except Exception as e: # pylint: disable=broad-except\n self.fail(\"Exception raised: %s\" % str(e))\n\n def test_edge_convolution_template_exception_raised_shapes(self):\n \"\"\"Check that invalid input shapes trigger the right exceptions.\"\"\"\n with self.assertRaisesRegexp(ValueError, \"must have a rank of 2\"):\n data, neighbors = _dummy_data(1, 5, 2)\n data = data[0, :]\n _ = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._zeros,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n with self.assertRaisesRegexp(ValueError, \"must have a rank greater than 1\"):\n data = np.ones(shape=(5), dtype=np.float32)\n neighbors = _dense_to_sparse(np.ones(shape=(5), dtype=np.float32))\n _ = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._zeros,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n with self.assertRaisesRegexp(ValueError, \"must have a rank of 1\"):\n data, neighbors = _dummy_data(1, 5, 2)\n _ = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=((1, 1), (1, 1)),\n edge_function=self._zeros,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n @parameterized.parameters(\"\", \"invalid\")\n def test_edge_convolution_template_exception_raised_reduction(self,\n reduction):\n \"\"\"Check that an invalid reduction method triggers the exception.\"\"\"\n with self.assertRaisesRegexp(ValueError, \"reduction method\"):\n data, neighbors = _dummy_data(1, 5, 2)\n gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._zeros,\n reduction=reduction,\n edge_function_kwargs=dict())\n\n @parameterized.parameters(\n (1, 1, 1, 1, \"weighted\"),\n (4, 2, 3, 6, \"weighted\"),\n (0, 1, 1, 1, \"max\"),\n (0, 2, 3, 6, \"max\"),\n )\n def test_edge_convolution_template_output_shape(self, batch_size,\n num_vertices, in_channels,\n out_channels,\n reduction):\n \"\"\"Check that the output of convolution has the correct shape.\"\"\"\n data, neighbors = _dummy_data(batch_size, num_vertices, in_channels)\n\n y = gc.edge_convolution_template(\n data,\n neighbors,\n None,\n self._zeros,\n reduction=reduction,\n edge_function_kwargs={\"out_dimensions\": out_channels})\n y_shape = y.shape.as_list()\n\n with self.subTest(name=\"out_channels\"):\n self.assertEqual(y_shape[-1], out_channels)\n\n with self.subTest(name=\"shape\"):\n self.assertAllEqual(y_shape[:-1], data.shape[:-1])\n\n @parameterized.parameters(\n (1, 10, 3, True, \"weighted\"),\n (3, 6, 1, True, \"weighted\"),\n (0, 10, 5, False, \"weighted\"),\n (1, 10, 3, False, \"max\"),\n (3, 6, 1, False, \"max\"),\n (0, 10, 5, False, \"max\"),\n )\n def test_edge_convolution_template_jacobian_random(self, batch_size,\n num_vertices, in_channels,\n padding, reduction):\n \"\"\"Test the jacobian for random input data.\"\"\"\n random_data = _random_data(\n batch_size,\n num_vertices,\n 
in_channels,\n padding,\n only_self_edges=False,\n data_type=np.float64,\n neighbors_type=np.float64)\n data_init = random_data[0]\n neighbors = random_data[1]\n sizes = None if not padding else random_data[2]\n data = tf.convert_to_tensor(value=data_init)\n\n y = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=sizes,\n edge_function=self._pass_through,\n reduction=reduction,\n edge_function_kwargs=dict())\n\n self.assert_jacobian_is_correct(data, data_init, y)\n\n def test_edge_convolution_template_preset_max(self):\n data = np.array(((1, 2), (3, 4), (5, 6), (7, 8)), np.float32)\n neighbors = np.array(\n ((0, 1, 0, 1), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 1, 1)), np.float32)\n neighbors = _dense_to_sparse(neighbors)\n true = np.array(((8, 10), (8, 10), (10, 12), (14, 16)), np.float32)\n\n with self.subTest(\"max_sum\"):\n max_sum = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=lambda x, y: x + y,\n reduction=\"max\",\n edge_function_kwargs=dict())\n\n self.assertAllEqual(max_sum, true)\n\n with self.subTest(\"max_sum_scaled\"):\n # Max reduction ignores the weights, so scaling the neighbors weights\n # should not change the result.\n max_sum_scaled = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors * 10.0,\n sizes=None,\n edge_function=lambda x, y: x + y,\n reduction=\"max\",\n edge_function_kwargs=dict())\n\n self.assertAllEqual(max_sum_scaled, true)\n\n @parameterized.parameters(\n itertools.product((1, 5), (1, 3), (0.0, 1.0), (\"weighted\", \"max\"))\n )\n def test_edge_convolution_template_jacobian_preset(self, num_vertices,\n num_channels,\n data_multiplier,\n reduction):\n \"\"\"Test the jacobian is correct for preset inputs.\"\"\"\n # Corner cases include one vertex, one channel, and all-zero features.\n data_init = data_multiplier * np.random.uniform(\n size=(num_vertices, num_channels)).astype(np.float64)\n neighbors = tf.sparse.eye(num_vertices, dtype=tf.float64)\n data = tf.convert_to_tensor(value=data_init)\n\n y = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._pass_through,\n reduction=reduction,\n edge_function_kwargs=dict())\n\n self.assert_jacobian_is_correct(data, data_init, y)\n\n def test_edge_convolution_template_laplacian_smoothing(self):\n r\"\"\"Test the expected result with laplacian smoothing.\n\n Laplacian smoothing for meshes is defined as\n $$y_i = \\frac{1}{|\\mathcal{N(i)}|} \\sum_{j \\in \\mathcal{N(i)}} x_j$$\n\n This can be computed using `edge_convolution_template` with `f(x, y)->y`.\n \"\"\"\n\n # We can reuse `self._pass_through(x, y)->y` as the smoothing functional.\n with self.subTest(name=\"only_self_edges_random\"):\n num_vertices = 500\n data = np.random.uniform(size=(num_vertices, 5))\n neighbors = tf.sparse.eye(num_vertices, dtype=tf.as_dtype(data.dtype))\n\n data_smoothed = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._pass_through,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n self.assertAllEqual(data, data_smoothed)\n\n with self.subTest(name=\"circular_2d\"):\n num_vertices = 500\n data, neighbors = self._circular_2d_data(num_vertices)\n\n data_smoothed = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._pass_through,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n # The smoothed points should have the same direction as the originals.\n data_smoothed_normalized = 
tf.nn.l2_normalize(data_smoothed, axis=-1)\n\n self.assertAllClose(data, data_smoothed_normalized)\n\n def test_edge_convolution_template_curvature(self):\n r\"\"\"Test the expected result with curvature.\n\n (Approximate) curvature for meshes is defined as\n $$\\kappa_{v_i} = \\frac{1}{|\\mathcal{N}(v_i)|}\n \\sum_{v_j \\in \\mathcal{N}(v_i)}\n \\frac{(\\vec{v_i} - \\vec{v_j})^T (\\vec{n_{v_i}} -\n \\vec{n_{v_j}})} {\\left|\\vec{v_i}-\\vec{v_j}\\right|^2}\n $$\n\n This can be computed using `edge_convolution_template` with\n $$f(x, y) = (n_x - n_y)^T (x - y) / ||x - y||^2.$$\n where $$n_x$$ and $$n_y$$ are the normals at points $$x$$ and $$y$$\n respectively.\n \"\"\"\n # We can reuse `self._edge_curvature_2d` as the curvature functional.\n num_vertices = 500\n data, neighbors = self._circular_2d_data(num_vertices, include_normals=True)\n\n data_curvature = gc.edge_convolution_template(\n data=data,\n neighbors=neighbors,\n sizes=None,\n edge_function=self._edge_curvature_2d,\n reduction=\"weighted\",\n edge_function_kwargs=dict())\n\n # The curvature at each point on a circle of radius 1 should be 1.\n self.assertAllClose(data_curvature, np.ones(shape=(num_vertices, 1)))\n\n\nif __name__ == \"__main__\":\n test_case.main()\n" ]
[ [ "tensorflow.convert_to_tensor", "numpy.linspace", "tensorflow.zeros", "tensorflow.as_dtype", "tensorflow.reduce_sum", "numpy.concatenate", "numpy.zeros_like", "numpy.where", "numpy.roll", "tensorflow.sparse.eye", "numpy.random.randint", "numpy.ones_like", "numpy.pad", "numpy.eye", "numpy.stack", "numpy.sin", "numpy.zeros", "tensorflow.tile", "tensorflow.nn.l2_normalize", "tensorflow.matmul", "tensorflow.less", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.split", "numpy.array", "numpy.sum", "tensorflow.broadcast_to", "numpy.tile", "numpy.cos", "numpy.ones", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
hogepodge/tvm-rpi
[ "b10f0e0ac97660933197a596e44e6429f7f89125" ]
[ "postprocess.py" ]
[ "#!python3 ./postprocess.py\nimport os.path\nimport numpy as np\nfrom scipy.special import softmax\nfrom tvm.contrib.download import download_testdata\n\n# Download a list of labels\nlabels_url = \"https://s3.amazonaws.com/onnx-model-zoo/synset.txt\"\nlabels_path = download_testdata(labels_url, \"synset.txt\", module=\"data\")\n\nwith open(labels_path, \"r\") as f:\n labels = [l.rstrip() for l in f]\n#\noutput_file = \"predictions.npz\"\n\n# Open the output and read the output tensor\nif os.path.exists(output_file):\n with np.load(output_file) as data:\n scores = softmax(data[\"output_0\"])\n scores = np.squeeze(scores)\n ranks = np.argsort(scores)[::-1]\n\n for rank in ranks[0:5]:\n print(\"class='%s' with probability=%f\" % (labels[rank], scores[rank]))\n" ]
[ [ "numpy.argsort", "numpy.load", "numpy.squeeze", "scipy.special.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
ahmeddeladly/arch
[ "20774dce296af3716c44ecd50716d368634acbba" ]
[ "arch/unitroot/critical_values/simulation/phillips-ouliaris-simulation-process.py" ]
[ "from collections import defaultdict\nimport glob\nfrom itertools import product\nimport os\nfrom typing import Dict, List, NamedTuple, Tuple\n\nfrom black import FileMode, TargetVersion, format_file_contents\nimport matplotlib.backends.backend_pdf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom phillips_ouliaris import FILE_TYPES, ROOT, TRENDS\nfrom scipy import stats\nimport seaborn as sns\nfrom shared import format_dict\nfrom statsmodels.regression.linear_model import OLS, WLS\n\nMETA = {\"z_a\": \"negative\", \"z_t\": \"negative\", \"p_u\": \"positive\", \"p_z\": \"positive\"}\nCRITICAL_VALUES = (1, 5, 10)\nPLOT = False\nWINS: Dict[int, int] = defaultdict(lambda: 0)\n# 1. Load data\n# 2. Compute critical values\n\n\nclass PvalueResult(NamedTuple):\n large_p: List[float]\n small_p: List[float]\n tau_max: float\n tau_star: float\n tau_min: float\n\n\ndef xval(lhs: np.ndarray, rhs: np.ndarray, log: bool = True, folds: int = 5) -> None:\n lhs = np.asarray(lhs)\n rhs = np.asarray(rhs)\n pcg = np.random.PCG64(849756746597530743027509)\n gen = np.random.Generator(pcg)\n nobs = lhs.shape[0]\n idx = gen.permutation(nobs)\n predictions = np.empty((nobs, 6))\n for fold in range(folds):\n right = int(fold * nobs / folds)\n left = int((fold + 1) * nobs / folds)\n locs = idx[np.r_[np.arange(0, right), np.arange(left, nobs)]]\n end = nobs if fold == folds - 1 else left\n pred_loc = idx[np.arange(right, end)]\n pred_rhs = rhs[pred_loc]\n sm = OLS(lhs[locs], rhs[locs, :3]).fit()\n predictions[pred_loc, 0] = pred_rhs[:, :3] @ sm.params\n lg = OLS(lhs[locs], rhs[locs]).fit()\n predictions[pred_loc, 1] = pred_rhs @ lg.params\n if log and np.all(np.sign(lhs) == np.sign(lhs)[0]):\n log_lhs = np.log(np.abs(lhs))\n sgn = np.sign(lhs[0])\n sm_log = OLS(log_lhs[locs], rhs[locs, :3]).fit()\n sigma2 = (sm_log.resid**2).mean()\n predictions[pred_loc, 2] = sgn * np.exp(pred_rhs[:, :3] @ sm_log.params)\n predictions[pred_loc, 3] = sgn * np.exp(\n pred_rhs[:, :3] @ sm_log.params + sigma2 / 2\n )\n\n lg_log = OLS(log_lhs[locs], rhs[locs]).fit()\n sigma2 = (lg_log.resid**2).mean()\n predictions[pred_loc, 4] = sgn * np.exp(pred_rhs @ lg_log.params)\n predictions[pred_loc, 5] = sgn * np.exp(\n pred_rhs @ lg_log.params + sigma2 / 2\n )\n errors = lhs[:, None] - predictions\n best = int(np.argmin(errors.var(0)))\n WINS[best] += 1\n\n\ndef estimate_cv_regression(\n results: pd.DataFrame, statistic: str\n) -> Tuple[Dict[int, List[float]], float]:\n # For percentiles 1, 5 and 10, regress on a constant, and powers of 1/T\n out = {}\n quantiles = np.asarray(results.index)\n tau = np.array(results.columns).reshape((1, -1)).T\n rhs = (1.0 / tau) ** np.arange(4)\n for cv in CRITICAL_VALUES:\n if META[statistic] == \"negative\":\n loc = np.argmin(np.abs(100 * quantiles - cv))\n else:\n loc = np.argmin(np.abs(100 * quantiles - (100 - cv)))\n lhs = np.squeeze(np.asarray(results.iloc[loc]))\n xval(lhs, rhs)\n res = OLS(lhs, rhs).fit()\n params = res.params.copy()\n if res.pvalues[-1] > 0.05:\n params[-1] = 0.00\n out[cv] = [float(round(val, 5)) for val in params]\n return out, float(tau.min())\n\n\ndef fit_pval_model(quantiles: pd.DataFrame) -> PvalueResult:\n percentiles = quantiles.index.to_numpy()\n lhs = stats.norm.ppf(percentiles)\n data = np.asarray(quantiles)\n avg_test_stats = data.mean(1)\n avg_test_std = data.std(1)\n avg_test_stats = avg_test_stats[:, None]\n\n rhs = avg_test_stats ** np.arange(4)\n rhs_large = rhs\n lhs_large = lhs\n res_large = WLS(lhs_large, rhs, weights=1.0 / 
avg_test_std).fit()\n large_p = res_large.params.tolist()\n\n # Compute tau_max, by finding the func maximum\n p = res_large.params\n poly_roots = np.roots(np.array([3, 2, 1.0]) * p[:0:-1])\n if np.isreal(poly_roots[0]):\n tau_max = float(np.squeeze(np.real(np.max(poly_roots))))\n else:\n tau_max = np.inf\n\n # Small p regression using only p<=15%\n cutoff = np.where(percentiles <= 0.150)[0]\n avg_test_stats = avg_test_stats[cutoff]\n avg_test_std = avg_test_std[cutoff]\n lhs_small = lhs[cutoff]\n rhs = avg_test_stats ** np.arange(3)\n res_small = WLS(lhs_small, rhs, weights=1.0 / avg_test_std).fit()\n small_p = res_small.params.tolist()\n\n # Compute tau star\n err_large = lhs_large - rhs_large.dot(res_large.params)\n # Missing 1 parameter here, replace with 0\n params = np.append(res_small.params, 0.0)\n err_small = lhs_large - rhs_large.dot(params)\n # Find the location that minimizes the total absolute error\n m = lhs_large.shape[0]\n abs_err = np.zeros((m, 1))\n for j in range(m):\n abs_err[j] = np.abs(err_large[:j]).sum() + np.abs(err_small[j:]).sum()\n loc = np.argmin(abs_err)\n tau_star = rhs_large[loc, 1]\n # Compute tau min\n tau_min = -params[1] / (2 * params[2])\n large_p = [round(val, 5) for val in large_p]\n small_p = [round(val, 5) for val in small_p]\n tau_max = round(tau_max, 5)\n tau_star = round(tau_star, 5)\n tau_min = round(tau_min, 5)\n return PvalueResult(large_p, small_p, tau_max, tau_star, tau_min)\n\n\nresults = defaultdict(list)\nnum_files = {}\nfor file_type in FILE_TYPES:\n for trend in TRENDS:\n pattern = f\"*-statistic-{file_type}-trend-{trend}-*.hdf\"\n result_files = glob.glob(os.path.join(ROOT, pattern))\n num_files[(file_type, trend)] = len(result_files)\n for rf in result_files:\n temp = pd.DataFrame(pd.read_hdf(rf, \"results\"))\n statistics = temp.columns.levels[2]\n for stat in statistics:\n single = temp.loc[:, pd.IndexSlice[:, :, stat]]\n single.columns = single.columns.droplevel(2)\n results[(stat, trend)].append(single)\n\nassert len(num_files) > 0\n# assert all([nf == num_files[0] for nf in num_files])\nnsimulation = {k: 250_000 * v for k, v in num_files.items()}\n\njoined = defaultdict(list)\nfor key in results:\n temp = results[key]\n stoch_trends = temp[0].columns.levels[1]\n for st in stoch_trends:\n for df in temp:\n single = df.loc[:, pd.IndexSlice[:, st]]\n single.columns = single.columns.droplevel(1)\n single = single.dropna(axis=1, how=\"all\")\n joined[key + (st,)].append(single)\n\nfinal = {key: pd.concat(joined[key], axis=1) for key in joined}\nstat_names = {\"p_z\": \"Pz\", \"p_u\": \"Pu\", \"z_t\": \"Zt\", \"z_a\": \"Za\"}\ncv_params = {}\ncv_tau_min = {}\nfor final_key in final:\n final_key = (stat_names[final_key[0]],) + final_key[1:]\n cv_params[final_key], cv_tau_min[final_key] = estimate_cv_regression(\n final[final_key], final_key[0]\n )\n\nprint(\"Best methods\")\nfor wins_key in sorted(WINS):\n print(f\"{wins_key}: {WINS[wins_key]}\")\n\nreport = []\nfor key in nsimulation:\n s = key[0].upper()\n t = key[1]\n n = nsimulation[key]\n report.append(f\"{s}-type statistics with trend {t} based on {n:,} simulations\")\n\ncounts = \"\\n\".join(report)\n\nSTATISTICS = set(str(final_key[0]) for final_key in final)\nALL_TRENDS = set(str(final_key[1]) for final_key in final)\nNSTOCHASTICS = set(int(final_key[-1]) for final_key in final)\nquantiles_d = defaultdict(list)\npval_data = {}\nfor multi_key in product(STATISTICS, ALL_TRENDS, NSTOCHASTICS):\n pval_data[multi_key] = final[multi_key].loc[:, 2000]\n temp = final[multi_key].loc[:, 
2000].mean(1)\n temp.name = multi_key[-1]\n quantiles_d[multi_key[:-1]].append(temp)\nquantiles = {}\nfor key in quantiles_d:\n quantiles[key] = pd.concat(quantiles_d[key], axis=1)\n\n\nplt.rc(\"figure\", figsize=(16, 8))\nsns.set_style(\"darkgrid\")\npdf = matplotlib.backends.backend_pdf.PdfPages(\"output.pdf\")\nfor key in quantiles:\n temp = quantiles[key]\n y = temp.index.to_numpy()[:, None]\n x = temp.to_numpy()\n stat = key[0]\n if stat in (\"z_t\", \"z_a\"):\n x = -1 * x\n if stat in (\"p_u\", \"p_z\"):\n y = 1 - y\n fig, ax = plt.subplots(1, 1)\n plt.plot(x, y)\n plt.title(key)\n pdf.savefig(fig)\n if stat in (\"p_u\", \"p_z\"):\n fig, ax = plt.subplots(1, 1)\n plt.plot(np.log(x), y)\n plt.title(f\"Log {key[0]}, {key[1]}\")\n pdf.savefig(fig)\npdf.close()\n\npval_results = {}\npval_large_p = {}\npval_small_p = {}\npval_tau_star = {}\npval_tau_min = {}\npval_tau_max = {}\nfor pval_key in pval_data:\n temp = pval_data[pval_key].copy()\n if pval_key[0] in (\"p_z\", \"p_u\"):\n temp.index = 1 - temp.index\n temp = -1 * temp\n temp = temp.sort_index()\n res = fit_pval_model(temp)\n out_key = (stat_names[pval_key[0]],) + pval_key[1:]\n pval_results[out_key] = res\n pval_large_p[out_key] = res.large_p\n pval_small_p[out_key] = res.small_p\n pval_tau_min[out_key] = res.tau_min\n pval_tau_max[out_key] = res.tau_max\n pval_tau_star[out_key] = res.tau_star\n\n\nheader = f'''\\\n\"\"\"\nCritical values produced by phillips-ouliaris-simulation.py\n\n{counts}\n\"\"\"\n\nfrom math import inf\n\n'''\n\nformatted_code = header + \"CV_PARAMETERS = \" + format_dict(cv_params)\nformatted_code += \"\\n\\nCV_TAU_MIN = \" + format_dict(cv_tau_min)\nformatted_code += \"\\n\\nPVAL_LARGE_P = \" + format_dict(pval_large_p)\nformatted_code += \"\\n\\nPVAL_SMALL_P = \" + format_dict(pval_small_p)\nformatted_code += \"\\n\\nPVAL_TAU_MAX = \" + format_dict(pval_tau_max)\nformatted_code += \"\\n\\nPVAL_TAU_STAR = \" + format_dict(pval_tau_star)\nformatted_code += \"\\n\\nPVAL_TAU_MIN = \" + format_dict(pval_tau_min)\n\ntargets = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}\nfm = FileMode(target_versions=targets)\nformatted_code = format_file_contents(formatted_code, fast=False, mode=fm)\n\nwith open(\"../phillips_ouliaris.py\", \"w\") as po:\n po.write(formatted_code)\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.asarray", "numpy.random.Generator", "matplotlib.pyplot.rc", "matplotlib.pyplot.plot", "numpy.max", "numpy.argmin", "numpy.exp", "numpy.where", "numpy.arange", "numpy.zeros", "numpy.random.PCG64", "pandas.concat", "numpy.log", "pandas.read_hdf", "matplotlib.pyplot.title", "numpy.append", "numpy.array", "numpy.isreal", "numpy.abs", "matplotlib.pyplot.subplots", "numpy.sign", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
jsalsman/TensorNetwork
[ "9914ec04d5a783a445b8ee56c82030dc69fed3ed" ]
[ "tensornetwork/network_components.py" ]
[ "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementation of Network Components.\"\"\"\n\nfrom typing import Any, Dict, List, Optional, Set, Text, Tuple, Type, Union, \\\n overload, Sequence, Iterable\nimport numpy as np\nfrom abc import ABC\nfrom abc import abstractmethod\nimport h5py\n\n#pylint: disable=useless-import-alias\nimport tensornetwork.config as config\nfrom tensornetwork import ops\nfrom tensornetwork.backends import backend_factory\nfrom tensornetwork.backends.base_backend import BaseBackend\n\nstring_type = h5py.special_dtype(vlen=str)\nTensor = Any\n# This is required because of the circular dependency between\n# network_components.py and network.py types.\n\n\nclass BaseNode(ABC):\n \"\"\"\n Base class for nodes. Should be subclassed.\n\n A Node represents a concrete tensor in a tensor network. The number of edges\n for a node represents the rank of that tensor.\n\n For example:\n\n * A node with no edges means this node represents a scalar value.\n * A node with a single edge means this node is a vector.\n * A node with two edges represents a matrix.\n * A node with three edges is a tensor of rank 3, etc.\n\n Each node can have an arbitrary rank/number of edges, each of which can have\n an arbitrary dimension.\n \"\"\"\n\n def __init__(self,\n name: Optional[Text] = None,\n axis_names: Optional[List[Text]] = None,\n backend: Optional[BaseBackend] = None,\n shape: Optional[Tuple[int]] = None) -> None:\n \"\"\"Create a node. Should be subclassed before usage\n and a limited number of abstract methods and properties implemented.\n\n Args:\n name: Name of the node. 
Used primarily for debugging.\n axis_names: List of names for each of the tensor's axes.\n shape: the shape of the tensor, as tuple of integers.\n\n Raises:\n ValueError: If there is a repeated name in `axis_names` or if the length\n doesn't match the shape of the tensor.\n \"\"\"\n\n self.is_disabled = False\n self.name = name if name is not None else '__unnamed_node__'\n self.backend = backend\n self._shape = shape\n if axis_names is not None:\n self._edges = [\n Edge(node1=self, axis1=i, name=edge_name)\n for i, edge_name in enumerate(axis_names)\n ]\n elif shape is not None:\n self._edges = [\n Edge(node1=self, axis1=i, name=\"Dangling_{}\".format(i))\n for i, _ in enumerate(shape)\n ]\n else:\n raise ValueError(\"One of axis_names or shape must be provided.\")\n if axis_names is not None:\n self.add_axis_names(axis_names)\n else:\n self._axis_names = [str(i) for i in range(len(shape))]\n\n self._signature = -1\n\n collection = ops.get_current_collection()\n if collection is not None:\n collection.add(self)\n\n super().__init__()\n\n @property\n def dtype(self):\n #any derived instance of BaseNode always has to have a tensor\n return self.tensor.dtype\n\n def set_signature(self, signature: int) -> None:\n \"\"\"Set the signature for the node.\n\n Signatures are numbers that uniquely identify a node.\n \"\"\"\n self.signature = signature\n\n def add_axis_names(self, axis_names: List[Text]) -> None:\n \"\"\"Add axis names to a Node.\n\n Args:\n axis_names: List of names for each of the tensor's axes.\n\n Raises:\n ValueError: If there is a repeated name in `axis_names` or if the length\n doesn't match the shape of the tensor.\n \"\"\"\n if len(axis_names) != len(set(axis_names)):\n raise ValueError(\"Not all axis names are unique.\")\n if len(axis_names) != len(self.shape):\n raise ValueError(\"axis_names is not the same length as the tensor shape.\"\n \"axis_names length: {}, tensor.shape length: {}\".format(\n len(axis_names), len(self.shape)))\n self.axis_names = axis_names[:]\n\n def add_edge(self,\n edge: \"Edge\",\n axis: Union[int, Text],\n override: bool = False) -> None:\n \"\"\"Add an edge to the node on the given axis.\n\n Args:\n edge: The edge to add.\n axis: The axis the edge points to.\n override: If true, replace the existing edge with the new one.\n\n Raises:\n ValueError: If the edge on axis is not dangling.\n \"\"\"\n axis_num = self.get_axis_number(axis)\n if axis_num < 0 or axis_num >= len(self.shape):\n raise ValueError(\"Axis must be positive and less than rank of the tensor\")\n if not self.edges[axis_num].is_dangling() and not override:\n raise ValueError(\n \"Node '{}' already has a non-dangling edge for axis {}\".format(\n self, axis))\n self.edges[axis_num] = edge\n\n @abstractmethod\n def get_tensor(self) -> Tensor:\n return\n\n @abstractmethod\n def set_tensor(self, tensor) -> None:\n return\n\n @property\n @abstractmethod\n def shape(self) -> Tuple[Optional[int], ...]:\n if self._shape is None:\n raise ValueError('Please ensure this Node has a well-defined shape')\n return self._shape\n\n @property\n @abstractmethod\n def tensor(self) -> Tensor:\n return\n\n @tensor.setter\n @abstractmethod\n def tensor(self, tensor: Tensor) -> None:\n return\n\n def get_rank(self) -> int:\n \"\"\"Return rank of tensor represented by self.\"\"\"\n return len(self.shape)\n\n def reorder_edges(self, edge_order: List[\"Edge\"]) -> \"BaseNode\":\n \"\"\"Reorder the edges for this given Node.\n\n This will reorder the node's edges and transpose the underlying tensor\n 
accordingly.\n\n Args:\n edge_order: List of edges. The order in the list determines the new edge\n ordering.\n\n Returns:\n This node post reordering.\n\n Raises:\n ValueError: If either the list of edges is not the same as expected or\n if you try to reorder with a trace edge.\n AttributeError: If the Node has no tensor.\n\n \"\"\"\n if not hasattr(self, '_tensor'):\n raise AttributeError(\"Please provide a valid tensor for this Node.\")\n\n extra_edges = set(edge_order).difference(set(self.edges))\n if extra_edges:\n raise ValueError(\"Given edge order does not match expected edges. \"\n \"Additional edges that do not belong to node found: \"\n \"{}\".format(extra_edges))\n missing_edges = set(self.edges).difference(set(edge_order))\n if missing_edges:\n raise ValueError(\"Given edge order does not match expected edges. \"\n \"Missing edges that belong to node found: \"\n \"{}\".format(missing_edges))\n for edge in edge_order:\n if edge.node1 == edge.node2:\n raise ValueError(\"Edge reordering does not support trace edges. \"\n \"Found trace edge: '{}'\".format(edge))\n\n permutation = []\n for i, edge in enumerate(edge_order):\n # This is O(n^2), but the number of edges will likely never be >100\n # so this should be fine for now.\n old_position = self.edges.index(edge)\n permutation.append(old_position)\n edge.update_axis(old_position, self, i, self)\n self.edges = edge_order[:]\n self.tensor = self.backend.transpose(self.tensor, perm=permutation)\n if self.axis_names is not None:\n # Update axis_names:\n tmp_axis_names = []\n for i in permutation:\n tmp_axis_names.append(self.axis_names[i])\n self.axis_names = tmp_axis_names\n return self\n\n def reorder_axes(self, perm: List[int]) -> \"BaseNode\":\n \"\"\"Reorder axes of the node's tensor.\n\n This will also update all of the node's edges.\n\n Args:\n perm: Permutation of the dimensions of the node's tensor.\n\n Returns:\n This node post reordering.\n\n Raises:\n AttributeError: If the Node has no tensor.\n \"\"\"\n if not hasattr(self, '_tensor'):\n raise AttributeError(\"Please provide a valid tensor for this Node.\")\n\n if set(perm) != set(range(len(self.edges))):\n raise ValueError(\"A full permutation was not passed. 
\"\n \"Permutation passed: {}\".format(perm))\n self.tensor = self.backend.transpose(self.tensor, perm=perm)\n tmp_edges = []\n for i, position in enumerate(perm):\n edge = self.edges[position]\n edge.update_axis(position, self, i, self)\n tmp_edges.append(edge)\n self.edges = tmp_edges\n if self.axis_names is not None:\n # Permute axis names accordingly.\n tmp_axis_names = []\n for i in perm:\n tmp_axis_names.append(self.axis_names[i])\n self.axis_names = tmp_axis_names\n return self\n\n def get_axis_number(self, axis: Union[Text, int]) -> int:\n \"\"\"Get the axis number for a given axis name or value.\"\"\"\n if isinstance(axis, int):\n return axis\n try:\n return self.axis_names.index(axis)\n except ValueError:\n raise ValueError(\"Axis name '{}' not found for node '{}'\".format(\n axis, self))\n\n def get_dimension(self, axis: Union[Text, int]) -> Optional[int]:\n \"\"\"Get the dimension on the given axis.\n\n Args:\n axis: The axis of the underlying tensor.\n\n Returns:\n The dimension of the given axis.\n\n Raises:\n ValueError: if axis isn't an int or if axis is too large or small.\n \"\"\"\n axis_num = self.get_axis_number(axis)\n if axis_num < 0 or axis_num >= len(self.shape):\n raise ValueError(\"Axis must be positive and less than rank of the tensor\")\n return self.shape[axis_num]\n\n def get_edge(self, axis: Union[int, Text]) -> \"Edge\":\n axis_num = self.get_axis_number(axis)\n return self.edges[axis_num]\n\n def get_all_edges(self) -> List[\"Edge\"]:\n # Copy to prevent overwriting.\n return self.edges[:]\n\n def get_all_nondangling(self) -> Set[\"Edge\"]:\n \"\"\"Return the set of nondangling edges connected to this node.\"\"\"\n return {edge for edge in self.edges if not edge.is_dangling()}\n\n def get_all_dangling(self) -> Set[\"Edge\"]:\n \"\"\"Return the set of dangling edges connected to this node.\"\"\"\n return {edge for edge in self.edges if edge.is_dangling()}\n\n def set_name(self, name) -> None:\n self.name = name\n\n def has_nondangling_edge(self) -> bool:\n for e in self.edges:\n if not e.is_dangling():\n return True\n return False\n\n def has_dangling_edge(self) -> bool:\n for e in self.edges:\n if e.is_dangling():\n return True\n return False\n\n @overload\n def __getitem__(self, key: slice) -> List[\"Edge\"]:\n pass\n\n @overload\n def __getitem__(self, key: Union[int, Text]) -> \"Edge\":\n pass\n\n def __getitem__(self,\n key: Union[int, Text, slice]) -> Union[\"Edge\", List[\"Edge\"]]:\n if isinstance(key, slice):\n return self.edges[key]\n return self.get_edge(key)\n\n def __str__(self) -> Text:\n return self.name\n\n def __lt__(self, other) -> bool:\n if not isinstance(other, BaseNode):\n raise ValueError(\"Object {} is not a Node type.\".format(other))\n return id(self) < id(other)\n\n def __matmul__(self, other: \"BaseNode\") -> \"BaseNode\":\n if not hasattr(self, '_tensor'):\n raise AttributeError(\"Please provide a valid tensor for this Node.\")\n if not isinstance(other, BaseNode):\n raise TypeError(\"Cannot use '@' with type '{}'\".format(type(other)))\n if self.is_disabled:\n raise ValueError(\"Cannot use '@' on disabled node {}.\".format(self.name))\n return contract_between(self, other)\n\n @property\n def edges(self) -> List[\"Edge\"]:\n if self.is_disabled:\n raise ValueError('Node {} has been disabled. 
'\n 'Accessing its edges is no longer possible'.format(\n self.name))\n return self._edges\n\n @edges.setter\n def edges(self, edges: List) -> None:\n if self.is_disabled:\n raise ValueError('Node {} has been disabled.'\n 'Assigning edges is no longer possible'.format(\n self.name))\n self._edges = edges\n\n @property\n def axis_names(self) -> List[Text]:\n return self._axis_names\n\n @axis_names.setter\n def axis_names(self, axis_names: List[Text]) -> None:\n if len(axis_names) != len(self.shape):\n raise ValueError(\"Expected {} names, only got {}.\".format(\n len(self.shape), len(axis_names)))\n self._axis_names = axis_names\n\n @property\n def signature(self) -> Optional[int]:\n if self.is_disabled:\n raise ValueError('Node {} has been disabled. '\n 'Accessing its signature is no longer possible'.format(\n self.name))\n return self._signature\n\n @signature.setter\n def signature(self, signature: int) -> None:\n if self.is_disabled:\n raise ValueError('Node {} has been disabled. '\n 'Assigning a signature is no longer possible'.format(\n self.name))\n self._signature = signature\n\n def disable(self) -> None:\n if self.is_disabled:\n raise ValueError('Node {} is already disabled'.format(self.name))\n self.is_disabled = True\n\n @classmethod\n @abstractmethod\n def _load_node(cls, node_data: h5py.Group) -> \"BaseNode\":\n \"\"\"load a node based on hdf5 data.\n\n Args:\n node_data: h5py group that contains the serialized node data\n\n Returns:\n The loaded node.\n \"\"\"\n return\n\n @classmethod\n def _load_node_data(cls,\n node_data: h5py.Group) -> Tuple[Any, Any, Any, Any, Any]:\n \"\"\"Common method to enable loading nodes based on hdf5 data.\n Only a common functionality to load node properties is implemented.\n\n Args:\n node_data: h5py group that contains the serialized node data\n\n Returns:\n the node's name, signature, shape, axis_names\n \"\"\"\n name = node_data['name'][()]\n signature = node_data['signature'][()]\n backend = node_data['backend'][()]\n shape = node_data['shape'][()]\n axis_names = node_data['axis_names'][()]\n return name, signature, shape, axis_names, backend\n\n @abstractmethod\n def _save_node(self, node_group: h5py.Group) -> None:\n \"\"\"Abstract method to enable saving nodes to hdf5.\n Only serializing common properties is implemented. 
Should be\n overwritten by subclasses.\n\n Args:\n node_group: h5py group where data is saved\n \"\"\"\n node_group.create_dataset('type', data=type(self).__name__)\n node_group.create_dataset('signature', data=self.signature)\n node_group.create_dataset('backend', data=self.backend.name)\n node_group.create_dataset('name', data=self.name)\n node_group.create_dataset('shape', data=self.shape)\n if self.axis_names:\n node_group.create_dataset(\n 'axis_names',\n dtype=string_type,\n data=np.array(self.axis_names, dtype=object))\n else: #couldn't find any documentation on saving None\n node_group.create_dataset('axis_names', dtype='i', data=123456789)\n\n node_group.create_dataset(\n 'edges',\n dtype=string_type,\n data=np.array([edge.name for edge in self.edges], dtype=object))\n\n def fresh_edges(self, axis_names: Optional[List[Text]] = None) -> None:\n if not axis_names:\n axis_names = self.axis_names\n if not axis_names:\n axis_names = [str(i) for i in range(len(self.shape))]\n for i in range(len(self.edges)):\n new_edge = Edge(node1=self, axis1=i, name=axis_names[i])\n self.add_edge(new_edge, i, True)\n\n\nclass Node(BaseNode):\n \"\"\"\n A Node represents a concrete tensor in a tensor network.\n The number of edges for a node represents the rank of that tensor.\n\n For example:\n\n * A node with no edges means this node represents a scalar value.\n * A node with a single edge means this node is a vector.\n * A node with two edges represents a matrix.\n * A node with three edges is a tensor of rank 3, etc.\n\n Each node can have an arbitrary rank/number of edges, each of which can have\n an arbitrary dimension.\n \"\"\"\n\n def __init__(self,\n tensor: Union[Tensor, BaseNode],\n name: Optional[Text] = None,\n axis_names: Optional[List[Text]] = None,\n backend: Optional[Union[Text, BaseBackend]] = None) -> None:\n \"\"\"Create a node.\n\n Args:\n tensor: The concrete that is represented by this node, or a `BaseNode` \n object. If a tensor is passed, it can be \n be either a numpy array or the tensor-type of the used backend.\n If a `BaseNode` is passed, the passed node has to have the same \\\n backend as given by `backend`.\n name: Name of the node. Used primarily for debugging.\n axis_names: List of names for each of the tensor's axes.\n backend: The name of the backend or an instance of a `BaseBackend`.\n\n Raises:\n ValueError: If there is a repeated name in `axis_names` or if the length\n doesn't match the shape of the tensor.\n \"\"\"\n if isinstance(tensor, BaseNode):\n #always use the `Node`'s backend\n backend = tensor.backend\n tensor = tensor.tensor\n if not backend:\n backend = config.default_backend\n if isinstance(backend, BaseBackend):\n backend_obj = backend\n else:\n backend_obj = backend_factory.get_backend(backend)\n self._tensor = backend_obj.convert_to_tensor(tensor)\n super().__init__(\n name=name,\n axis_names=axis_names,\n backend=backend_obj,\n shape=backend_obj.shape_tuple(self._tensor))\n\n def get_tensor(self) -> Tensor:\n return self.tensor\n\n def set_tensor(self, tensor) -> None:\n self.tensor = tensor\n\n @property\n def shape(self) -> Tuple[Optional[int], ...]:\n if self.is_disabled:\n raise ValueError('Node {} has been disabled. 
'\n 'Access its shape via self.tensor'.format(self.name))\n return self.backend.shape_tuple(self._tensor)\n\n @property\n def tensor(self) -> Tensor:\n return self._tensor\n\n @tensor.setter\n def tensor(self, tensor: Tensor) -> Tensor:\n self._tensor = tensor\n\n def _save_node(self, node_group: h5py.Group) -> None:\n \"\"\"Method to save a node to hdf5.\n\n Args:\n node_group: h5py group where data is saved\n \"\"\"\n super()._save_node(node_group)\n node_group.create_dataset('tensor', data=self._tensor)\n\n @classmethod\n def _load_node(cls, node_data: h5py.Group) -> \"BaseNode\":\n \"\"\"Load a node based on hdf5 data.\n\n Args:\n node_data: h5py group that contains the serialized node data\n\n Returns:\n The loaded node.\n \"\"\"\n name, signature, _, axis_names, backend = cls._load_node_data(node_data)\n tensor = node_data['tensor'][()]\n # pylint: disable=unnecessary-comprehension\n node = Node(\n tensor,\n name=name,\n axis_names=[ax for ax in axis_names],\n backend=backend)\n node.set_signature(signature)\n return node\n\n def __repr__(self) -> Text:\n edges = self.get_all_edges()\n return (f'{self.__class__.__name__}\\n(\\n'\n f'name : {self.name!r},'\n f'\\ntensor : \\n{self.tensor!r},'\n f'\\nedges : \\n{edges!r} \\n)')\n\n\nclass CopyNode(BaseNode):\n\n def __init__(self,\n rank: int,\n dimension: int,\n name: Optional[Text] = None,\n axis_names: Optional[List[Text]] = None,\n backend: Optional[Text] = None,\n dtype: Type[np.number] = np.float64) -> None:\n \"\"\"\n Initialize a CopyNode:\n Args:\n rank: The rank of the tensor.\n dimension: The dimension of each leg.\n name: A name for the node.\n axis_names: axis_names for the node.\n backend: An optional backend for the node. If `None`, a default\n backend is used\n dtype: The dtype used to initialize a numpy-copy node.\n Note that this dtype has to be a numpy dtype, and it has to be \n compatible with the dtype of the backend, e.g. 
for a tensorflow\n backend with a tf.Dtype=tf.floa32, `dtype` has to be `np.float32`.\n \"\"\"\n\n if not backend:\n backend = config.default_backend\n backend_obj = backend_factory.get_backend(backend)\n\n self.rank = rank\n self.dimension = dimension\n self._tensor = None\n self.copy_node_dtype = dtype\n\n super().__init__(\n name=name,\n axis_names=axis_names,\n backend=backend_obj,\n shape=(dimension,) * rank)\n\n def get_tensor(self) -> Tensor:\n return self.tensor\n\n def set_tensor(self, tensor) -> None:\n self.tensor = tensor\n\n @property\n def shape(self) -> Tuple[Optional[int], ...]:\n return (self.dimension,) * self.rank\n\n @property\n def tensor(self) -> Tensor:\n if self._tensor is None:\n copy_tensor = self.make_copy_tensor(self.rank, self.dimension,\n self.copy_node_dtype)\n self._tensor = self.backend.convert_to_tensor(copy_tensor)\n return self._tensor\n\n @tensor.setter\n def tensor(self, tensor: Tensor) -> Tensor:\n self._tensor = tensor\n\n @staticmethod\n def make_copy_tensor(rank: int, dimension: int,\n dtype: Type[np.number]) -> Tensor:\n shape = (dimension,) * rank\n copy_tensor = np.zeros(shape, dtype=dtype)\n i = np.arange(dimension)\n copy_tensor[(i,) * rank] = 1\n return copy_tensor\n\n def _is_my_trace(self, edge: \"Edge\") -> bool:\n return edge.node1 is self and edge.node2 is self\n\n def _get_partner(self, edge: \"Edge\") -> Tuple[BaseNode, int]:\n if edge.node1 is self:\n assert edge.axis2 is not None\n return edge.node2, edge.axis2\n assert edge.node2 is self\n return edge.node1, edge.axis1\n\n def get_partners(self) -> Dict[BaseNode, Set[int]]:\n partners = {} # type: Dict[BaseNode, Set[int]]\n for edge in self.edges:\n if edge.is_dangling():\n raise ValueError('Cannot contract copy tensor with dangling edges')\n if self._is_my_trace(edge):\n continue\n partner_node, shared_axis = self._get_partner(edge)\n if partner_node not in partners:\n partners[partner_node] = set()\n partners[partner_node].add(shared_axis)\n return partners\n\n _VALID_SUBSCRIPTS = list(\n 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n\n def _make_einsum_input_term(self, node: BaseNode, shared_axes: Set[int],\n next_index: int) -> Tuple[str, int]:\n indices = []\n for axis in range(node.get_rank()):\n if axis in shared_axes:\n indices.append(0)\n else:\n indices.append(next_index)\n next_index += 1\n term = \"\".join(self._VALID_SUBSCRIPTS[i] for i in indices)\n return term, next_index\n\n def _make_einsum_output_term(self, next_index: int) -> str:\n return \"\".join(self._VALID_SUBSCRIPTS[i] for i in range(1, next_index))\n\n def _make_einsum_expression(self, partners: Dict[BaseNode, Set[int]]) -> str:\n next_index = 1 # zero is reserved for the shared index\n einsum_input_terms = []\n for partner_node, shared_axes in partners.items():\n einsum_input_term, next_index = self._make_einsum_input_term(\n partner_node, shared_axes, next_index)\n einsum_input_terms.append(einsum_input_term)\n einsum_output_term = self._make_einsum_output_term(next_index)\n einsum_expression = \",\".join(einsum_input_terms) + \"->\" + einsum_output_term\n return einsum_expression\n\n def compute_contracted_tensor(self) -> Tensor:\n \"\"\"Compute tensor corresponding to contraction of self with neighbors.\"\"\"\n partners = self.get_partners()\n einsum_expression = self._make_einsum_expression(partners)\n tensors = [partner.get_tensor() for partner in partners]\n return self.backend.einsum(einsum_expression, *tensors)\n\n # pylint: disable=W0235\n def _save_node(self, node_group: 
h5py.Group) -> None:\n \"\"\"Method to save a node to hdf5.\n\n Args:\n node_group: h5py group where data is saved\n \"\"\"\n super()._save_node(node_group)\n node_group.create_dataset(\n name='copy_node_dtype', data=np.dtype(self.copy_node_dtype).name)\n\n @classmethod\n def _load_node(cls, node_data: h5py.Group) -> \"CopyNode\":\n \"\"\"Load a node based on hdf5 data.\n\n Args:\n node_data: h5py group that contains the serialized node data\n\n Returns:\n The loaded node.\n \"\"\"\n name, signature, shape, axis_names, backend = cls._load_node_data(node_data)\n copy_node_dtype = np.dtype(node_data['copy_node_dtype'][()])\n # pylint: disable=unnecessary-comprehension\n node = CopyNode(\n rank=len(shape),\n dimension=shape[0],\n name=name,\n axis_names=[ax for ax in axis_names],\n backend=backend,\n dtype=copy_node_dtype)\n\n node.set_signature(signature)\n return node\n\n\nclass Edge:\n \"\"\"Each edge represents a vector space common to the tensors it connects and\n over which a contraction may be performed. In numpy terms, each edge\n represents a `tensordot` operation over the given axes.\n There are 3 main types of edges:\n\n Standard Edge:\n A standard edge is like any other edge you would find in a normal\n undirected graph as they connect two different nodes. This edge represents\n a tensor contraction of the underlying tensors along their given axes.\n The two axes must be the same dimension.\n\n Dangling Edge:\n A dangling edge is an edge that only connects to a single node and only one\n part of the edge connects to the node. The other end is left \"dangling\".\n These types of edges can not be contracted and represent additional\n dimensions on the underlying tensor. After all other edges are contracted,\n the final result will have the same rank as the number of dangling edges. If\n there are no dangling edges, then the final value will be a scalar.\n\n Trace Edges:\n Trace edges are edges that connect a node to itself. These edges represent\n a trace along the given axis. Once again, the axes must be the same\n dimension.\n \"\"\"\n\n def __init__(self,\n node1: BaseNode,\n axis1: int,\n name: Optional[Text] = None,\n node2: Optional[BaseNode] = None,\n axis2: Optional[int] = None) -> None:\n \"\"\"Create an Edge.\n\n Args:\n name: Name of the edge. Used primarily for debugging.\n node1: One of the nodes edge connects.\n axis1: The axis of node1 that represents this edge.\n node2: The other node that this edge connects. Can be `None` if edge is\n dangling.\n axis2: The axis of node2 that represents this edge. Must be `None` if\n node2 is `None`.\n\n Raises:\n ValueError: If node2 and axis2 are not either both `None` or both\n not be `None`.\n \"\"\"\n if (node2 is None) != (axis2 is None):\n raise ValueError(\n \"node2 and axis2 must either be both None or both not be None\")\n self.is_disabled = False\n if not name:\n name = '__unnamed_edge__'\n self._name = name\n self.node1 = node1\n self._axis1 = axis1\n self.node2 = node2\n self._axis2 = axis2\n self._is_dangling = node2 is None\n self._signature = -1\n\n # contraction methods now explicitly disable Edges by setting\n # node1, node2 to None. This makes use of weakref for node1 and node2\n # properties redundant:\n # previously, storage of contracted edges in TensorNetwork caused\n # node1 and node2 refs of those edges to be prevented from garbage\n # collection. 
Once we set them to None explicitly, they will be garbage\n # collected once their refcount goes to zero.\n def disable(self):\n # pylint: disable=attribute-defined-outside-init\n self._node1 = None\n # pylint: disable=attribute-defined-outside-init\n self._node2 = None\n self.is_disabled = True\n\n @property\n def name(self) -> Text:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing its name is no longer possible')\n return self._name\n\n @name.setter\n def name(self, name) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting its name is no longer possible')\n self._name = name\n\n @property\n def axis1(self) -> int:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing axis1 is no longer possible')\n return self._axis1\n\n @axis1.setter\n def axis1(self, axis1: int) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting node1 is no longer possible')\n self._axis1 = axis1\n\n @property\n def axis2(self) -> int:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing axis2 is no longer possible')\n return self._axis2\n\n @axis2.setter\n def axis2(self, axis2: int) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting node1 is no longer possible')\n self._axis2 = axis2\n\n @property\n def signature(self) -> Optional[int]:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing signature is no longer possible')\n return self._signature\n\n @signature.setter\n def signature(self, signature: int) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting node1 is no longer possible')\n self._signature = signature\n\n def set_signature(self, signature: int) -> None:\n if self.is_dangling():\n raise ValueError(\n \"Do not set a signature for dangling edge '{}'.\".format(self))\n self.signature = signature\n\n def get_nodes(self) -> List[Optional[BaseNode]]:\n \"\"\"Get the nodes of the edge.\"\"\"\n return [self.node1, self.node2]\n\n def update_axis(self, old_axis: int, old_node: BaseNode, new_axis: int,\n new_node: BaseNode) -> None:\n \"\"\"Update the node that Edge is connected to.\n\n Args:\n old_axis: The old axis that the edge pointed to.\n old_node: The old node that the edge pointed to.\n new_axis: The new axis that the edge should point to.\n new_node: The new node that replaces the old_node.\n\n Raises:\n AssertionError: Whether the edge actually contained `old_node`.\n \"\"\"\n if self.axis1 == old_axis and self.node1 is old_node:\n self.axis1 = new_axis\n self.node1 = new_node\n elif self.axis2 == old_axis and self.node2 is old_node:\n self.axis2 = new_axis\n self.node2 = new_node\n else:\n raise ValueError(\"Edge '{}' did not contain node '{}' on axis {}. 
\"\n \"node1: '{}', axis1: {}, node2: '{}', axis2: {}\".format(\n self, old_node, old_axis, self.node1, self.axis1,\n self.node2, self.axis2))\n\n @property\n def node1(self) -> BaseNode:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing node1 is no longer possible')\n if self._node1 is None:\n raise ValueError(\"node1 for edge '{}' no longer exists.\".format(self))\n return self._node1\n\n @property\n def node2(self) -> Optional[BaseNode]:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, accessing node2 is no longer possible')\n if self._is_dangling:\n return None\n if self._node2 is None:\n raise ValueError(\"node2 for edge '{}' no longer exists.\".format(self))\n return self._node2\n\n @node1.setter\n def node1(self, node: BaseNode) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting node1 is no longer possible')\n # pylint: disable=attribute-defined-outside-init\n self._node1 = node\n\n @node2.setter\n def node2(self, node: Optional[BaseNode]) -> None:\n if self.is_disabled:\n raise ValueError(\n 'Edge has been disabled, setting node2 is no longer possible')\n # pylint: disable=attribute-defined-outside-init\n self._node2 = node\n if node is None:\n self._is_dangling = True\n\n @property\n def dimension(self) -> Tuple[Optional[int], ...]:\n return self.node1.shape[self.axis1]\n\n def is_dangling(self) -> bool:\n \"\"\"Whether this edge is a dangling edge.\"\"\"\n return self._is_dangling\n\n def is_trace(self) -> bool:\n return self.node1 is self.node2\n\n def is_being_used(self) -> bool:\n \"\"\"Whether the nodes this edge points to also use this edge.\n\n During edge flattening, nodes can change their edges. Since\n deleting objects in python isn't possible, we use this to ensure that the\n edge is actually being used by the given nodes.\n\n Returns:\n Whether this edge is actually being used.\n \"\"\"\n result = self is self.node1[self.axis1]\n if self.node2 is not None:\n result = result and self is self.node2[self.axis2]\n return result\n\n def set_name(self, name: Text) -> None:\n self.name = name\n\n def _save_edge(self, edge_group: h5py.Group) -> None:\n \"\"\"Method to save an edge to hdf5.\n\n Args:\n edge_group: h5py group where data is saved\n \"\"\"\n edge_group.create_dataset('node1', data=self.node1.name)\n edge_group.create_dataset('axis1', data=self.axis1)\n if self.node2 is not None:\n edge_group.create_dataset('node2', data=self.node2.name)\n edge_group.create_dataset('axis2', data=self.axis2)\n edge_group.create_dataset('signature', data=self.signature)\n edge_group.create_dataset('name', data=self.name)\n\n @classmethod\n def _load_edge(cls, edge_data: h5py.Group, nodes_dict: Dict[Text, BaseNode]):\n \"\"\"load an edge based on hdf5 data.\n\n Args:\n edge_data: h5py group that contains the serialized edge data\n nodes: dictionary of node's name, node\n\n Returns:\n The added edge.\n \"\"\"\n node1 = nodes_dict[edge_data[\"node1\"][()]]\n axis1 = int(edge_data[\"axis1\"][()])\n if \"node2\" in list(edge_data.keys()):\n node2 = nodes_dict[edge_data[\"node2\"][()]]\n axis2 = int(edge_data[\"axis2\"][()])\n else:\n node2 = None\n axis2 = None\n signature = edge_data[\"signature\"][()]\n name = edge_data[\"name\"][()]\n edge = cls(node1=node1, axis1=axis1, node2=node2, axis2=axis2, name=name)\n node1.add_edge(edge, axis1)\n if node2 is not None:\n node2.add_edge(edge, axis2)\n if not edge.is_dangling():\n edge.set_signature(signature)\n return edge\n\n def __xor__(self, other: 
\"Edge\") -> \"Edge\":\n return connect(self, other, self.name)\n\n def __lt__(self, other) -> bool:\n if not isinstance(other, Edge):\n raise TypeError(\"Cannot compare 'Edge' with type {}\".format(type(Edge)))\n return self.signature < other.signature\n\n def __str__(self) -> Optional[Text]:\n if self.name:\n return self.name\n return '__unnamed_edge__'\n\n def __repr__(self) -> Text:\n if self.node1 is not None and self.node2 is not None:\n return (f'\\n{self.__class__.__name__}('\n f'{self.node1.name!r}[{self.axis1}] -> '\n f'{self.node2.name!r}[{self.axis2}] )\\n')\n return f'\\n{self.__class__.__name__}(Dangling Edge)[{self.axis1}] \\n'\n\n def disconnect(self,\n edge1_name: Optional[Text] = None,\n edge2_name: Optional[Text] = None) -> Tuple[\"Edge\", \"Edge\"]:\n \"\"\"\n Break an existing non-dangling edge.\n This updates both Edge.node1 and Edge.node2 by removing the \n connecting edge from `Edge.node1.edges` and `Edge.node2.edges`\n and adding new dangling edges instead\n Args:\n edge1_name: A name for the new dangling edge at `self.node1`\n edge2_name: A name for the new dangling edge at `self.node2`\n Returns:\n (new_edge1, new_edge2): The new `Edge` objects of \n `self.node1` and `self.node2`\n \"\"\"\n if self.is_dangling():\n raise ValueError(\"Cannot break dangling edge {}.\".format(self))\n if not edge1_name:\n edge1_name = '__disconnected_edge1_of_{}__'.format(self.name)\n if not edge2_name:\n edge2_name = '__disconnected_edge2_of_{}__'.format(self.name)\n\n node1 = self.node1\n node2 = self.node2\n\n new_edge1 = Edge(node1=node1, axis1=self.axis1, name=edge1_name)\n new_edge2 = Edge(node1=node2, axis1=self.axis2, name=edge2_name)\n node1.add_edge(new_edge1, self.axis1, override=True)\n node2.add_edge(new_edge2, self.axis2, override=True)\n return new_edge1, new_edge2\n\n def __or__(self, other: \"Edge\") -> Tuple[\"Edge\", \"Edge\"]:\n \"\"\"\n Break apart two edges if they are connected\n \"\"\"\n if self is not other:\n raise ValueError('Cannot break two unconnected edges')\n return self.disconnect()\n\n\ndef get_shared_edges(node1: BaseNode, node2: BaseNode) -> Set[Edge]:\n \"\"\"Get all edges shared between two nodes.\n\n Args:\n node1: The first node.\n node2: The second node.\n\n Returns:\n A (possibly empty) `set` of `Edge`s shared by the nodes.\n \"\"\"\n nodes = {node1, node2}\n shared_edges = set()\n # Assuming the network is well formed, all of the edges shared by\n # these two nodes will be stored in just one of the nodes, so we only\n # have to do this loop once.\n for edge in node1.edges:\n if set(edge.get_nodes()) == nodes:\n shared_edges.add(edge)\n return shared_edges\n\n\ndef get_parallel_edges(edge: Edge) -> Set[Edge]:\n \"\"\"\n Get all of the edges parallel to the given `edge`.\n Args:\n edge: The given edge.\n\n Returns:\n A `set` of all of the edges parallel to the given edge \n (including the given edge).\n \"\"\"\n return get_shared_edges(edge.node1, edge.node2)\n\n\ndef get_all_nondangling(nodes: Iterable[BaseNode]) -> Set[Edge]:\n \"\"\"Return the set of all non-dangling edges.\"\"\"\n edges = set()\n for node in nodes:\n edges |= node.get_all_nondangling()\n return edges\n\n\ndef get_all_dangling(nodes: Iterable[BaseNode]) -> Set[Edge]:\n \"\"\"Return the set of all dangling edges.\"\"\"\n edges = set()\n for node in nodes:\n edges |= node.get_all_dangling()\n return edges\n\n\ndef _flatten_trace_edges(edges: List[Edge],\n new_edge_name: Optional[Text] = None) -> Edge:\n \"\"\"Flatten trace edges into single edge.\n\n Args:\n edges: List of 
trace edges to flatten\n new_edge_name: Optional name of the new edge created.\n\n Returns:\n The new edge that represents the flattening of the given edges.\n \"\"\"\n node = edges[0].node1 # We are in the trace case, so this is the only node.\n backend = node.backend\n # Flatten all of the edge's axes into a a single list.\n perm_back = [min(e.axis1, e.axis2) for e in edges]\n perm_back += [max(e.axis1, e.axis2) for e in edges]\n perm_front = set(range(len(node.edges))) - set(perm_back)\n perm_front = sorted(perm_front)\n perm = perm_front + perm_back\n new_dim = backend.prod([backend.shape(node.tensor)[e.axis1] for e in edges])\n node.reorder_axes(perm)\n unaffected_shape = backend.shape(node.tensor)[:len(perm_front)]\n new_shape = backend.concat([unaffected_shape, [new_dim, new_dim]], axis=-1)\n node.tensor = backend.reshape(node.tensor, new_shape)\n edge1 = Edge(node1=node, axis1=len(perm_front), name=\"TraceFront\")\n edge2 = Edge(node1=node, axis1=len(perm_front) + 1, name=\"TraceBack\")\n node.edges = node.edges[:len(perm_front)] + [edge1, edge2]\n new_edge = connect(edge1, edge2, new_edge_name)\n # pylint: disable=expression-not-assigned\n [edge.disable() for edge in edges] #disable edges!\n return new_edge\n\n\ndef flatten_edges(edges: List[Edge],\n new_edge_name: Optional[Text] = None) -> Edge:\n \"\"\"Flatten edges into single edge.\n\n If two nodes have multiple edges connecting them, it may be\n beneficial to flatten these edges into a single edge to avoid having several\n unnecessary trace edges. This can speed up computation time and reduce\n memory cost.\n\n Warning: This will remove all axes names.\n\n Args:\n edges: A list of edges to flatten.\n new_edge_name: Optional name to give to the newly created edge.\n\n Returns:\n The new flattened edge.\n\n Raises:\n ValueError: If edges is an empty list.\n ValueError: If not all of the edges connect to the same node(s).\n ValueError: If one of the nodes connecting to these edges does not have\n edge definitions for all of its axes.\n \"\"\"\n if not edges:\n raise ValueError(\"At least 1 edge must be given.\")\n\n backends = [edge.node1.backend for edge in edges] + [\n edge.node2.backend for edge in edges if edge.node2 is not None\n ]\n\n if not all([b.name == backends[0].name for b in backends]):\n raise ValueError(\"Not all backends are the same.\")\n backend = backends[0]\n if len(edges) == 1:\n return edges[0] # Don't bother with reshaping.\n # Set equality is transitive (a=b, b=c, therefore a=c) so it is only\n # necessary to compare the first edge against the rest.\n expected_nodes = set(edges[0].get_nodes())\n for edge in edges:\n if expected_nodes != set(edge.get_nodes()):\n raise ValueError(\n \"Two edges do not share the same nodes. \"\n \"'{}'s nodes: '{}', '{}'. 
'{}'s nodes: '{}', '{}'\".format(\n edges[0], edges[0].node1, edges[0].node2, edge, edge.node1,\n edge.node2))\n if len(expected_nodes) == 1:\n return _flatten_trace_edges(edges, new_edge_name) #disables edges\n # Flatten standard or dangling edges.\n new_dangling_edges = []\n for node in expected_nodes:\n # Required for dangling case.\n if node is None:\n continue\n axis_names = node.axis_names\n perm_back = []\n for edge in edges:\n # There will only be 1 edge since we are in the standard edge case.\n perm_back.append(node.edges.index(edge))\n perm_front = sorted(set(range(len(node.edges))) - set(perm_back))\n node.reorder_axes(perm_front + perm_back)\n old_tensor_shape = backend.shape(node.tensor)\n # Calculate the new axis dimension as a product of the other\n # axes dimensions.\n flattened_axis_dim = backend.prod(old_tensor_shape[len(perm_front):])\n new_tensor_shape = backend.concat(\n [old_tensor_shape[:len(perm_front)], [flattened_axis_dim]], axis=-1)\n new_tensor = backend.reshape(node.tensor, new_tensor_shape)\n # Modify the node in place. Currently, this is they only method that\n # modifies a node's tensor.\n node.tensor = new_tensor\n # This Edge is required for the connect call later.\n edge = Edge(node1=node, axis1=len(perm_front), name=new_edge_name)\n # Do not set the signature of 'edge' since it is dangling.\n node.edges = node.edges[:len(perm_front)] + [edge]\n new_dangling_edges.append(edge)\n # TODO: Allow renaming of the new axis.\n if axis_names:\n node.axis_names = [axis_names[n] for n in range(len(node.edges))]\n else:\n node.axis_names = [str(n) for n in range(len(node.edges))]\n\n node1, node2 = tuple(expected_nodes)\n # Sets are returned in a random order, so this is how we deal with\n # dangling edges.\n # pylint: disable=expression-not-assigned\n [edge.disable() for edge in edges] #disable edges!\n if node1 is None or node2 is None:\n return new_dangling_edges[0]\n\n return connect(new_dangling_edges[0], new_dangling_edges[1], new_edge_name)\n\n\ndef flatten_edges_between(\n node1: BaseNode,\n node2: BaseNode,\n) -> Optional[Edge]:\n \"\"\"Flatten all of the edges between the given two nodes.\n\n Args:\n node1: The first node.\n node2: The second node.\n\n Returns:\n The flattened `Edge` object. If there was only one edge between the two\n nodes, then the original edge is returned. If there were no edges\n between the nodes, a None is returned.\n \"\"\"\n shared_edges = get_shared_edges(node1, node2)\n if shared_edges:\n return flatten_edges(list(shared_edges))\n return None\n\n\ndef flatten_all_edges(nodes: Iterable[BaseNode]) -> List[Edge]:\n \"\"\"Flatten all edges that belong to the nodes.\n\n Returns:\n A list of all the flattened edges. 
If there was only one edge between\n two given nodes, that original edge is included in this list.\n \"\"\"\n flattened_edges = []\n for edge in get_all_nondangling(nodes):\n if not edge.is_disabled:\n flat_edge = flatten_edges_between(edge.node1, edge.node2)\n flattened_edges.append(flat_edge)\n return flattened_edges\n\n\ndef _split_trace_edge(\n edge: Edge,\n shape: Tuple[int, ...],\n new_edge_names: Optional[List[Text]] = None,\n) -> List[Edge]:\n \"\"\"Split trace edges into single edge.\n\n Args:\n edge: Trace edge to split.\n shape: Tuple of integers used to split trace edge into multiple edges.\n new_edge_names: Optional names of the new edges created.\n\n Returns:\n A list of new edges where the product of the dimensions of the new\n edges corresponds to the dimension of the edge before splitting.\n \"\"\"\n node = edge.node1 # We are in the trace case, so this is the only node.\n backend = node.backend\n # Permute until edge axes to be split are at the back and reshape.\n perm_back = [min(edge.axis1, edge.axis2)]\n perm_back += [max(edge.axis1, edge.axis2)]\n perm_front = set(range(len(node.edges))) - set(perm_back)\n perm_front = sorted(perm_front)\n node.reorder_axes(perm_front + perm_back)\n unaffected_shape = backend.shape(node.tensor)[:len(perm_front)]\n new_shape = backend.concat([unaffected_shape, shape, shape], axis=-1)\n node.tensor = backend.reshape(node.tensor, new_shape)\n # Trim edges and add placeholder edges for new axes.\n node.edges = node.edges[:len(perm_front)] + 2 * len(shape) * [None]\n # Create new dangling edges and connect them to each other.\n new_edges = []\n for idx in range(len(shape)):\n edge1 = Edge(node1=node, axis1=len(perm_front) + idx)\n edge2 = Edge(node1=node, axis1=len(perm_front) + len(shape) + idx)\n node.edges[len(perm_front) + idx] = edge1\n node.edges[len(perm_front) + len(shape) + idx] = edge2\n new_edges.append(\n connect(edge1, edge2,\n new_edge_names[idx] if new_edge_names is not None else None))\n # pylint: disable=expression-not-assigned\n edge.disable() # disable old edge!\n return new_edges\n\n\ndef split_edge(edge: Edge,\n shape: Tuple[int, ...],\n new_edge_names: Optional[List[Text]] = None) -> List[Edge]:\n \"\"\"Split an `Edge` into multiple edges according to `shape`. Reshapes\n the underlying tensors connected to the edge accordingly. 
\n \n This method acts as the inverse operation of flattening edges and\n distinguishes between the following edge cases when adding new edges:\n 1) standard edge connecting two different nodes: reshape node dimensions\n 2) dangling edge (node2 is None): reshape node1 dimension\n 3) trace edge (node1 is node2): reshape node1 dimension\n\n Args:\n edge: Edge to split.\n shape: Tuple of integers used to split edge into multiple edges.\n\n Returns:\n A list of new edges where the product of the dimensions of the new\n edges corresponds to the dimension of the edge before splitting.\n\n Raises:\n ValueError: If the edge dimension mismatches with the split shape.\n ValueError: If the edge is connecting nodes with different backends.\n \"\"\"\n\n # Check if reshape operation is possible.\n if not np.prod(shape) == edge.dimension:\n raise ValueError(\"Edge {} with dimension {} cannot be split according to \"\n \"shape {}.\".format(edge, edge.dimension, shape))\n # Check if possible reshape operation is trivial.\n if len(shape) == 1:\n return [edge]\n\n # Handle trace edge case separately.\n if edge.is_trace():\n return _split_trace_edge(edge, shape, new_edge_names)\n\n backends = [node.backend for node in edge.get_nodes() if node is not None]\n if not all([b.name == backends[0].name for b in backends]):\n raise ValueError(\"Not all backends are the same.\")\n backend = backends[0]\n\n # Split standard or dangling edge.\n new_dangling_edges = []\n expected_nodes = set(edge.get_nodes())\n for node in expected_nodes:\n # Required for dangling case.\n if node is None:\n continue\n axis_names = node.axis_names\n # Permute until edge axes to be split are at the back and reshape.\n perm_back = [node.edges.index(edge)]\n perm_front = set(range(len(node.edges))) - set(perm_back)\n perm_front = sorted(perm_front)\n node.reorder_axes(perm_front + perm_back)\n unaffected_shape = backend.shape(node.tensor)[:len(perm_front)]\n new_shape = backend.concat([unaffected_shape, shape], axis=-1)\n node.tensor = backend.reshape(node.tensor, new_shape) # in-place update\n # Trim edges.\n node.edges = node.edges[:len(perm_front)]\n # Create new dangling edges.\n for idx in range(len(shape)):\n new_dangling_edge = Edge(\n node1=node,\n axis1=len(perm_front) + idx,\n name=new_edge_names[idx] if new_edge_names is not None else None)\n node.edges += [new_dangling_edge]\n new_dangling_edges.append(new_dangling_edge)\n # TODO: Allow renaming of new axes (possibly distinct from new_edge_names).\n if axis_names:\n new_axis_names = [axis_names[n] for n in range(len(unaffected_shape))]\n if new_edge_names:\n new_axis_names.extend(new_edge_names)\n else:\n new_axis_names.extend(\n [str(n) for n in range(len(unaffected_shape), len(node.edges))])\n node.axis_names = new_axis_names\n else:\n node.axis_names = [str(n) for n in range(len(node.edges))]\n\n node1, node2 = tuple(expected_nodes)\n # pylint: disable=expression-not-assigned\n edge.disable() # disable old edge\n\n # Return new dangling edges for dangling case.\n if node1 is None or node2 is None:\n return new_dangling_edges\n\n # Create connected edges between nodes for standard case.\n new_edges = []\n for idx in range(len(shape)):\n new_edges.append(\n connect(new_dangling_edges[idx], new_dangling_edges[len(shape) + idx],\n new_edge_names[idx] if new_edge_names is not None else None))\n return new_edges\n\n\ndef _remove_trace_edge(edge: Edge, new_node: BaseNode) -> None:\n \"\"\"Collapse a trace edge. `edge` is disabled before returning.\n\n Take a trace edge (i.e. 
with edge.node1 = edge.node2),\n remove it, update the axis numbers of all remaining edges\n and move them to `new_node`.\n\n Args:\n edge: The edge to contract.\n new_node: The new node created after contraction.\n\n Returns:\n None\n\n Raises:\n ValueError: If edge is not a trace edge.\n \"\"\"\n if edge.is_dangling():\n raise ValueError(\"Attempted to remove dangling edge '{}'.\".format(edge))\n if edge.node1 is not edge.node2:\n raise ValueError(\"Edge '{}' is not a trace edge.\".format(edge))\n axes = sorted([edge.axis1, edge.axis2])\n node_edges = edge.node1.edges[:]\n node_edges.pop(axes[0])\n node_edges.pop(axes[1] - 1)\n seen_edges = set()\n for tmp_edge in node_edges:\n if tmp_edge in seen_edges:\n continue\n seen_edges.add(tmp_edge)\n if tmp_edge.node1 is edge.node1:\n to_reduce = 0\n to_reduce += 1 if tmp_edge.axis1 > axes[0] else 0\n to_reduce += 1 if tmp_edge.axis1 > axes[1] else 0\n tmp_edge.axis1 -= to_reduce\n tmp_edge.node1 = new_node\n if tmp_edge.node2 is edge.node1:\n to_reduce = 0\n to_reduce += 1 if tmp_edge.axis2 > axes[0] else 0\n to_reduce += 1 if tmp_edge.axis2 > axes[1] else 0\n tmp_edge.axis2 -= to_reduce\n tmp_edge.node2 = new_node\n # Update edges for the new node.\n for i, e in enumerate(node_edges):\n new_node.add_edge(e, i)\n edge.node1.fresh_edges(edge.node1.axis_names)\n edge.disable() #disabled edge!\n\n\ndef _remove_edges(edges: Set[Edge], node1: BaseNode, node2: BaseNode,\n new_node: BaseNode) -> None:\n \"\"\"\n\n Takes a set of `edges` shared between `node1` and `node2` to be contracted\n over, and moves all other uncontracted edges from `node1` and `node2` to\n `new_node`.\n The nodes that currently share the edges in `edges` must be supplied as\n `node1` and `node2`. The ordering of `node1` and `node2` must match the\n axis ordering of `new_node` (as determined by the contraction procedure).\n `node1` and `node2` get both a fresh set edges.\n `edges` are disabled before returning.\n Args:\n edges: The edges to contract.\n node1: The old node that supplies the first edges of `new_node`.\n node2: The old node that supplies the last edges of `new_node`.\n new_node: The new node that represents the contraction of the two old\n nodes.\n Returns:\n node1, node2L\n Raises:\n Value Error: If edge isn't in the network.\n \"\"\"\n if node1 is node2:\n raise ValueError(\n \"node1 and node2 are the same ('{}' == '{}'), but trace edges cannot \"\n \"be removed by _remove_edges.\".format(node1, node2))\n\n node1_edges = node1.edges[:]\n node2_edges = node2.edges[:]\n\n nodes_set = set([node1, node2])\n for edge in edges:\n if edge.is_dangling():\n raise ValueError(\"Attempted to remove dangling edge '{}'.\".format(edge))\n if set([edge.node1, edge.node2]) != nodes_set:\n raise ValueError(\n \"Attempted to remove edges belonging to different node pairs: \"\n \"'{}' != '{}'.\".format(nodes_set, set([edge.node1, edge.node2])))\n\n node1_axis_names = node1.axis_names\n node2_axis_names = node2.axis_names\n\n remaining_edges = []\n for (i, edge) in enumerate(node1_edges):\n if edge not in edges: # NOTE: Makes the cost quadratic in # edges\n edge.update_axis(\n old_node=node1,\n old_axis=i,\n new_axis=len(remaining_edges),\n new_node=new_node)\n remaining_edges.append(edge)\n\n for (i, edge) in enumerate(node2_edges):\n if edge not in edges:\n edge.update_axis(\n old_node=node2,\n old_axis=i,\n new_axis=len(remaining_edges),\n new_node=new_node)\n remaining_edges.append(edge)\n\n for (i, edge) in enumerate(remaining_edges):\n new_node.add_edge(edge, i)\n\n 
node1.fresh_edges(node1_axis_names)\n node2.fresh_edges(node2_axis_names)\n # pylint: disable=expression-not-assigned\n [edge.disable() for edge in edges] #disabled edges!\n\n\ndef _contract_trace(edge: Edge, name: Optional[Text] = None) -> BaseNode:\n \"\"\"Contract a trace edge.\n `edge` is disabled before returning.\n Args:\n edge: The edge name or object to contract next.\n name: Name to give to the new node. If None, a name will automatically be\n generated.\n\n Returns:\n The new node created after the contraction.\n\n Raise:\n ValueError: When edge is a dangling edge.\n \"\"\"\n if edge.is_dangling():\n raise ValueError(\"Attempted to contract dangling edge '{}'\".format(edge))\n if edge.node1 is not edge.node2:\n raise ValueError(\"Can not take trace of edge '{}'. This edge connects to \"\n \"two different nodes: '{}' and '{}\".format(\n edge, edge.node1, edge.node2))\n backend = edge.node1.backend\n axes = sorted([edge.axis1, edge.axis2])\n dims = len(edge.node1.tensor.shape)\n permutation = sorted(set(range(dims)) - set(axes)) + axes\n new_tensor = backend.trace(\n backend.transpose(edge.node1.tensor, perm=permutation))\n name = name if name else edge.node1.name\n new_node = Node(new_tensor, name=name, backend=backend)\n _remove_trace_edge(edge, new_node) #disables edge\n return new_node\n\n\ndef contract(edge: Edge,\n name: Optional[Text] = None,\n axis_names: Optional[List[Text]] = None) -> BaseNode:\n \"\"\"Contract an edge connecting two nodes.\n\n All edges of `node1` and `node2` are passed on to the new node,\n and `node1` and `node2` get a new set of dangling edges.\n `edge` is disabled before returning.\n\n Args:\n edge: The edge to contract.\n name: Name of the new node created.\n\n Returns:\n The new node created after the contraction.\n\n Raises:\n ValueError: When edge is a dangling edge or if it already has been\n contracted.\n \"\"\"\n if edge.is_dangling():\n raise ValueError(\"Attempting to contract dangling edge\")\n\n for node in [edge.node1, edge.node2]:\n if (node is not None) and (not hasattr(node, 'backend')):\n raise TypeError('Node {} of type {} has no `backend`'.format(\n node, type(node)))\n\n if edge.node1.backend.name != edge.node2.backend.name:\n raise ValueError(\"edge.node1 {} and edge.node2 {} have different backends \"\n \"{} and {}\".format(edge.node1.name, edge.node2.name,\n edge.node1.backend.name,\n edge.node2.backend.name))\n\n if edge.node1:\n backend = edge.node1.backend\n else:\n raise ValueError(\"edge {} has no nodes. 
\"\n \"Cannot perform a contraction\".format(edge.name))\n\n backend = edge.node1.backend\n if edge.node1 is edge.node2:\n return _contract_trace(edge, name)\n new_tensor = backend.tensordot(edge.node1.tensor, edge.node2.tensor,\n [[edge.axis1], [edge.axis2]])\n new_node = Node(\n tensor=new_tensor, name=name, axis_names=axis_names, backend=backend.name)\n # edge.node1 and edge.node2 get new edges in _remove_edges\n _remove_edges(set([edge]), edge.node1, edge.node2, new_node)\n return new_node\n\n\ndef contract_copy_node(copy_node: CopyNode,\n name: Optional[Text] = None) -> BaseNode:\n \"\"\"Contract all edges incident on given copy node.\n\n Args:\n copy_node: Copy tensor node to be contracted.\n name: Name of the new node created.\n\n Returns:\n New node representing contracted tensor.\n\n Raises:\n ValueError: If copy_node has dangling edge(s).\n \"\"\"\n new_tensor = copy_node.compute_contracted_tensor()\n new_node = Node(new_tensor, name, backend=copy_node.backend.name)\n\n partners = copy_node.get_partners()\n new_axis = 0\n for partner in partners:\n for edge in partner.edges:\n if edge.node1 is copy_node or edge.node2 is copy_node:\n continue\n old_axis = edge.axis1 if edge.node1 is partner else edge.axis2\n edge.update_axis(\n old_node=partner,\n old_axis=old_axis,\n new_node=new_node,\n new_axis=new_axis)\n new_node.add_edge(edge, new_axis)\n new_axis += 1\n assert len(new_tensor.shape) == new_axis\n copy_node.fresh_edges(copy_node.axis_names)\n return new_node\n\n\ndef contract_parallel(edge: Edge) -> BaseNode:\n \"\"\"Contract all edges parallel to this edge.\n\n This method calls `contract_between` with the nodes connected by the edge.\n\n Args:\n edge: The edge to contract.\n\n Returns:\n The new node created after contraction.\n \"\"\"\n if edge.is_dangling():\n raise ValueError(\"Attempted to contract dangling edge: '{}'\".format(edge))\n return contract_between(edge.node1, edge.node2)\n\n\ndef connect(edge1: Edge, edge2: Edge, name: Optional[Text] = None) -> Edge:\n for edge in [edge1, edge2]:\n if not edge.is_dangling():\n raise ValueError(\"Edge '{}' is not a dangling edge. \"\n \"This edge points to nodes: '{}' and '{}'\".format(\n edge, edge.node1, edge.node2))\n if edge1 is edge2:\n raise ValueError(\"Cannot connect and edge '{}' to itself.\".format(edge1))\n\n if edge1.dimension != edge2.dimension:\n raise ValueError(\"Cannot connect edges of unequal dimension. 
\"\n \"Dimension of edge '{}': {}, \"\n \"Dimension of edge '{}': {}.\".format(\n edge1, edge1.dimension, edge2, edge2.dimension))\n\n #edge1 and edge2 are always dangling in this case\n node1 = edge1.node1\n node2 = edge2.node1\n axis1_num = node1.get_axis_number(edge1.axis1)\n axis2_num = node2.get_axis_number(edge2.axis1)\n\n new_edge = Edge(\n node1=node1, axis1=axis1_num, name=name, node2=node2, axis2=axis2_num)\n\n node1.add_edge(new_edge, axis1_num, override=True)\n node2.add_edge(new_edge, axis2_num, override=True)\n return new_edge\n\n\ndef disconnect(edge,\n edge1_name: Optional[Text] = None,\n edge2_name: Optional[Text] = None) -> Tuple[Edge, Edge]:\n \"\"\"\n Break an existing non-dangling edge.\n This updates both Edge.node1 and Edge.node2 by removing the \n connecting edge from `Edge.node1.edges` and `Edge.node2.edges`\n and adding new dangling edges instead\n \"\"\"\n return edge.disconnect(edge1_name, edge2_name)\n\n\ndef contract_between(\n node1: BaseNode,\n node2: BaseNode,\n name: Optional[Text] = None,\n allow_outer_product: bool = False,\n output_edge_order: Optional[Sequence[Edge]] = None,\n axis_names: Optional[List[Text]] = None,\n) -> BaseNode:\n \"\"\"Contract all of the edges between the two given nodes.\n\n Args:\n node1: The first node.\n node2: The second node.\n name: Name to give to the new node created.\n allow_outer_product: Optional boolean. If two nodes do not share any edges\n and `allow_outer_product` is set to `True`, then we return the outer\n product of the two nodes. Else, we raise a `ValueError`.\n output_edge_order: Optional sequence of Edges. When not `None`, must\n contain all edges belonging to, but not shared by `node1` and `node2`.\n The axes of the new node will be permuted (if necessary) to match this\n ordering of Edges.\n axis_names: An optional list of names for the axis of the new node\n Returns:\n The new node created.\n\n Raises:\n ValueError: If no edges are found between node1 and node2 and\n `allow_outer_product` is set to `False`.\n \"\"\"\n for node in [node1, node2]:\n if not hasattr(node, 'backend'):\n raise TypeError('Node {} of type {} has no `backend`'.format(\n node, type(node)))\n\n if node1.backend.name != node2.backend.name:\n raise ValueError(\"node {} and node {} have different backends \"\n \"{} and {}.\".format(node1.name, node2.name,\n node1.backend.name,\n node2.backend.name))\n\n backend = node1.backend\n # Trace edges cannot be contracted using tensordot.\n if node1 is node2:\n flat_edge = flatten_edges_between(node1, node2)\n if not flat_edge:\n raise ValueError(\"No trace edges found on contraction of edges between \"\n \"node '{}' and itself.\".format(node1))\n return contract(flat_edge, name)\n\n shared_edges = get_shared_edges(node1, node2)\n if not shared_edges:\n if allow_outer_product:\n return outer_product(node1, node2, name=name, axis_names=axis_names)\n raise ValueError(\"No edges found between nodes '{}' and '{}' \"\n \"and allow_outer_product=False.\".format(node1, node2))\n\n # Collect the axis of each node corresponding to each edge, in order.\n # This specifies the contraction for tensordot.\n # NOTE: The ordering of node references in each contraction edge is ignored.\n axes1 = []\n axes2 = []\n for edge in shared_edges:\n if edge.node1 is node1:\n axes1.append(edge.axis1)\n axes2.append(edge.axis2)\n else:\n axes1.append(edge.axis2)\n axes2.append(edge.axis1)\n\n if output_edge_order:\n # Determine heuristically if output transposition can be minimized by\n # flipping the arguments to 
tensordot.\n node1_output_axes = []\n node2_output_axes = []\n for (i, edge) in enumerate(output_edge_order):\n if edge in shared_edges:\n raise ValueError(\n \"Edge '{}' in output_edge_order is shared by the nodes to be \"\n \"contracted: '{}' and '{}'.\".format(edge, node1, node2))\n edge_nodes = set(edge.get_nodes())\n if node1 in edge_nodes:\n node1_output_axes.append(i)\n elif node2 in edge_nodes:\n node2_output_axes.append(i)\n else:\n raise ValueError(\n \"Edge '{}' in output_edge_order is not connected to node '{}' or \"\n \"node '{}'\".format(edge, node1, node2))\n if np.mean(node1_output_axes) > np.mean(node2_output_axes):\n node1, node2 = node2, node1\n axes1, axes2 = axes2, axes1\n\n new_tensor = backend.tensordot(node1.tensor, node2.tensor, [axes1, axes2])\n new_node = Node(\n tensor=new_tensor, name=name, axis_names=axis_names, backend=backend)\n # node1 and node2 get new edges in _remove_edges\n _remove_edges(shared_edges, node1, node2, new_node)\n if output_edge_order:\n new_node = new_node.reorder_edges(list(output_edge_order))\n return new_node\n\n\ndef outer_product_final_nodes(nodes: Iterable[BaseNode],\n edge_order: List[Edge]) -> BaseNode:\n \"\"\"Get the outer product of `nodes`\n\n For example, if there are 3 nodes remaining in `nodes` with \n shapes :math:`(2, 3)`, :math:`(4, 5, 6)`, and :math:`(7)`\n respectively, the newly returned node will have shape \n :math:`(2, 3, 4, 5, 6, 7)`.\n\n Args:\n nodes: A collection of nodes.\n edge_order: Edge order for the final node.\n\n Returns:\n The outer product of the remaining nodes.\n\n Raises:\n ValueError: If any of the remaining nodes are not fully contracted.\n \"\"\"\n nodes = list(nodes)\n for node in nodes:\n if node.has_nondangling_edge():\n raise ValueError(\"Node '{}' has a non-dangling edge remaining.\")\n final_node = nodes[0]\n for node in nodes[1:]:\n final_node = outer_product(final_node, node)\n return final_node.reorder_edges(edge_order)\n\n\ndef outer_product(node1: BaseNode,\n node2: BaseNode,\n name: Optional[Text] = None,\n axis_names: Optional[List[Text]] = None) -> BaseNode:\n \"\"\"Calculates an outer product of the two nodes.\n\n This causes the nodes to combine their edges and axes, so the shapes are\n combined. For example, if `a` had a shape (2, 3) and `b` had a shape\n (4, 5, 6), then the node `net.outer_product(a, b) will have shape\n (2, 3, 4, 5, 6). All edges of `node1` and `node2` are passed on to\n the new node, and `node1` and `node2` get a new set of dangling edges.\n\n Args:\n node1: The first node. The axes on this node will be on the left side of\n the new node.\n node2: The second node. The axes on this node will be on the right side of\n the new node.\n name: Optional name to give the new node created.\n axis_names: An optional list of names for the axis of the new node\n Returns:\n A new node. Its shape will be node1.shape + node2.shape\n Raises:\n TypeError: If `node1` and `node2` have wrong types.\n \"\"\"\n for node in [node1, node2]:\n if not hasattr(node, 'backend'):\n raise TypeError('Node {} of type {} has no `backend`'.format(\n node, type(node)))\n\n if node1.backend.name != node2.backend.name:\n raise ValueError(\"node {} and node {} have different backends. 
\"\n \"Cannot perform outer product\".format(node1, node2))\n\n backend = node1.backend\n if node1.get_rank() == 0 or node2.get_rank() == 0:\n new_tensor = backend.multiply(node1.tensor, node2.tensor)\n else:\n new_tensor = backend.outer_product(node1.tensor, node2.tensor)\n node1_axis_names = node1.axis_names\n node2_axis_names = node2.axis_names\n new_node = Node(\n tensor=new_tensor, name=name, axis_names=axis_names, backend=backend)\n additional_axes = len(node1.tensor.shape)\n\n for i, edge in enumerate(node1.edges):\n edge.update_axis(i, node1, i, new_node)\n for i, edge in enumerate(node2.edges):\n edge.update_axis(i, node2, i + additional_axes, new_node)\n\n for i, edge in enumerate(node1.edges + node2.edges):\n new_node.add_edge(edge, i, True)\n\n node1.fresh_edges(node1_axis_names)\n node2.fresh_edges(node2_axis_names)\n\n return new_node\n\n\nclass NodeCollection:\n \"\"\"Context manager for easy collection of a set or list of nodes.\n\n The following examples are equivalent:\n ```python\n # 1. Using a NodeCollection context:\n nodes_set = set()\n with NodeCollection(nodes_set):\n a = tn.Node(...)\n b = tn.Node(...)\n # 2. Explicitly adding each node to the set:\n nodes_set = set()\n a = tn.Node(...)\n nodes_set.add(a)\n b = tn.Node(...)\n nodes_set.add(b)\n ```\n \"\"\"\n\n def __init__(self, container: Union[Set[BaseNode], List[BaseNode]]):\n \"\"\"Initialize the NodeCollection context manager\n\n Args:\n container: The container to hold the created nodes, can be a list or a\n set.\n\n Raises:\n ValueError: If container is not a list or set.\n \"\"\"\n\n if not isinstance(container, (list, set)):\n raise ValueError(\"Item passed to NodeCollection must be list or set\")\n self._container = container\n\n def add(self, node: BaseNode):\n if isinstance(self._container, set):\n self._container.add(node)\n else:\n self._container.append(node)\n\n def __enter__(self):\n ops._default_collection_stack.stack.append(self)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n ops._default_collection_stack.stack.pop()\n" ]
[ [ "numpy.arange", "numpy.dtype", "numpy.mean", "numpy.prod", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nguyenvanhoang7398/pytorch-transformers
[ "9f995b99d4c4067662c3bd4f1274315c0839deeb" ]
[ "examples/contrib/run_openai_gpt.py" ]
[ "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" OpenAI GPT model fine-tuning script.\n Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py\n It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py\n\n This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:\n python run_openai_gpt.py \\\n --model_name openai-gpt \\\n --do_train \\\n --do_eval \\\n --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\\ -\\ cloze_test_ALL_val.csv \\\n --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\\ -\\ cloze_test_ALL_test.csv \\\n --output_dir ../log \\\n --train_batch_size 16 \\\n\"\"\"\nimport argparse\nimport os\nimport csv\nimport random\nimport logging\nfrom tqdm import tqdm, trange\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\n\nfrom pytorch_transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,\n AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME,\n WarmupLinearSchedule)\n\nROCSTORIES_URL = \"https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz\"\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\ndef load_rocstories_dataset(dataset_path):\n \"\"\" Output a list of tuples(story, 1st continuation, 2nd continuation, label) \"\"\"\n with open(dataset_path, encoding='utf_8') as f:\n f = csv.reader(f)\n output = []\n next(f) # skip the first line\n for line in tqdm(f):\n output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))\n return output\n\ndef pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):\n \"\"\" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)\n\n To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:\n input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]\n \"\"\"\n tensor_datasets = []\n for dataset in encoded_datasets:\n n_batch = len(dataset)\n input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)\n mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)\n lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)\n mc_labels = np.zeros((n_batch,), dtype=np.int64)\n for i, (story, cont1, cont2, mc_label), in enumerate(dataset):\n with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]\n with_cont2 = 
[start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]\n input_ids[i, 0, :len(with_cont1)] = with_cont1\n input_ids[i, 1, :len(with_cont2)] = with_cont2\n mc_token_ids[i, 0] = len(with_cont1) - 1\n mc_token_ids[i, 1] = len(with_cont2) - 1\n lm_labels[i, 0, :len(with_cont1)] = with_cont1\n lm_labels[i, 1, :len(with_cont2)] = with_cont2\n mc_labels[i] = mc_label\n all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)\n tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))\n return tensor_datasets\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name', type=str, default='openai-gpt',\n help='pretrained model name')\n parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument('--train_dataset', type=str, default='')\n parser.add_argument('--eval_dataset', type=str, default='')\n parser.add_argument('--seed', type=int, default=42)\n parser.add_argument('--num_train_epochs', type=int, default=3)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--eval_batch_size', type=int, default=16)\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument('--max_grad_norm', type=int, default=1)\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training \\\n steps to perform. Override num_train_epochs.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before\\\n performing a backward/update pass.\")\n parser.add_argument('--learning_rate', type=float, default=6.25e-5)\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n parser.add_argument('--lr_schedule', type=str, default='warmup_linear')\n parser.add_argument('--weight_decay', type=float, default=0.01)\n parser.add_argument('--lm_coef', type=float, default=0.9)\n parser.add_argument('--n_valid', type=int, default=374)\n\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n print(args)\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n logger.info(\"device: {}, n_gpu {}\".format(device, n_gpu))\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Load tokenizer and model\n # This loading functions also add new tokens and 
embeddings called `special tokens`\n # These new embeddings will be fine-tuned on the RocStories dataset\n special_tokens = ['_start_', '_delimiter_', '_classify_']\n tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)\n tokenizer.add_tokens(special_tokens)\n special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)\n model.resize_token_embeddings(len(tokenizer))\n model.to(device)\n\n # Load and encode the datasets\n if not args.train_dataset and not args.eval_dataset:\n roc_stories = cached_path(ROCSTORIES_URL)\n def tokenize_and_encode(obj):\n \"\"\" Tokenize and encode a nested object \"\"\"\n if isinstance(obj, str):\n return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))\n elif isinstance(obj, int):\n return obj\n return list(tokenize_and_encode(o) for o in obj)\n logger.info(\"Encoding dataset...\")\n train_dataset = load_rocstories_dataset(args.train_dataset)\n eval_dataset = load_rocstories_dataset(args.eval_dataset)\n datasets = (train_dataset, eval_dataset)\n encoded_datasets = tokenize_and_encode(datasets)\n\n # Compute the max input length for the Transformer\n max_length = model.config.n_positions // 2 - 2\n input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 \\\n for dataset in encoded_datasets for story, cont1, cont2, _ in dataset)\n input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model\n\n # Prepare inputs tensors and dataloaders\n tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)\n train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]\n\n train_data = TensorDataset(*train_tensor_dataset)\n train_sampler = RandomSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n eval_data = TensorDataset(*eval_tensor_dataset)\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # Prepare optimizer\n if args.do_train:\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps //\\\n (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader)\\\n // args.gradient_accumulation_steps * args.num_train_epochs\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n\n if args.do_train:\n nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None\n model.train()\n for _ in trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_steps = 0\n tqdm_bar = tqdm(train_dataloader, desc=\"Training\")\n for step, batch in enumerate(tqdm_bar):\n batch = tuple(t.to(device) for t in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels = batch\n losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)\n loss = args.lm_coef * losses[0] 
+ losses[1]\n loss.backward()\n scheduler.step()\n optimizer.step()\n optimizer.zero_grad()\n tr_loss += loss.item()\n exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()\n nb_tr_steps += 1\n tqdm_bar.desc = \"Training loss: {:.2e} lr: {:.2e}\".format(exp_average_loss, scheduler.get_lr()[0])\n\n # Save a trained model\n if args.do_train:\n # Save a trained model, configuration and tokenizer\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(args.output_dir, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(args.output_dir)\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)\n tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)\n model.to(device)\n\n if args.do_eval:\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(device) for t in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels = batch\n with torch.no_grad():\n _, mc_loss, _, mc_logits = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)\n\n mc_logits = mc_logits.detach().cpu().numpy()\n mc_labels = mc_labels.to('cpu').numpy()\n tmp_eval_accuracy = accuracy(mc_logits, mc_labels)\n\n eval_loss += mc_loss.mean().item()\n eval_accuracy += tmp_eval_accuracy\n\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n eval_accuracy = eval_accuracy / nb_eval_examples\n train_loss = tr_loss/nb_tr_steps if args.do_train else None\n result = {'eval_loss': eval_loss,\n 'eval_accuracy': eval_accuracy,\n 'train_loss': train_loss}\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.utils.data.DataLoader", "numpy.full", "torch.tensor", "numpy.argmax", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.cuda.device_count", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luucv/tfjs
[ "78add066f2d6cf2c3bc51d7996fda5186b3126dc" ]
[ "tfjs-converter/python/tensorflowjs/write_weights.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport io\nimport json\nimport math\nimport os\n\nimport numpy as np\n\nfrom tensorflowjs import quantization\nfrom tensorflowjs import read_weights\n\n_OUTPUT_DTYPES = [np.float32, np.int32, np.uint8, np.uint16, np.bool, np.object]\n_AUTO_DTYPE_CONVERSION = {\n np.dtype(np.float64): np.float32,\n np.dtype(np.int64): np.int32}\n\ndef write_weights(\n weight_groups, write_dir, shard_size_bytes=1024 * 1024 * 4,\n write_manifest=True, quantization_dtype=None):\n \"\"\"Writes weights to a binary format on disk for ingestion by JavaScript.\n\n Weights are organized into groups. When writing to disk, the bytes from all\n weights in each group are concatenated together and then split into shards\n (default is 4MB). This means that large weights (> shard_size) get sharded\n and small weights (< shard_size) will be packed. If the bytes can't be split\n evenly into shards, there will be a leftover shard that is smaller than the\n shard size.\n\n Weights are optionally quantized to either 8 or 16 bits for compression,\n which is enabled via the `quantization_dtype` argument.\n\n Args:\n weight_groups: An list of groups. Each group is an array of weight\n entries. Each entry is a dict that maps a unique name to a numpy array,\n for example:\n entry = {\n 'name': 'weight1',\n 'data': np.array([1, 2, 3], 'float32')\n }\n\n Weights groups would then look like:\n weight_groups = [\n [group_0_entry1, group_0_entry2],\n [group_1_entry1, group_1_entry2],\n ]\n\n The 'name' must be unique across all groups and all entries. The 'data'\n field must be a numpy ndarray.\n write_dir: A directory to write the files to.\n shard_size_bytes: The size of shards in bytes. Defaults to 4MB, which is\n the max file size for caching for all major browsers.\n write_manifest: Whether to write the manifest JSON to disk. Defaults to\n True.\n quantization_dtype: An optional numpy dtype to quantize weights to for\n compression. 
Only np.uint8 and np.uint16 are supported.\n Returns:\n The weights manifest JSON dict.\n\n An example manifest with 2 groups, 2 weights, and each weight sharded\n into 2:\n\n The manifest JSON looks like the following:\n [{\n 'paths': ['group1-shard1of2', 'group1-shard2of2'],\n 'weights': [{\n 'name': 'weight1',\n 'shape': [1000, 1000],\n 'dtype': 'float32'\n }]\n }, {\n 'paths': ['group2-shard1of2', 'group2-shard2of2'],\n 'weights': [{\n 'name': 'weight2',\n 'shape': [2000, 2000],\n 'dtype': 'float32'\n }]\n }]\n or, if quantization is used:\n [{\n 'paths': ['group1-shard1of2', 'group1-shard2of2'],\n 'weights': [{\n 'name': 'weight1',\n 'shape': [1000, 1000],\n 'dtype': 'float32'\n 'quantization': {'min': -0.1, 'scale': 0.01, 'dtype': 'uint8'}\n }]\n }, {\n 'paths': ['group2-shard1of2', 'group2-shard2of2'],\n 'weights': [{\n 'name': 'weight2',\n 'shape': [2000, 2000],\n 'dtype': 'float32',\n 'quantization': {'min': -2.4, 'scale': 0.08, 'dtype': 'uint8'}\n }]\n }]\n \"\"\"\n _assert_weight_groups_valid(weight_groups)\n _assert_shard_size_bytes_valid(shard_size_bytes)\n _assert_no_duplicate_weight_names(weight_groups)\n\n manifest = []\n\n for group_index, group in enumerate(weight_groups):\n for e in group:\n _auto_convert_weight_entry(e)\n if quantization_dtype:\n group = [_quantize_entry(e, quantization_dtype) for e in group]\n group_bytes, total_bytes, _ = _stack_group_bytes(group)\n\n shard_filenames = _shard_group_bytes_to_disk(\n write_dir, group_index, group_bytes, total_bytes, shard_size_bytes)\n\n weights_entries = _get_weights_manifest_for_group(group)\n manifest_entry = {\n 'paths': shard_filenames,\n 'weights': weights_entries\n }\n manifest.append(manifest_entry)\n\n if write_manifest:\n manifest_path = os.path.join(write_dir, 'weights_manifest.json')\n with open(manifest_path, 'wb') as f:\n f.write(json.dumps(manifest).encode())\n\n return manifest\n\n\ndef _quantize_entry(entry, quantization_dtype):\n \"\"\"Quantizes the weights in the entry, returning a new entry.\n\n The weights are quantized by linearly re-scaling the values between the\n minimum and maximum value, and representing them with the number of bits\n provided by the `quantization_dtype`.\n\n In order to guarantee that 0 is perfectly represented by one of the quanzitzed\n values, the range is \"nudged\" in the same manner as in TF-Lite.\n\n Args:\n entry: A weight entries to quantize.\n quantization_dtype: An numpy dtype to quantize weights to. 
Only np.uint8 and\n np.uint16 are supported.\n\n Returns:\n A new entry containing the quantized data and additional quantization info,\n for example:\n original_entry = {\n 'name': 'weight1',\n 'data': np.array([0, -0.1, 1.2], 'float32')\n }\n quantized_entry = {\n 'name': 'weight1',\n 'data': np.array([20, 0, 255], 'uint8')\n 'quantization': {'min': -0.10196078817, 'scale': 0.00509803940852,\n 'original_dtype': 'float32'}\n }\n \"\"\"\n data = entry['data']\n # Only float32 tensors are quantized.\n if data.dtype != 'float32':\n return entry\n quantized_data, scale, min_val = quantization.quantize_weights(\n data, quantization_dtype)\n quantized_entry = entry.copy()\n quantized_entry['data'] = quantized_data\n quantized_entry['quantization'] = {\n 'min': min_val, 'scale': scale, 'original_dtype': data.dtype.name}\n return quantized_entry\n\n\ndef _serialize_string_array(data):\n \"\"\"Serializes a numpy array of dtype `string` into bytes.\n\n Each string value is preceded by 4 bytes which denote a 32-bit unsigned\n integer in little endian that specifies the byte length of the following\n string. This is followed by the actual string bytes. If the tensor has no\n strings there will be no bytes reserved. Empty strings will still take 4 bytes\n for the length.\n\n For example, a tensor that has 2 strings will be encoded as\n [byte length of s1][bytes of s1...][byte length of s2][bytes of s2...]\n\n where byte length always takes 4 bytes.\n\n Args:\n data: A numpy array of dtype `string`.\n\n Returns:\n bytes of the entire string tensor to be serialized on disk.\n \"\"\"\n strings = data.flatten().tolist()\n\n string_bytes = io.BytesIO()\n bytes_writer = io.BufferedWriter(string_bytes)\n\n for x in strings:\n encoded = x if isinstance(x, bytes) else x.encode('utf-8')\n length_as_bytes = np.array(len(encoded),\n read_weights.STRING_LENGTH_DTYPE).tobytes()\n bytes_writer.write(length_as_bytes)\n bytes_writer.write(encoded)\n bytes_writer.flush()\n string_bytes.seek(0)\n return string_bytes.read()\n\ndef _serialize_numeric_array(data):\n \"\"\"Serializes a numeric numpy array into bytes.\n\n Args:\n data: A numeric numpy array.\n\n Returns:\n bytes of the array to be serialized on disk.\n \"\"\"\n return data.tobytes()\n\ndef _stack_group_bytes(group):\n \"\"\"Stacks the bytes for a weight group into a flat byte array.\n\n Args:\n group: A list of weight entries.\n Returns:\n A type: (group_bytes, total_bytes, weights_entries, group_bytes_writer)\n group_bytes: The stacked bytes for the group, as a BytesIO() stream.\n total_bytes: A number representing the total size of the byte buffer.\n groups_bytes_writer: The io.BufferedWriter object. 
Returned so that\n group_bytes does not get garbage collected and closed.\n\n \"\"\"\n group_bytes = io.BytesIO()\n group_bytes_writer = io.BufferedWriter(group_bytes)\n total_bytes = 0\n\n for entry in group:\n _assert_valid_weight_entry(entry)\n data = entry['data']\n\n if data.dtype == np.object:\n data_bytes = _serialize_string_array(data)\n else:\n data_bytes = _serialize_numeric_array(data)\n group_bytes_writer.write(data_bytes)\n total_bytes += len(data_bytes)\n\n group_bytes_writer.flush()\n group_bytes.seek(0)\n\n # NOTE: We must return the bytes writer here, otherwise it goes out of scope\n # and python closes the IO operation.\n return (group_bytes, total_bytes, group_bytes_writer)\n\n\ndef _shard_group_bytes_to_disk(\n write_dir, group_index, group_bytes, total_bytes, shard_size_bytes):\n \"\"\"Shards the concatenated bytes for a group to disk.\n\n Args:\n write_dir: The directory to write the files to.\n group_index: The index for the group.\n group_bytes: An io.BytesIO() object representing the byte array.\n total_bytes: The total number of bytes of the stream.\n shard_size_bytes: The size of shards in bytes. If None, the whole byte\n array will be written as one shard.\n Returns:\n A list of filenames that were written to disk.\n \"\"\"\n if shard_size_bytes is None:\n shard_size_bytes = total_bytes\n\n num_shards = int(math.ceil(float(total_bytes) / shard_size_bytes))\n\n filenames = []\n for i in range(num_shards):\n shard = group_bytes.read(shard_size_bytes)\n\n filename = 'group%d-shard%dof%d.bin' % (group_index + 1, i + 1, num_shards)\n filenames.append(filename)\n filepath = os.path.join(write_dir, filename)\n\n # Write the shard to disk.\n with open(filepath, 'wb') as f:\n f.write(shard)\n\n return filenames\n\n\ndef _get_weights_manifest_for_group(group):\n \"\"\"Gets the weights entries manifest JSON for a group.\n\n Args:\n group: A list of weight entries.\n Returns:\n An list of manifest entries (dicts) to be written in the weights manifest.\n \"\"\"\n weights_entries = []\n for entry in group:\n is_quantized = 'quantization' in entry\n dtype = (entry['quantization']['original_dtype']\n if is_quantized else entry['data'].dtype.name)\n var_manifest = {\n 'name': entry['name'],\n 'shape': list(entry['data'].shape),\n 'dtype': dtype\n }\n # String arrays have dtype 'object' and need extra metadata to parse.\n if dtype == 'object':\n var_manifest['dtype'] = 'string'\n if is_quantized:\n var_manifest['quantization'] = {\n 'min': entry['quantization']['min'],\n 'scale': entry['quantization']['scale'],\n 'dtype': entry['data'].dtype.name\n }\n weights_entries.append(var_manifest)\n return weights_entries\n\n\ndef _assert_no_duplicate_weight_names(weight_groups):\n weight_names = set()\n for group in weight_groups:\n for entry in group:\n name = entry['name']\n if name in weight_names:\n raise Exception(\n 'Error dumping weights, duplicate weight name ' + name)\n weight_names.add(name)\n\n\ndef _auto_convert_weight_entry(entry):\n data = entry['data']\n if data.dtype in _AUTO_DTYPE_CONVERSION:\n entry['data'] = data.astype(_AUTO_DTYPE_CONVERSION[data.dtype])\n print('weight ' + entry['name'] + ' with shape ' + str(data.shape) +\n ' and dtype ' + data.dtype.name + ' was auto converted to the type ' +\n np.dtype(_AUTO_DTYPE_CONVERSION[data.dtype]).name)\n\n\ndef _assert_valid_weight_entry(entry):\n if 'name' not in entry:\n raise ValueError('Error dumping weight, no name field found.')\n if 'data' not in entry:\n raise ValueError('Error dumping weight, no data field 
found.')\n\n name = entry['name']\n data = entry['data']\n\n # String tensors can be backed by different numpy dtypes, thus we consolidate\n # to a single 'np.object' dtype.\n if data.dtype.name.startswith('str') or data.dtype.name.startswith('bytes'):\n data = data.astype(np.object)\n entry['data'] = data\n\n\n if not (data.dtype in _OUTPUT_DTYPES or data.dtype in _AUTO_DTYPE_CONVERSION):\n raise ValueError('Error dumping weight ' + name + ', dtype ' +\n data.dtype.name + ' not supported.')\n\n if not isinstance(data, np.ndarray):\n raise ValueError('Error dumping weight ' + name + ', data ' +\n 'must be a numpy ndarray.')\n\n\ndef _assert_weight_groups_valid(weight_groups):\n if not isinstance(weight_groups, list):\n raise Exception('weight_groups must be a list of groups')\n if not weight_groups:\n raise ValueError('weight_groups must have more than one list element')\n for i, weight_group in enumerate(weight_groups):\n if not isinstance(weight_group, list):\n raise ValueError(\n 'weight_groups[' + i + '] must be a list of weight entries')\n for j, weights in enumerate(weight_group):\n if 'name' not in weights:\n raise ValueError(\n 'weight_groups[' + i + '][' + j + '] has no string field \\'name\\'')\n if 'data' not in weights:\n raise ValueError(\n 'weight_groups[' + i + '][' + j + '] has no numpy ' +\n 'array field \\'data\\'')\n if not isinstance(weights['data'], np.ndarray):\n raise ValueError(\n 'weight_groups[' + i + '][' + j + '][\\'data\\'] is not a numpy ' +\n 'array')\n\n\ndef _assert_shard_size_bytes_valid(shard_size_bytes):\n if shard_size_bytes < 0:\n raise ValueError(\n 'shard_size_bytes must be greater than 0, but got %s' %\n shard_size_bytes)\n if not isinstance(shard_size_bytes, int):\n raise ValueError(\n 'shard_size_bytes must be an integer, but got %s' %\n shard_size_bytes)\n" ]
[ [ "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gruebel/ignite
[ "d384dc664b0e9d7241b5f688ca0f92b600a3cb4d" ]
[ "ignite/contrib/metrics/regression/r2_score.py" ]
[ "from typing import Callable, Union\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass R2Score(_BaseRegression):\n r\"\"\"\n Calculates the R-Squared, the\n `coefficient of determination <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_:\n\n :math:`R^2 = 1 - \\frac{\\sum_{j=1}^n(A_j - P_j)^2}{\\sum_{j=1}^n(A_j - \\bar{A})^2}`,\n\n where :math:`A_j` is the ground truth, :math:`P_j` is the predicted value and\n :math:`\\bar{A}` is the mean of the ground truth.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)` and of type `float32`.\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n self._num_examples = None\n self._sum_of_errors = None\n self._y_sq_sum = None\n self._y_sum = None\n super(R2Score, self).__init__(output_transform, device)\n\n @reinit__is_reduced\n def reset(self):\n self._num_examples = 0\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n self._y_sq_sum = torch.tensor(0.0, device=self._device)\n self._y_sum = torch.tensor(0.0, device=self._device)\n\n def _update(self, output):\n y_pred, y = output\n self._num_examples += y.shape[0]\n self._sum_of_errors += torch.sum(torch.pow(y_pred - y, 2)).to(self._device)\n\n self._y_sum += torch.sum(y).to(self._device)\n self._y_sq_sum += torch.sum(torch.pow(y, 2)).to(self._device)\n\n @sync_all_reduce(\"_num_examples\", \"_sum_of_errors\", \"_y_sq_sum\", \"_y_sum\")\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\"R2Score must have at least one example before it can be computed.\")\n return 1 - self._sum_of_errors.item() / (self._y_sq_sum.item() - (self._y_sum.item() ** 2) / self._num_examples)\n" ]
[ [ "torch.device", "torch.sum", "torch.pow", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PRASAD-DANGARE/Machine_Learning-Applications
[ "e2fa540d44993dc0750d95ce6ad686facd3bb769" ]
[ "Application15/Boosting_IRIS.py" ]
[ "'''\nClassifier : AdaBoostClassifier\nDataset : Iris Dataset\nFeatures : Sepal Width, Sepal Length, Petal Width, Petal Length\nLabels : Versicolor, Setosa, Virginica \n\nTraining Dataset : 70% of 150 Entries\nTesting Dataset : 30% of 150 Entries\n\nAuthor : Prasad Dangare\nDate : 12 July 2021\n \nFunction Name : Boosting\n\n'''\n\n#====================\n#\n# IMPORTS\n#\n#====================\n\nfrom sklearn.ensemble import AdaBoostClassifier \nfrom sklearn import datasets \nfrom sklearn.model_selection import train_test_split \nfrom sklearn import metrics \n\n#=====================\n#\n# Accuracy Operation\n#\n#=====================\n\ndef Boosting():\n\n # Load data\n iris = datasets.load_iris() \n X = iris.data \n y = iris.target \n\n # Split dataset into training set and test set\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3) # 70% training and 30% test\n\n # Create adaboost classifer object\n abc = AdaBoostClassifier(n_estimators = 50, learning_rate = 1, random_state = 2)\n\n # Train Adaboost Classifer\n model = abc.fit(X_train, y_train) \n\n #Predict the response for test dataset\n y_pred = model.predict(X_test) \n\n print(\"Accuracy Using AdaBoost : \", metrics.accuracy_score(y_test, y_pred) * 100) \n\n#=====================\n#\n# ENTRY POINT \n#\n#=====================\n\ndef main():\n\n Boosting()\n\n#=====================\n#\n# CODE STARTER\n#\n#=====================\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.ensemble.AdaBoostClassifier", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ehbussell/PatchOptimalControl
[ "8a422a2b0b85b3ea4f6898bf2df9cdd3a9e4c52f" ]
[ "patch_model/bocop_utils.py" ]
[ "\"\"\"Functions to help when using the BOCOP direct solver for optimal control\nproblems.\"\"\"\n\nimport logging\nimport os\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n\ndef readSolFile(file=\"problem.sol\", ignore_fail=False):\n \"\"\"Read BOCOP solution file and extract Xt, Lt and Ut as interpolated functions.\"\"\"\n\n with open(file, 'r') as f:\n allLines = f.readlines()\n\n for i, line in enumerate(allLines):\n if \"time.initial\" in line:\n time_init = float(line.split()[-1])\n if \"time.final\" in line:\n time_final = float(line.split()[-1])\n if \"state.dimension\" in line:\n state_dim = int(line.split()[-1])\n if \"control.dimension\" in line:\n control_dim = int(line.split()[-1])\n if \"discretization.steps\" in line:\n time_steps = int(line.split()[-1])\n\n times = np.linspace(time_init, time_final, time_steps + 1)\n\n Xt = []\n Lt = []\n Ut = []\n\n for i, line in enumerate(allLines):\n for j in range(state_dim):\n if \"# State \" + str(j) + \"\\n\" == line:\n Xt.append(list(map(float, allLines[i+1:i+2+time_steps])))\n\n for j in range(control_dim):\n if \"# Control \" + str(j) + \"\\n\" == line:\n Ut.append(list(map(float, allLines[i+1:i+1+2*time_steps])))\n\n for j in range(state_dim):\n if (\"# Dynamic constraint \" + str(j)) in line:\n Lt.append(list(map(float, allLines[i+2:i+2+time_steps])))\n\n results_file = os.path.join(os.path.dirname(file), \"result.out\")\n with open(results_file, \"r\") as infile:\n result_lines = infile.readlines()\n\n exit_text = None\n for line in result_lines:\n if \"EXIT\" in line:\n exit_text = line[6:].strip()\n\n if (exit_text != \"Optimal Solution Found.\"\n and exit_text != \"Solved To Acceptable Level.\") and not ignore_fail:\n raise RuntimeError(\"BOCOP optimisation failed with code: {0}\".format(exit_text))\n\n if exit_text == \"Some uncaught Ipopt exception encountered.\":\n logging.error(\"Uncaught Ipopt exception. Try re-running optimiser.\")\n return (None, None, None, exit_text)\n\n Xt = interp1d(times, Xt, fill_value=\"extrapolate\")\n Lt = interp1d(times[:-1], Lt, fill_value=\"extrapolate\")\n Ut_mean = np.mean(np.reshape(np.array(Ut), (control_dim, -1, 2)), axis=2)\n Ut = interp1d(times[:-1], Ut_mean, fill_value=\"extrapolate\")\n\n return (Xt, Lt, Ut, exit_text)\n" ]
[ [ "numpy.array", "scipy.interpolate.interp1d", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
Artia-Inspirenet/module-2
[ "7cf1d74f13d23a11ce202436d88b283d7ef1e109" ]
[ "pretraining/hed.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: hed.py\n# Author: Yuxin Wu\n\nimport cv2\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nfrom six.moves import zip\nimport os\n\n\nfrom tensorpack import *\nfrom tensorpack.dataflow import dataset\nfrom tensorpack.utils.gpu import get_num_gpu\nfrom tensorpack.tfutils import optimizer, gradproc\nfrom tensorpack.tfutils.summary import add_moving_summary, add_param_summary\n\n\ndef class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'):\n \"\"\"\n The class-balanced cross entropy loss,\n as in `Holistically-Nested Edge Detection\n <http://arxiv.org/abs/1504.06375>`_.\n\n Args:\n logits: of shape (b, ...).\n label: of the same shape. the ground truth in {0,1}.\n Returns:\n class-balanced cross entropy loss.\n \"\"\"\n with tf.name_scope('class_balanced_sigmoid_cross_entropy'):\n y = tf.cast(label, tf.float32)\n\n count_neg = tf.reduce_sum(1. - y)\n count_pos = tf.reduce_sum(y)\n beta = count_neg / (count_neg + count_pos)\n\n pos_weight = beta / (1 - beta)\n cost = tf.nn.weighted_cross_entropy_with_logits(logits=logits, targets=y, pos_weight=pos_weight)\n cost = tf.reduce_mean(cost * (1 - beta))\n zero = tf.equal(count_pos, 0.0)\n return tf.where(zero, 0.0, cost, name=name)\n\n\n@layer_register(log_shape=True)\ndef CaffeBilinearUpSample(x, shape):\n \"\"\"\n Deterministic bilinearly-upsample the input images.\n It is implemented by deconvolution with \"BilinearFiller\" in Caffe.\n It is aimed to mimic caffe behavior.\n\n Args:\n x (tf.Tensor): a NHWC tensor\n shape (int): the upsample factor\n\n Returns:\n tf.Tensor: a NHWC tensor.\n \"\"\"\n inp_shape = x.shape.as_list()\n ch = inp_shape[3]\n assert ch is not None\n\n shape = int(shape)\n filter_shape = 2 * shape\n\n def bilinear_conv_filler(s):\n \"\"\"\n s: width, height of the conv filter\n https://github.com/BVLC/caffe/blob/99bd99795dcdf0b1d3086a8d67ab1782a8a08383/include/caffe/filler.hpp#L219-L268\n \"\"\"\n f = np.ceil(float(s) / 2)\n c = float(2 * f - 1 - f % 2) / (2 * f)\n ret = np.zeros((s, s), dtype='float32')\n for x in range(s):\n for y in range(s):\n ret[x, y] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))\n return ret\n w = bilinear_conv_filler(filter_shape)\n w = np.repeat(w, ch * ch).reshape((filter_shape, filter_shape, ch, ch))\n\n weight_var = tf.constant(w, tf.float32,\n shape=(filter_shape, filter_shape, ch, ch),\n name='bilinear_upsample_filter')\n x = tf.pad(x, [[0, 0], [shape - 1, shape - 1], [shape - 1, shape - 1], [0, 0]], mode='SYMMETRIC')\n out_shape = tf.shape(x) * tf.constant([1, shape, shape, 1], tf.int32)\n deconv = tf.nn.conv2d_transpose(x, weight_var, out_shape,\n [1, shape, shape, 1], 'SAME')\n edge = shape * (shape - 1)\n deconv = deconv[:, edge:-edge, edge:-edge, :]\n\n if inp_shape[1]:\n inp_shape[1] *= shape\n if inp_shape[2]:\n inp_shape[2] *= shape\n deconv.set_shape(inp_shape)\n return deconv\n\n\nclass Model(ModelDesc):\n def inputs(self):\n return [tf.placeholder(tf.float32, [None, None, None, 3], 'image'),\n tf.placeholder(tf.int32, [None, None, None], 'edgemap')]\n\n def build_graph(self, image, edgemap):\n image = image - tf.constant([104, 116, 122], dtype='float32')\n edgemap = tf.expand_dims(edgemap, 3, name='edgemap4d')\n\n def branch(name, l, up):\n with tf.variable_scope(name):\n l = Conv2D('convfc', l, 1, kernel_size=1, activation=tf.identity,\n use_bias=True,\n kernel_initializer=tf.constant_initializer())\n while up != 1:\n l = CaffeBilinearUpSample('upsample{}'.format(up), l, 2)\n up = up / 2\n 
return l\n\n with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):\n l = Conv2D('conv1_1', image, 64)\n l = Conv2D('conv1_2', l, 64)\n b1 = branch('branch1', l, 1)\n l = MaxPooling('pool1', l, 2)\n\n l = Conv2D('conv2_1', l, 128)\n l = Conv2D('conv2_2', l, 128)\n b2 = branch('branch2', l, 2)\n l = MaxPooling('pool2', l, 2)\n\n l = Conv2D('conv3_1', l, 256)\n l = Conv2D('conv3_2', l, 256)\n l = Conv2D('conv3_3', l, 256)\n b3 = branch('branch3', l, 4)\n l = MaxPooling('pool3', l, 2)\n\n l = Conv2D('conv4_1', l, 512)\n l = Conv2D('conv4_2', l, 512)\n l = Conv2D('conv4_3', l, 512)\n b4 = branch('branch4', l, 8)\n l = MaxPooling('pool4', l, 2)\n\n l = Conv2D('conv5_1', l, 512)\n l = Conv2D('conv5_2', l, 512)\n l = Conv2D('conv5_3', l, 512)\n b5 = branch('branch5', l, 16)\n\n final_map = Conv2D('convfcweight',\n tf.concat([b1, b2, b3, b4, b5], 3), 1, kernel_size=1,\n kernel_initializer=tf.constant_initializer(0.2),\n use_bias=False, activation=tf.identity)\n costs = []\n for idx, b in enumerate([b1, b2, b3, b4, b5, final_map]):\n output = tf.nn.sigmoid(b, name='output{}'.format(idx + 1))\n xentropy = class_balanced_sigmoid_cross_entropy(\n b, edgemap,\n name='xentropy{}'.format(idx + 1))\n costs.append(xentropy)\n\n # some magic threshold\n pred = tf.cast(tf.greater(output, 0.5), tf.int32, name='prediction')\n wrong = tf.cast(tf.not_equal(pred, edgemap), tf.float32)\n wrong = tf.reduce_mean(wrong, name='train_error')\n\n if get_current_tower_context().is_training:\n wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),\n 80000, 0.7, True)\n wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')\n costs.append(wd_cost)\n\n add_param_summary(('.*/W', ['histogram'])) # monitor W\n total_cost = tf.add_n(costs, name='cost')\n add_moving_summary(wrong, total_cost, *costs)\n return total_cost\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=3e-5, trainable=False)\n opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)\n return optimizer.apply_grad_processors(\n opt, [gradproc.ScaleGradient(\n [('convfcweight.*', 0.1), ('conv5_.*', 5)])])\n\n\ndef get_data(name):\n isTrain = name == 'train'\n ds = dataset.BSDS500(name, shuffle=True)\n\n class CropMultiple16(imgaug.ImageAugmentor):\n def _get_augment_params(self, img):\n newh = img.shape[0] // 16 * 16\n neww = img.shape[1] // 16 * 16\n assert newh > 0 and neww > 0\n diffh = img.shape[0] - newh\n h0 = 0 if diffh == 0 else self.rng.randint(diffh)\n diffw = img.shape[1] - neww\n w0 = 0 if diffw == 0 else self.rng.randint(diffw)\n return (h0, w0, newh, neww)\n\n def _augment(self, img, param):\n h0, w0, newh, neww = param\n return img[h0:h0 + newh, w0:w0 + neww]\n\n if isTrain:\n shape_aug = [\n imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),\n aspect_ratio_thres=0.15),\n imgaug.RotationAndCropValid(90),\n CropMultiple16(),\n imgaug.Flip(horiz=True),\n imgaug.Flip(vert=True)\n ]\n else:\n # the original image shape (321x481) in BSDS is not a multiple of 16\n IMAGE_SHAPE = (320, 480)\n shape_aug = [imgaug.CenterCrop(IMAGE_SHAPE)]\n ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)\n\n def f(m): # thresholding\n m[m >= 0.50] = 1\n m[m < 0.50] = 0\n return m\n ds = MapDataComponent(ds, f, 1)\n\n if isTrain:\n augmentors = [\n imgaug.Brightness(63, clip=False),\n imgaug.Contrast((0.4, 1.5)),\n ]\n ds = AugmentImageComponent(ds, augmentors, copy=False)\n ds = BatchDataByShape(ds, 8, idx=0)\n ds = PrefetchDataZMQ(ds, 1)\n else:\n ds = BatchData(ds, 1)\n return ds\n\n\ndef 
view_data():\n ds = RepeatedData(get_data('train'), -1)\n ds.reset_state()\n for ims, edgemaps in ds.get_data():\n for im, edgemap in zip(ims, edgemaps):\n assert im.shape[0] % 16 == 0 and im.shape[1] % 16 == 0, im.shape\n cv2.imshow(\"im\", im / 255.0)\n cv2.waitKey(1000)\n cv2.imshow(\"edge\", edgemap)\n cv2.waitKey(1000)\n\n\ndef get_config():\n logger.auto_set_dir()\n dataset_train = get_data('train')\n steps_per_epoch = dataset_train.size() * 40\n dataset_val = get_data('val')\n\n return TrainConfig(\n dataflow=dataset_train,\n callbacks=[\n ModelSaver(),\n ScheduledHyperParamSetter('learning_rate', [(30, 6e-6), (45, 1e-6), (60, 8e-7)]),\n HumanHyperParamSetter('learning_rate'),\n InferenceRunner(dataset_val,\n BinaryClassificationStats('prediction', 'edgemap4d'))\n ],\n model=Model(),\n steps_per_epoch=steps_per_epoch,\n max_epoch=100,\n )\n\n\ndef run(model_path, im):\n pred_config = PredictConfig(\n model=Model(),\n session_init=get_model_loader(model_path),\n input_names=['image'],\n output_names=['output2'])\n predictor = OfflinePredictor(pred_config)\n outputs = predictor(im)\n kernel = np.ones((1,1),np.uint8)\n output = cv2.dilate(outputs[0][0]*255,kernel,iterations = 1)\n thresh1 = cv2.threshold(output, 127, 255, cv2.THRESH_BINARY_INV)[1]\n return thresh1\n #cv2.imwrite(output_path, thresh1)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--view', help='view dataset', action='store_true')\n parser.add_argument('--run', help='run model on images')\n parser.add_argument('--output', help='fused output filename. default to out-fused.png')\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n if args.view:\n view_data()\n elif args.run:\n run(args.load, args.run, args.output)\n else:\n config = get_config()\n if args.load:\n config.session_init = get_model_loader(args.load)\n launch_train_with_config(\n config,\n SyncMultiGPUTrainer(max(get_num_gpu(), 1)))\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "tensorflow.nn.conv2d_transpose", "tensorflow.pad", "tensorflow.where", "tensorflow.train.AdamOptimizer", "tensorflow.add_n", "tensorflow.greater", "tensorflow.name_scope", "numpy.repeat", "numpy.zeros", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.not_equal", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.expand_dims", "numpy.ones", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.nn.weighted_cross_entropy_with_logits" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
wolfo1/IML.HUJI
[ "0b32e552774d0be747547ab8b3eedbcd19cc11e7" ]
[ "IMLearn/learners/classifiers/gaussian_naive_bayes.py" ]
[ "from typing import NoReturn\nfrom ...base import BaseEstimator\nimport numpy as np\nimport pandas as pd\n\n\nclass GaussianNaiveBayes(BaseEstimator):\n \"\"\"\n Gaussian Naive-Bayes classifier\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Instantiate a Gaussian Naive Bayes classifier\n\n Attributes\n ----------\n self.classes_ : np.ndarray of shape (n_classes,)\n The different labels classes. To be set in `GaussianNaiveBayes.fit`\n\n self.mu_ : np.ndarray of shape (n_classes,n_features)\n The estimated features means for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.vars_ : np.ndarray of shape (n_classes, n_features)\n The estimated features variances for each class. To be set in `GaussianNaiveBayes.fit`\n\n self.pi_: np.ndarray of shape (n_classes)\n The estimated class probabilities. To be set in `GaussianNaiveBayes.fit`\n \"\"\"\n super().__init__()\n self.classes_, self.mu_, self.vars_, self.pi_ = None, None, None, None\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n fits a gaussian naive bayes model\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n self.classes_ = np.unique(y)\n # features mean for each class\n self.mu_ = np.array([np.mean(X[y == i], axis=0) for i in self.classes_])\n # class probability\n self.pi_ = np.array([np.mean(y == i) for i in self.classes_])\n # feature variation by class\n self.vars_ = np.array([np.var(X[y == k], axis=0) for k in self.classes_])\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n max_matrix = np.multiply(self.likelihood(X), self.pi_)\n return self.classes_[max_matrix.argmax(axis=1)]\n\n def likelihood(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate the likelihood of a given data over the estimated model\n\n Parameters\n ----------\n X : np.ndarray of shape (n_samples, n_features)\n Input data to calculate its likelihood over the different classes.\n\n Returns\n -------\n likelihoods : np.ndarray of shape (n_samples, n_classes)\n The likelihood for each sample under each of the classes\n\n \"\"\"\n if not self.fitted_:\n raise ValueError(\"Estimator must first be fitted before calling `likelihood` function\")\n likelihood_matrix = []\n for i in range(self.classes_.size):\n half1 = 1 / np.sqrt((self.vars_[i]) * (2 * np.pi))\n half2 = np.exp(-0.5 / self.vars_[i] * (X - self.mu_[i]) ** 2)\n likelihood_matrix.append(np.prod(half2 * half1, axis=1))\n return np.array(likelihood_matrix).T\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under misclassification loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under missclassification loss function\n \"\"\"\n from ...metrics import misclassification_error\n return misclassification_error(y, self.predict(X))\n" ]
[ [ "numpy.sqrt", "numpy.unique", "numpy.exp", "numpy.mean", "numpy.prod", "numpy.var", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
balopat/quantum
[ "8c85ab228c9cabceacfa0438d7886a7eca307016" ]
[ "tensorflow_quantum/core/ops/circuit_execution_ops.py" ]
[ "# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A module for user-facing generators of tfq ops.\"\"\"\nimport enum\n\nimport cirq\nimport tensorflow as tf\n\nfrom tensorflow_quantum.core.ops import (cirq_ops, tfq_simulate_ops,\n tfq_utility_ops)\nfrom tensorflow_quantum.python import quantum_context\n\n_GLOBAL_OP_LOCK = tf.CriticalSection()\n\n\nclass TFQWavefunctionSimulator(enum.Enum):\n \"\"\"Enum to make specifying TFQ simulators user-friendly.\"\"\"\n expectation = tfq_simulate_ops.tfq_simulate_expectation\n samples = tfq_simulate_ops.tfq_simulate_samples\n state = tfq_simulate_ops.tfq_simulate_state\n sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation\n\n\ndef _check_quantum_concurrent(quantum_concurrent):\n if not isinstance(quantum_concurrent, bool):\n raise TypeError(\"quantum_concurrent must be type bool.\"\n \" Given: {}\".format(str(type(quantum_concurrent))))\n\n\ndef get_expectation_op(\n backend=None,\n *,\n quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):\n \"\"\"Get a TensorFlow op that will calculate batches of expectation values.\n\n This function produces a non-differentiable TF op that will calculate\n batches of expectation values given tensor batches of `cirq.Circuit`s,\n parameter values, and `cirq.PauliSum` operators to measure.\n\n\n >>> # Simulate circuits with C++.\n >>> my_op = tfq.get_expectation_op()\n >>> # Prepare some inputs.\n >>> qubit = cirq.GridQubit(0, 0)\n >>> my_symbol = sympy.Symbol('alpha')\n >>> my_circuit_tensor = tfq.convert_to_tensor([\n ... cirq.Circuit(cirq.H(qubit) ** my_symbol)\n ... ])\n >>> my_values = np.array([[0.123]])\n >>> my_paulis = tfq.convert_to_tensor([[\n ... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit)\n ... ]])\n >>> # This op can now be run with:\n >>> output = my_op(\n ... my_circuit_tensor, ['alpha'], my_values, my_paulis)\n >>> output\n tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32)\n\n\n In order to make the op differentiable, a `tfq.differentiator` object is\n needed. see `tfq.differentiators` for more details. Below is a simple\n example of how to make my_op from the above code block differentiable:\n\n >>> diff = tfq.differentiators.ForwardDifference()\n >>> my_differentiable_op = diff.generate_differentiable_op(\n ... analytic_op=my_op\n ... )\n\n\n Args:\n backend: Optional Python `object` that specifies what backend this op\n should use when evaluating circuits. Can be any\n `cirq.SimulatesFinalState`. If not provided the default C++\n analytical expectation calculation op is returned.\n quantum_concurrent: Optional Python `bool`. True indicates that the\n returned op should not block graph level parallelism on itself when\n executing. False indicates that graph level parallelism on itself\n should be blocked. 
Defaults to value specified in\n `tfq.get_quantum_concurrent_op_mode` which defaults to True\n (no blocking). This flag is only needed for advanced users when\n using TFQ for very large simulations, or when running on a real\n chip.\n\n Returns:\n A `callable` with the following signature:\n\n ```op(programs, symbol_names, symbol_values, pauli_sums)```\n\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n containing the string representation of the operators that will\n be used on all of the circuits in the expectation calculations.\n\n Returns:\n `tf.Tensor` with shape [batch_size, n_ops] that holds the\n expectation value for each circuit with each op applied to it\n (after resolving the corresponding parameters in).\n \"\"\"\n\n # TODO (mbbrough): investigate how the above docstring renders.\n _check_quantum_concurrent(quantum_concurrent)\n\n op = None\n if backend is None:\n op = TFQWavefunctionSimulator.expectation\n\n if isinstance(backend, cirq.SimulatesFinalState):\n op = cirq_ops._get_cirq_analytical_expectation(backend)\n\n if op is not None:\n if quantum_concurrent is True:\n # Return an op that does not block graph level parallelism.\n return lambda programs, symbol_names, symbol_values, pauli_sums: \\\n op(programs, symbol_names, symbol_values, pauli_sums)\n\n # Return an op that does block graph level parallelism.\n return lambda programs, symbol_names, symbol_values, pauli_sums: \\\n _GLOBAL_OP_LOCK.execute(lambda: op(\n programs, symbol_names, symbol_values, pauli_sums))\n\n if isinstance(backend, (cirq.SimulatesSamples, cirq.Sampler)):\n raise NotImplementedError(\"Sample-based expectation is not supported.\"\n \" Use \"\n \"tf.get_sampled_expectation_op() instead.\")\n\n raise TypeError(\"Backend {} is invalid. Expected a Cirq.SimulatesFinalState\"\n \" or None.\".format(backend))\n\n\ndef get_sampling_op(\n backend=None,\n *,\n quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):\n \"\"\"Get a Tensorflow op that produces samples from given quantum circuits.\n\n This function produces a non-differentiable op that will calculate\n batches of circuit samples given tensor batches of `cirq.Circuit`s,\n parameter values, and a scalar telling the op how many samples to take.\n\n\n >>> # Simulate circuits with cirq.\n >>> my_op = tfq.get_sampling_op(backend=cirq.sim.Simulator())\n >>> # Simulate circuits with C++.\n >>> my_second_op = tfq.get_sampling_op()\n >>> # Prepare some inputs.\n >>> qubit = cirq.GridQubit(0, 0)\n >>> my_symbol = sympy.Symbol('alpha')\n >>> my_circuit_tensor = tfq.convert_to_tensor(\n ... [cirq.Circuit(cirq.X(qubit)**my_symbol)])\n >>> my_values = np.array([[2.0]])\n >>> n_samples = np.array([10])\n >>> # This op can now be run to take samples.\n >>> output = my_second_op(\n ... 
my_circuit_tensor, ['alpha'], my_values, n_samples)\n >>> output\n <tf.RaggedTensor [[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]]]>\n\n\n Args:\n backend: Optional Python `object` that specifies what backend this op\n should use when evaluating circuits. Can be any `cirq.Sampler`. If\n not provided the default C++ sampling op is returned.\n quantum_concurrent: Optional Python `bool`. True indicates that the\n returned op should not block graph level parallelism on itself when\n executing. False indicates that graph level parallelism on itself\n should be blocked. Defaults to value specified in\n `tfq.get_quantum_concurrent_op_mode` which defaults to True\n (no blocking). This flag is only needed for advanced users when\n using TFQ for very large simulations, or when running on a real\n chip.\n\n Returns:\n A `callable` with the following signature:\n\n ```op(programs, symbol_names, symbol_values, num_samples)```\n\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n num_samples: `tf.Tensor` with one element indicating the number of\n samples to draw.\n\n Returns:\n `tf.Tensor` with shape\n [batch_size, num_samples, <ragged> n_qubits] that\n holds samples (as boolean values) for each circuit.\n \"\"\"\n\n # TODO (mbbrough): investigate how the above docstring renders.\n _check_quantum_concurrent(quantum_concurrent)\n\n op = None\n if backend is None:\n op = TFQWavefunctionSimulator.samples\n\n if isinstance(backend, cirq.Sampler):\n op = cirq_ops._get_cirq_samples(backend)\n\n if op is not None:\n if quantum_concurrent is True:\n # Return an op that does not block graph level parallelism.\n return lambda programs, symbol_names, symbol_values, num_samples: \\\n tfq_utility_ops.padded_to_ragged(\n op(programs, symbol_names, symbol_values, num_samples))\n\n return lambda programs, symbol_names, symbol_values, num_samples: \\\n _GLOBAL_OP_LOCK.execute(lambda: tfq_utility_ops.padded_to_ragged(\n op(programs, symbol_names, symbol_values, num_samples)))\n\n raise TypeError(\"Backend {} is invalid. Expected a Cirq.Sampler \"\n \"or None.\".format(backend))\n\n\ndef get_state_op(\n backend=None,\n *,\n quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):\n \"\"\"Get a TensorFlow op that produces states from given quantum circuits.\n\n This function produces a non-differentiable op that will calculate\n batches of state tensors given tensor batches of `cirq.Circuit`s and\n parameter values.\n\n\n >>> # Simulate circuits with cirq.\n >>> my_op = tfq.get_state_op(backend=cirq.DensityMatrixSimulator())\n >>> # Simulate circuits with C++.\n >>> my_second_op = tfq.get_state_op()\n >>> # Prepare some inputs.\n >>> qubit = cirq.GridQubit(0, 0)\n >>> my_symbol = sympy.Symbol('alpha')\n >>> my_circuit_tensor = tfq.convert_to_tensor([\n ... cirq.Circuit(cirq.Y(qubit) ** my_symbol)\n ... 
])\n >>> my_values = np.array([[0.5]])\n >>> # This op can now be run to calculate the state.\n >>> output = my_second_op(my_circuit_tensor, ['alpha'], my_values)\n >>> output\n <tf.RaggedTensor [[(0.5+0.5j), (0.5+0.5j)]]>\n\n\n Args:\n backend: Optional Python `object` that specifies what backend this op\n should use when evaluating circuits. Can be any\n `cirq.SimulatesFinalState`. If not provided, the default C++\n wavefunction simulator will be used.\n quantum_concurrent: Optional Python `bool`. True indicates that the\n returned op should not block graph level parallelism on itself when\n executing. False indicates that graph level parallelism on itself\n should be blocked. Defaults to value specified in\n `tfq.get_quantum_concurrent_op_mode` which defaults to True\n (no blocking). This flag is only needed for advanced users when\n using TFQ for very large simulations, or when running on a real\n chip.\n\n Returns:\n A `callable` with the following signature:\n\n ```op(programs, symbol_names, symbol_values)```\n\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n\n Returns:\n `tf.Tensor` with shape [batch_size, <ragged> size of state] that\n contains the state information of the circuit.\n \"\"\"\n\n # TODO (mbbrough): investigate how the above docstring renders.\n _check_quantum_concurrent(quantum_concurrent)\n\n op = None\n if backend is None:\n op = TFQWavefunctionSimulator.state\n\n if isinstance(backend, (cirq.SimulatesFinalState)):\n op = cirq_ops._get_cirq_simulate_state(backend)\n\n if op is not None:\n if quantum_concurrent is True:\n # Return an op that does not block graph level parallelism.\n return lambda programs, symbol_names, symbol_values: \\\n tfq_utility_ops.padded_to_ragged(\n op(programs, symbol_names, symbol_values))\n\n # Return an op that does block graph level parallelism.\n return lambda programs, symbol_names, symbol_values: \\\n _GLOBAL_OP_LOCK.execute(lambda: tfq_utility_ops.padded_to_ragged(\n op(programs, symbol_names, symbol_values)))\n\n raise TypeError(\"Backend {} is invalid. Expected a Cirq.SimulatesFinalState\"\n \" or None.\".format(backend))\n\n\ndef get_sampled_expectation_op(\n backend=None,\n *,\n quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):\n \"\"\"Get a TensorFlow op that will calculate sampled expectation values.\n\n This function produces a non-differentiable TF op that will calculate\n batches of expectation values given tensor batches of `cirq.Circuit`s,\n parameter values, and `cirq.PauliSum` operators to measure.\n Expectation is estimated by taking num_samples shots per term in the\n corresponding PauliSum.\n\n\n >>> # Simulate circuits with C++.\n >>> my_op = tfq.get_sampled_expectation_op()\n >>> # Prepare some inputs.\n >>> qubit = cirq.GridQubit(0, 0)\n >>> my_symbol = sympy.Symbol('alpha')\n >>> my_circuit_tensor = tfq.convert_to_tensor([\n ... cirq.Circuit(cirq.H(qubit) ** my_symbol)\n ... ])\n >>> my_values = np.array([[0.123]])\n >>> my_paulis = tfq.convert_to_tensor([[\n ... 
3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit)\n ... ]])\n >>> my_num_samples = np.array([[100]])\n >>> # This op can now be run with:\n >>> output = my_op(\n ... my_circuit_tensor, ['alpha'], my_values, my_paulis, my_num_samples)\n >>> output\n tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32)\n\n\n In order to make the op differentiable, a `tfq.differentiator` object is\n needed. see `tfq.differentiators` for more details. Below is a simple\n example of how to make my_op from the above code block differentiable:\n\n\n >>> diff = tfq.differentiators.ForwardDifference()\n >>> my_differentiable_op = diff.generate_differentiable_op(\n ... analytic_op=my_op\n ... )\n\n Args:\n backend: Optional Python `object` that specifies what backend this op\n should use when evaluating circuits. Can be any `cirq.Sampler`. If\n not provided the default C++ sampled expectation op is returned.\n quantum_concurrent: Optional Python `bool`. True indicates that the\n returned op should not block graph level parallelism on itself when\n executing. False indicates that graph level parallelism on itself\n should be blocked. Defaults to value specified in\n `tfq.get_quantum_concurrent_op_mode` which defaults to True\n (no blocking). This flag is only needed for advanced users when\n using TFQ for very large simulations, or when running on a real\n chip.\n\n Returns:\n A `callable` with the following signature:\n\n ```op(programs, symbol_names, symbol_values, pauli_sums, num_samples)```\n\n programs: `tf.Tensor` of strings with shape [batch_size] containing\n the string representations of the circuits to be executed.\n symbol_names: `tf.Tensor` of strings with shape [n_params], which\n is used to specify the order in which the values in\n `symbol_values` should be placed inside of the circuits in\n `programs`.\n symbol_values: `tf.Tensor` of real numbers with shape\n [batch_size, n_params] specifying parameter values to resolve\n into the circuits specified by programs, following the ordering\n dictated by `symbol_names`.\n pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n containing the string representation of the operators that will\n be used on all of the circuits in the expectation calculations.\n num_samples: `tf.Tensor` with `num_samples[i][j]` is equal to the\n number of samples to draw in each term of `pauli_sums[i][j]`\n when estimating the expectation. Therefore, `num_samples` must\n have the same shape as `pauli_sums`.\n\n Returns:\n `tf.Tensor` with shape [batch_size, n_ops] that holds the\n expectation value for each circuit with each op applied to it\n (after resolving the corresponding parameters in).\n \"\"\"\n # TODO (mbbrough): investigate how the above docstring renders.\n _check_quantum_concurrent(quantum_concurrent)\n\n op = None\n if backend is None:\n op = TFQWavefunctionSimulator.sampled_expectation\n\n if isinstance(backend, cirq.Sampler):\n op = cirq_ops._get_cirq_sampled_expectation(backend)\n\n if op is not None:\n if quantum_concurrent is True:\n # Return an op that does not block graph level parallelism.\n return lambda programs, symbol_names, symbol_values, pauli_sums, \\\n num_samples: op(programs,\n symbol_names,\n symbol_values,\n pauli_sums,\n num_samples)\n\n # Return an op that does block graph level parallelism.\n return lambda programs, symbol_names, symbol_values, pauli_sums, \\\n num_samples: _GLOBAL_OP_LOCK.execute(lambda: op(programs,\n symbol_names,\n symbol_values,\n pauli_sums,\n num_samples))\n\n raise TypeError(\n \"Backend {} is invalid. 
Expected a Cirq.Sampler or None.\".format(\n backend))\n" ]
[ [ "tensorflow.CriticalSection" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
Starry-Hu/FedML
[ "0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d" ]
[ "fedml_api/data_preprocessing/cervical_cancer/datasets.py" ]
[ "import torch.utils.data as data\n\nimport pandas as pd\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\n\n\ndef data_visualize(df,feature_name):\n # 绘制Smokes等离散变量与其相关连续变量的关系,Pie chart\n # 自定义格式\n # def my_fmt(x):\n # '%1.1f%%'\n # total = 858\n # print(x)\n # return '{:.1f}%\\n({:.0f})'.format(x, total * x / 100)\n #\n # fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n # ax = [ax1, ax2, ax3, ax4]\n # labels = ['all nan', 'all not nan']\n # colors = ['#ff9999', '#ffcc99']\n # checkFeatures = [{'A': 'Smokes', 'B': 'Smokes (years)'},\n # {'A': 'Hormonal Contraceptives', 'B': 'Hormonal Contraceptives (years)'},\n # {'A': 'IUD', 'B': 'IUD (years)'},\n # {'A': 'STDs', 'B': 'STDs (number)'}]\n # for feature, features in enumerate(checkFeatures):\n # df_bool1 = ((df[features['A']].isna()) & df[features['B']].isna())\n # df_bool4 = (~(df[features['A']].isna()) & (~df[features['B']].isna()))\n # count = [df_bool1.sum(), df_bool4.sum()]\n #\n # ax[feature].pie(count, colors=colors, labels=labels, autopct=my_fmt, startangle=90)\n # centre_circle = plt.Circle((0, 0), 0.70, fc='white')\n # fig = plt.gcf()\n # fig.gca().add_artist(centre_circle)\n # ax[feature].axis('equal')\n # if feature != 1:\n # ax[feature].set_title('{} and {}'.format(features['A'], features['B']))\n # else:\n # ax[feature].set_title('HC and HC (years) for short')\n # plt.tight_layout()\n # plt.show()\n\n # Visualize the number of missing\n # values as a bar chart\n # import missingno as msno\n # msno.bar(df)\n\n # scale = preprocessing.StandardScaler()\n # data = scale.fit_transform(df.drop('Biopsy', axis=1))\n # df = pd.DataFrame(data, index=list(range(data.shape[0])), columns=feature_name)\n import seaborn as sns\n # 描述各特征的分布密度图\n print(\"Density Plots\");\n print()\n fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9),\n (ax10, ax11, ax12), (ax13, ax14, ax15)) = plt.subplots(5, 3)\n ax = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9, ax10, ax11, ax12, ax13, ax14, ax15]\n i = 0\n for feature in df[feature_name]:\n print('*' * 100)\n sns.countplot(x=feature, data=df, ax=ax[i]) # 计数图\n # sns.distplot(df[feature], ax=ax[i]) # 数据分布密度图\n i+=1\n plt.show()\n\n\n\n x = ['Biopsy-0', 'Biopsy-1']\n y = [len(df[df[\"Biopsy\"] == 0]), len(df[df[\"Biopsy\"] == 1])]\n plt.bar(x, y, width=0.8)\n plt.ylabel(\"count\")\n for a, b in zip(x, y):\n plt.text(a, b + 0.25, '%.0f' % b, ha=\"center\", va=\"bottom\", fontsize=12)\n # plt.savefig(\"./bar1.png\")\n plt.show()\n # for a, b in zip(X, df['Bi']):\n # plt.text(a, b + 0.05, '%.0f' % b, ha='center', va='bottom', fontsize=11)\n\n # checking the patch order, not for final:\n # catp.ax.text(spot[0].get_x(), -3, spot[1][0][0]+spot[1][1][0])\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(15, 12))\n # sns.countplot(x='Age', data=df, ax=ax1)\n # sns.countplot(x='Biopsy', data=df, ax=ax2)\n # sns.barplot(x='Age', y='Biopsy', data=df, ax=ax3)\n\n # Stratified\n facet = sns.FacetGrid(df, hue='Biopsy', aspect=4)\n facet.map(sns.kdeplot, 'Age', shade=True)\n facet.set(xlim=(0, df['Age'].max()))\n facet.add_legend()\n\n plt.show()\n\n\nclass Cervical_truncated(data.Dataset):\n\n def __init__(self, datadir, dataidxs=None, train=True):\n self.datadir = datadir\n self.dataidxs = dataidxs\n self.train = train\n self.feature_name = ['Age', 'Number of sexual partners', 'First sexual intercourse', 'Num of pregnancies',\n 'Smokes', 'Smokes (years)', 'Hormonal Contraceptives', 'Hormonal Contraceptives (years)',\n 
'IUD', 'IUD (years)', 'STDs', 'STDs (number)',\n 'STDs: Number of diagnosis', 'STDs: Time since first diagnosis',\n 'STDs: Time since last diagnosis', 'Biopsy']\n self.data, self.target = self.__build_truncated_dataset__()\n\n def __build_truncated_dataset__(self):\n df = pd.read_csv(self.datadir, na_values=['?'])\n # 筛选列\n df = df[self.feature_name]\n df = df.apply(pd.to_numeric)\n # 对原始数据进行可视化\n # data_visualize(df,self.feature_name[:-1])\n # 数据清洗,填充nan。连续变量用中位数,离散型变量根据相应的连续变量取值用0/1填充\n df['Number of sexual partners'] = df['Number of sexual partners'].fillna(\n df['Number of sexual partners'].median())\n df['First sexual intercourse'] = df['First sexual intercourse'].fillna(df['First sexual intercourse'].median())\n df['Num of pregnancies'] = df['Num of pregnancies'].fillna(df['Num of pregnancies'].median())\n # 离散变量与其对应的连续变量\n df['Smokes (years)'] = df['Smokes (years)'].fillna(df['Smokes (years)'].median())\n if df['Smokes (years)'].median() != 0:\n df['Smokes'] = df['Smokes'].fillna(1)\n else:\n df['Smokes'] = df['Smokes'].fillna(0)\n df['Hormonal Contraceptives (years)'] = df['Hormonal Contraceptives (years)'].fillna(\n df['Hormonal Contraceptives (years)'].median())\n if df['Hormonal Contraceptives (years)'].median() != 0:\n df['Hormonal Contraceptives'] = df['Hormonal Contraceptives'].fillna(1)\n else:\n df['Hormonal Contraceptives'] = df['Hormonal Contraceptives'].fillna(0)\n df['IUD (years)'] = df['IUD (years)'].fillna(df['IUD (years)'].median())\n if df['IUD (years)'].median() != 0:\n df['IUD'] = df['IUD'].fillna(1)\n else:\n df['IUD'] = df['IUD'].fillna(0)\n df['STDs (number)'] = df['STDs (number)'].fillna(df['STDs (number)'].median())\n if df['STDs (number)'].median() != 0:\n df['STDs'] = df['STDs'].fillna(1)\n else:\n df['STDs'] = df['STDs'].fillna(0)\n df['STDs: Time since first diagnosis'] = df['STDs: Time since first diagnosis'].fillna(\n df['STDs: Time since first diagnosis'].median())\n df['STDs: Time since last diagnosis'] = df['STDs: Time since last diagnosis'].fillna(\n df['STDs: Time since last diagnosis'].median())\n\n # 划分数据集,75%train, 25%test, 控制每次输出一样\n train_ds, test_ds = train_test_split(df, test_size=0.25, random_state=2021, shuffle=False)\n\n # 正则化,处理数据得到x, y\n scale = preprocessing.MinMaxScaler()\n # scale = preprocessing.StandardScaler()\n train_y = torch.tensor(train_ds['Biopsy'].values).float()\n train_X = torch.tensor(scale.fit_transform(train_ds.drop('Biopsy', axis=1))).float()\n test_y = torch.tensor(test_ds['Biopsy'].values).float()\n test_X = torch.tensor(scale.fit_transform(test_ds.drop('Biopsy', axis=1))).float()\n\n # 判断训练数据/测试数据\n if self.train:\n data = train_X\n target = train_y\n else:\n data = test_X\n target = test_y\n # 返回具体某个客户端的数据\n if self.dataidxs is not None:\n return data[self.dataidxs], target[self.dataidxs]\n else:\n return data, target\n\n def __getitem__(self, index):\n return self.data[index], self.target[index]\n\n def __len__(self):\n return len(self.data)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.subplots", "sklearn.model_selection.train_test_split", "torch.tensor", "matplotlib.pyplot.bar", "matplotlib.pyplot.text", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jld23/python-dlpy
[ "39fe417a02da8f40975691392f5735fe02160da0" ]
[ "dlpy/images.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n''' Special functionality for CAS tables containing image data '''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom swat.cas.table import CASTable\nfrom .utils import random_name, image_blocksize, caslibify_context, get_server_path_sep\nfrom warnings import warn\n\n\nclass ImageTable(CASTable):\n\n '''\n\n Specialized CASTable for Image Data\n\n Parameters\n ----------\n name : string\n The name of the CAS table\n **table_params : keyword arguments, optional\n Parameters to the :class:`CASTable` constructor\n\n Attributes\n ----------\n image_summary : pandas.Series\n The summary of the images contained in the image table.\n label_freq : pandas.Series\n The count of images in different categories.\n channel_means : tuple of double\n The mean of the image intensities in each channels.\n uid : pandas.DataFrame\n The unique ID for each image\n\n Returns\n -------\n :class:`ImageTable`\n\n '''\n\n running_image_column = '_image_'\n\n def __init__(self, name, **table_params):\n CASTable.__init__(self, name, **table_params)\n self.patch_level = 0\n\n @classmethod\n def from_table(cls, tbl, image_col='_image_', label_col='_label_',\n path_col=None, columns=None, casout=None):\n\n '''\n\n Create an ImageTable from a CASTable\n\n Parameters\n ----------\n tbl : CASTable\n The CASTable object to use as the source.\n image_col : str, optional\n Specifies the column name for the image data.\n Default = '_image_'\n label_col : str, optional\n Specifies the column name for the labels.\n Default = '_label_'\n path_col : str, optional\n Specifies the column name that stores the path for each image.\n Default = None, and the unique image ID will be generated from the labels.\n columns : list of str, optional\n Specifies the extra columns in the image table.\n Default = None\n casout : dict\n Specifies the output CASTable parameters.\n Default = None.\n Note : the options of replace=True, blocksize=32 will be automatically\n added to the casout option.\n\n Returns\n -------\n :class:`ImageTable`\n\n '''\n\n out = cls(**tbl.params)\n\n conn = tbl.get_connection()\n conn.loadactionset('image', _messagelevel='error')\n\n if casout is None:\n casout = {}\n elif isinstance(casout, CASTable):\n casout = casout.to_outtable_params()\n\n if 'name' not in casout:\n casout['name'] = random_name()\n\n if '_filename_0' in tbl.columninfo().ColumnInfo.Column.tolist():\n computedvars = []\n code = []\n else:\n computedvars = ['_filename_0']\n code = ['length _filename_0 varchar(*);']\n if path_col is not None:\n code.append(('_loc1 = LENGTH({0}) - '\n 'INDEX(REVERSE({0}),\\'/\\')+2;').format(path_col))\n code.append('_filename_0 = SUBSTR({},_loc1);'.format(path_col))\n else:\n code.append('call streaminit(-1);shuffle_id=rand(\"UNIFORM\")*10**10;')\n code.append(('_filename_0=cats({},\"_\",put(put(shuffle_id,z10.)'\n ',$char10.),\".jpg\");').format(label_col))\n\n if image_col != 
'_image_':\n cls.running_image_column = image_col\n\n if label_col != '_label_':\n computedvars.append('_label_')\n code.append('_label_ = {};'.format(label_col))\n\n code = '\\n'.join(code)\n\n if computedvars:\n table_opts = dict(computedvars=computedvars,\n computedvarsprogram=code,\n **tbl.params)\n else:\n table_opts = dict(**tbl.params)\n\n # This will generate the '_image_' and '_label_' columns.\n conn.retrieve('table.shuffle', _messagelevel='error',\n table=table_opts,\n casout=dict(replace=True, blocksize=32, **casout))\n\n column_names = [cls.running_image_column, '_label_', '_filename_0', '_id_']\n if columns is not None:\n if not isinstance(columns, list):\n columns = list(columns)\n column_names += columns\n\n # Remove the unwanted columns.\n conn.retrieve('table.partition', _messagelevel='error',\n table=dict(Vars=column_names, **casout),\n casout=dict(replace=True, blocksize=32, **casout))\n\n out = cls(**casout)\n out.set_connection(conn)\n\n return out\n\n @classmethod\n def load_files(cls, conn, path, casout=None, columns=None, caslib=None, **kwargs):\n\n '''\n\n Create ImageTable from files in `path`\n\n Parameters\n ----------\n conn : CAS\n The CAS connection object\n path : string\n The path to the image directory on the server.\n Path may be absolute, or relative to caslib root if specified.\n casout : dict, optional\n The output table specifications\n columns : list of str, optional\n Specifies the extra columns in the image table.\n caslib : string, optional\n The name of the caslib containing the images.\n **kwargs : keyword arguments, optional\n Additional keyword arguments to the `image.loadimages` action\n\n Returns\n -------\n\n :class:`ImageTable`\n\n '''\n\n conn.loadactionset('image', _messagelevel='error')\n\n if casout is None:\n casout = dict(name=random_name())\n elif isinstance(casout, CASTable):\n casout = casout.to_outtable_params()\n\n if 'name' not in casout:\n casout['name'] = random_name()\n with caslibify_context(conn, path, task = 'load') as (caslib_created, path_created):\n\n if caslib is None:\n caslib = caslib_created\n path = path_created\n\n if caslib is None and path is None:\n print('Cannot create a caslib for the provided path. Please make sure that the path is accessible from'\n 'the CAS Server. 
Please also check if there is a subpath that is part of an existing caslib')\n\n conn.retrieve('image.loadimages', _messagelevel='error',\n casout=casout,\n distribution=dict(type='random'),\n recurse=True, labellevels=-1,\n path=path, caslib=caslib, **kwargs)\n\n sep_ = get_server_path_sep(conn)\n code=[]\n code.append('length _filename_0 varchar(*);')\n code.append(\"_loc1 = LENGTH(_path_) - INDEX(REVERSE(_path_),'\"+sep_+\"')+2;\")\n code.append('_filename_0 = SUBSTR(_path_,_loc1);')\n code = '\\n'.join(code)\n column_names = ['_image_', '_label_', '_filename_0', '_id_']\n if columns is not None:\n column_names += columns\n conn.retrieve('table.partition', _messagelevel='error',\n table=dict(Vars=column_names,\n computedvars=['_filename_0'],\n computedvarsprogram=code,\n **casout),\n casout=dict(replace=True, blocksize=32, **casout))\n\n out = cls(**casout)\n out.set_connection(conn)\n\n return out\n\n def __copy__(self):\n out = CASTable.__copy__(self)\n out.patch_level = self.patch_level\n return out\n\n def __deepcopy__(self, memo):\n out = CASTable.__deepcopy__(self, memo)\n out.patch_level = self.patch_level\n return out\n\n def to_files(self, path):\n\n '''\n\n Save the images in the original format under the specified directory\n\n Parameters\n ----------\n path : string\n Specifies the directory on the server to save the images\n\n '''\n\n caslib = random_name('Caslib', 6)\n self._retrieve('addcaslib', name=caslib, path=path, activeonadd=False)\n\n file_name = '_filename_{}'.format(self.patch_level)\n\n rt = self._retrieve('image.saveimages', caslib=caslib,\n images=dict(table=self.to_table_params(), path=file_name, image=self.running_image_column),\n labellevels=1)\n\n self._retrieve('dropcaslib', caslib=caslib)\n\n def to_sashdat(self, path=None, name=None, **kwargs):\n\n '''\n\n Save the ImageTable to a sashdat file\n\n Parameters\n ----------\n path : string\n Specifies the directory on the server to save the images\n\n '''\n\n caslib = random_name('Caslib', 6)\n self._retrieve('addcaslib', name=caslib, path=path, activeonadd=False,\n datasource=dict(srcType='DNFS'))\n if name is None:\n name = self.to_params()['name'] + '.sashdat'\n\n self._retrieve('table.save', caslib=caslib, name=name,\n table=self.to_params(), **kwargs)\n self._retrieve('dropcaslib', caslib=caslib)\n\n def copy_table(self, casout=None):\n\n '''\n\n Create a copy of the ImageTable\n\n Parameters\n ----------\n casout : dict, optional\n Output CAS table parameters\n\n Returns\n -------\n :class:`ImageTable`\n\n '''\n\n if casout is None:\n casout = {}\n casout['name'] = random_name()\n\n res = self._retrieve('table.partition', casout=casout, table=self)['casTable']\n out = ImageTable.from_table(tbl=res, image_col=self.running_image_column)\n out.params.update(res.params)\n\n return out\n\n def show(self, nimages=5, ncol=8, randomize=False, figsize=None, where=None):\n\n '''\n\n Display a grid of images\n\n Parameters\n ----------\n nimages : int, optional\n Specifies the number of images to be displayed.\n If nimage is greater than the maximum number of images in the\n table, it will be set to this maximum number.\n Note: Specifying a large value for nimages can lead to slow performance.\n ncol : int, optional\n Specifies the layout of the display, determine the number of\n columns in the plots.\n randomize : bool, optional\n Specifies whether to randomly choose the images for display.\n figsize : int, optional\n Specifies the size of the fig that contains the image.\n where : string, optional\n Specifies 
the SAS Where clause for selecting images to be shown.\n One example is as follows:\n my_images.show(nimages=2, where='_id_ eq 57')\n\n '''\n\n nimages = min(nimages, len(self))\n # put where clause to select images\n self.params['where'] = where\n # restrict the number of observations to be shown\n try:\n # we use numrows to check if where clause is valid\n max_obs = self.numrows().numrows\n nimages = min(max_obs, nimages)\n except AttributeError:\n self.params['where'] = None\n warn(\"Where clause doesn't take effect, because encounter an error while processing where clause. \"\n \"Please check your where clause.\")\n\n if randomize:\n temp_tbl = self.retrieve('image.fetchimages', _messagelevel='error',\n table=dict(\n computedvars=['random_index'],\n computedvarsprogram='call streaminit(-1);'\n 'random_index='\n 'rand(\"UNIFORM\");',\n **self.to_table_params()),\n image=self.running_image_column,\n sortby='random_index', to=nimages)\n else:\n temp_tbl = self._retrieve('image.fetchimages', to=nimages, image=self.running_image_column)\n\n # remove the where clause\n self.params['where'] = None\n\n if nimages > ncol:\n nrow = nimages // ncol + 1\n else:\n nrow = 1\n ncol = nimages\n if figsize is None:\n figsize = (16, 16 // ncol * nrow)\n fig = plt.figure(figsize=figsize)\n\n for i in range(nimages):\n image = temp_tbl['Images']['Image'][i]\n label = temp_tbl['Images']['Label'][i]\n ax = fig.add_subplot(nrow, ncol, i + 1)\n ax.set_title('{}'.format(label))\n if len(image.size) == 2:\n plt.imshow(np.array(image), cmap='Greys_r')\n else:\n plt.imshow(image)\n plt.xticks([]), plt.yticks([])\n plt.show()\n\n def crop(self, x=0, y=0, width=None, height=None, inplace=True):\n\n '''\n\n Crop the images in the ImageTable\n\n Parameters\n ----------\n x : int, optional\n Specify the x location of the top-left corner of the cropped images.\n y : int, optional\n Specify the y location of the top-left corner of the cropped images.\n width : int, optional\n Specify the width of the cropped images.\n height : int, optional\n Specify the height of the cropped images.\n If not specified, height will be set to be equal to width.\n inplace : bool, optional\n Specifies whether to update the original table, or to create a new one.\n\n Returns\n -------\n :class:`ImageTable`\n If `inplace=False`\n None\n If `inplace=True`\n\n '''\n\n if (width is None) and (height is None):\n width = 224\n if width is None:\n width = height\n if height is None:\n height = width\n blocksize = image_blocksize(width, height)\n\n column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]\n\n if inplace:\n self._retrieve('image.processimages',\n copyvars=column_names,\n image=self.running_image_column,\n casout=dict(replace=True, blocksize=blocksize,\n **self.to_outtable_params()),\n imagefunctions=[\n dict(functionoptions=dict(functiontype='GET_PATCH',\n x=x, y=y,\n w=width, h=height))])\n\n else:\n out = self.copy_table()\n out.crop(x=x, y=y, width=width, height=height)\n return out\n\n def resize(self, width=None, height=None, inplace=True):\n\n '''\n\n Resize the images in the ImageTable\n\n Parameters\n ----------\n width : int, optional\n Specify the target width of the resized images.\n height : int, optional\n Specify the target height of the resized images.\n If not specified, height will be set to be equal to width.\n inplace : bool, optional\n Specifies whether to update the original table, or to create\n a new one.\n\n Returns\n -------\n :class:`ImageTable`\n If `inplace=False`\n None\n If 
`inplace=True`\n\n '''\n\n if (width is None) and (height is None):\n width = 224\n if width is None:\n width = height\n if height is None:\n height = width\n blocksize = image_blocksize(width, height)\n column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]\n\n if inplace:\n self._retrieve('image.processimages',\n copyvars=column_names,\n image=self.running_image_column,\n casout=dict(replace=True, blocksize=blocksize,\n **self.to_outtable_params()),\n imagefunctions=[\n dict(functionoptions=dict(functiontype='RESIZE',\n w=width, h=height))])\n else:\n out = self.copy_table()\n out.resize(width=width, height=height)\n return out\n\n def as_patches(self, x=0, y=0, width=None, height=None, step_size=None,\n output_width=None, output_height=None, inplace=True):\n\n '''\n\n Generate patches from the images in the ImageTable\n\n Parameters\n ----------\n x : int, optional\n Specify the x location of the top-left corner of the\n first patches.\n y : int, optional\n Specify the y location of the top-left corner of the\n first patches.\n width : int, optional\n Specify the width of the patches.\n height : int, optional\n Specify the width of the patches.\n If not specified, height will be set to be equal to width.\n step_size : int, optional\n Specify the step size of the moving windows for extracting\n the patches.\n Default : None, meaning step_size=width.\n output_width : int, optional\n Specify the output width of the patches.\n If not equal to width, the patches will be resize to the\n output width.\n Default : None, meaning output_width=width.\n output_height : int, optional\n Specify the output height of the patches.\n If not equal to height, the patches will be resize to the\n output height.\n Default : None, meaning output_height=height.\n inplace : bool, optional\n Specifies whether to update the original table, or create a\n new one.\n\n Returns\n -------\n :class:`ImageTable`\n If `inplace=False`\n None\n If `inplace=True`\n\n '''\n\n if (width is None) and (height is None):\n width = 224\n if width is None:\n width = height\n if height is None:\n height = width\n\n if step_size is None:\n step_size = width\n\n if output_width is None:\n output_width = width\n if output_height is None:\n output_height = height\n\n blocksize = image_blocksize(output_width, output_height)\n croplist = [dict(sweepimage=True, x=x, y=y,\n width=width, height=height,\n stepsize=step_size,\n outputwidth=output_width,\n outputheight=output_height)]\n\n column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]\n\n if inplace:\n self._retrieve('image.augmentimages',\n copyvars=column_names,\n image=self.running_image_column,\n casout=dict(replace=True, **self.to_outtable_params()),\n croplist=croplist)\n\n # The following code generate the latest file name according\n # to the number of patches operations.\n computedvars = '_filename_{}'.format(self.patch_level + 1)\n code = []\n code.append('length _filename_{1} varchar(*);')\n code.append('dot_loc = LENGTH(_filename_{0}) - '\n 'INDEX(REVERSE(_filename_{0}), \\'.\\')+1;')\n code.append('_filename_{1} = SUBSTR(_filename_{0}, 1, dot_loc-1) || '\n 'compress(\\'_\\'||x||\\'_\\'||y||SUBSTR(_filename_{0},dot_loc));')\n code = '\\n'.join(code)\n code = code.format(self.patch_level, self.patch_level + 1)\n\n self._retrieve('table.shuffle',\n casout=dict(replace=True, blocksize=blocksize,\n **self.to_outtable_params()),\n table=dict(computedvars=computedvars,\n computedvarsprogram=code,\n **self.to_table_params()))\n 
self.patch_level += 1\n\n else:\n out = self.copy_table()\n out.as_patches(x=x, y=y, width=width, height=height, step_size=step_size,\n output_width=output_width, output_height=output_height)\n return out\n\n def as_random_patches(self, random_ratio=0.5, x=0, y=0, width=None, height=None,\n step_size=None, output_width=None, output_height=None,\n inplace=True):\n\n '''\n\n Generate random patches from the images in the ImageTable\n\n Parameters\n ----------\n\n random_ratio : double, optional\n Specifies the proportion of the generated patches to output.\n x : int, optional\n Specifies the x location of the top-left corner of the first patches.\n y : int, optional\n Specifies the y location of the top-left corner of the first patches.\n width : int, optional\n Specifies the width of the patches.\n height : int, optional\n Specifies the width of the patches.\n If not specified, height will be set to be equal to width.\n step_size : int, optional\n Specifies the step size of the moving windows for extracting the patches.\n If not specified, it will be set to be equal to width.\n output_width : int, optional\n Specifies the output width of the patches.\n If not specified, it will be set to be equal to width.\n output_height : int, optional\n Specifies the output height of the patches.\n If not specified, it will be set to be equal to height.\n inplace : bool, optional\n Specifies whether to update the original table, or create a new one.\n\n Returns\n -------\n :class:`ImageTable`\n If `inplace=True`\n None\n If `inplace=False`\n\n '''\n\n if (width is None) and (height is None):\n width = 224\n if width is None:\n width = height\n if height is None:\n height = width\n\n if step_size is None:\n step_size = width\n\n if output_width is None:\n output_width = width\n if output_height is None:\n output_height = height\n\n blocksize = image_blocksize(output_width, output_height)\n\n croplist = [dict(sweepimage=True, x=x, y=y,\n width=width, height=height,\n stepsize=step_size,\n outputwidth=output_width,\n outputheight=output_height)]\n\n column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]\n\n if inplace:\n self._retrieve('image.augmentimages',\n copyvars=column_names,\n image=self.running_image_column,\n casout=dict(replace=True, **self.to_outtable_params()),\n croplist=croplist,\n randomratio=random_ratio,\n writerandomly=True)\n\n # The following code generate the latest file name according\n # to the number of patches operations.\n computedvars = '_filename_{}'.format(self.patch_level + 1)\n code = []\n code.append('length _filename_{1} varchar(*);')\n code.append('dot_loc = LENGTH(_filename_{0}) - '\n 'INDEX(REVERSE(_filename_{0}),\\'.\\')+1;')\n code.append('_filename_{1} = SUBSTR(_filename_{0},1,dot_loc-1) || '\n 'compress(\\'_\\'||x||\\'_\\'||y||SUBSTR(_filename_{0},dot_loc));')\n code = '\\n'.join(code)\n code = code.format(self.patch_level, self.patch_level + 1)\n\n self._retrieve('table.shuffle',\n casout=dict(replace=True, blocksize=blocksize,\n **self.to_outtable_params()),\n table=dict(computedvars=computedvars,\n computedvarsprogram=code,\n **self.to_table_params()))\n\n self.patch_level += 1\n\n else:\n out = self.copy_table()\n out.as_random_patches(random_ratio=random_ratio,\n x=x, y=y,\n width=width, height=height,\n step_size=step_size,\n output_width=output_width,\n output_height=output_height)\n return out\n\n def random_mutations(self, color_jitter=True, color_shift=True, darken=False,\n horizontal_flip=True, invert_pixels=False, lighten=False, 
pyramid_down=False,\n pyramid_up=False, rotate_left=False, rotate_right=False, sharpen=False,\n vertical_flip=True, inplace=True, random_ratio=None):\n\n '''\n\n Generate random mutations from the images in the ImageTable\n\n Parameters\n ----------\n\n color_jitter : bool, optional\n Specifies whether to apply color jittering to an input image.\n color_shift : bool, optional\n Specifies whether to randomly change pixel intensity values of an input image.\n darken : bool, optional\n Specifies whether to darken the input image.\n horizontal_flip : bool, optional\n Specifies whether to flip the input image horizontally.\n invert_pixels : bool, optional\n Specifies whether to invert all pixels in the input image.\n lighten : bool, optional\n Specifies whether to lighten the input image.\n pyramid_down : bool, optional\n Specifies whether to downsample and then blur the input image.\n pyramid_up : bool, optional\n Specifies whether to upsample and then blur the input image.\n rotate_left : bool, optional\n Specifies whether to rotate the input image to the left.\n rotate_right : bool, optional\n Specifies whether to rotate the input image to the right.\n sharpen : bool, optional\n Specifies whether to sharpen the input image.\n vertical_flip : bool, optional\n Specifies whether to vertically flip the input image.\n inplace : bool, optional\n Specifies if the input table will be used as the resulting table or not.\n Default : True\n random_ratio : double, optional\n Specifies the ratio of the randomness. The smaller value would yield less\n number of images in the resulting table.\n Returns\n -------\n\n :class:`ImageTable`\n If `inplace=True`\n None\n If `inplace=False`\n\n '''\n\n croplist = [{'mutations':dict(colorjittering=color_jitter,\n colorshifting=color_shift,\n darken=darken, lighten=lighten,\n horizontalflip=horizontal_flip,\n invertpixels=invert_pixels,\n pyramiddown=pyramid_down,\n pyramidup=pyramid_up,\n rotateleft=rotate_left,\n rotateright=rotate_right,\n sharpen=sharpen,\n verticalflip=vertical_flip),\n 'usewholeimage':True}]\n\n column_names = ['_filename_{}'.format(i) for i in range(self.patch_level + 1)]\n\n if inplace:\n self._retrieve('image.augmentimages',\n copyvars=column_names,\n image=self.running_image_column,\n casout=dict(replace=True, **self.to_outtable_params()),\n croplist=croplist,\n randomratio=random_ratio,\n writerandomly=True)\n\n # The following code generate the latest file name according\n # to the number of patches and mutation (_m) operations.\n computedvars = '_filename_{}'.format(self.patch_level + 1)\n code = []\n code.append('length _filename_{1} varchar(*);')\n code.append('dot_loc = LENGTH(_filename_{0}) - '\n 'INDEX(REVERSE(_filename_{0}),\\'.\\')+1;')\n code.append('_filename_{1} = SUBSTR(_filename_{0},1,dot_loc-1) || '\n 'compress(\\'_\\'||\\'m{0}\\'||SUBSTR(_filename_{0},dot_loc));')\n code = '\\n'.join(code)\n code = code.format(self.patch_level, self.patch_level + 1)\n\n self._retrieve('table.shuffle',\n casout=dict(replace=True,\n **self.to_outtable_params()),\n table=dict(computedvars=computedvars,\n computedvarsprogram=code,\n **self.to_table_params()))\n\n self.patch_level += 1\n\n else:\n out = self.copy_table()\n out.random_mutations(color_jitter=color_jitter,\n color_shift=color_shift,\n darken=darken,\n horizontal_flip=horizontal_flip,\n invert_pixels=invert_pixels,\n lighten=lighten,\n pyramid_down=pyramid_down,\n pyramid_up=pyramid_up,\n rotate_left=rotate_left,\n rotate_right=rotate_right,\n sharpen=sharpen,\n 
vertical_flip=vertical_flip,\n inplace=True,\n randomratio=random_ratio)\n return out\n\n @property\n def image_summary(self):\n '''\n Summarize the images in the ImageTable\n Returns\n -------\n :class:`pd.Series`\n '''\n out = self._retrieve('image.summarizeimages', image=self.running_image_column)['Summary']\n out = out.T.drop(['Column'])[0]\n out.name = None\n return out\n\n @property\n def label_freq(self):\n '''\n Summarize the distribution of different classes (labels) in the ImageTable\n Returns\n -------\n :class:`pd.Series`\n '''\n out = self._retrieve('simple.freq', table=self, inputs=['_label_'])['Frequency']\n out = out[['FmtVar', 'Level', 'Frequency']]\n out = out.set_index('FmtVar')\n # out.index.name = 'Label'\n out.index.name = None\n out = out.astype('int64')\n return out\n\n @property\n def channel_means(self):\n '''\n A list of the means of the image intensities in each color channel.\n Returns\n -------\n ( first-channel-mean, second-channel-mean, third-channel-mean )\n '''\n return self.image_summary[['mean1stChannel', 'mean2ndChannel',\n 'mean3rdChannel']].tolist()\n\n @property\n def uid(self):\n '''\n A unique ID for each image.\n Returns\n -------\n '''\n file_name = '_filename_{}'.format(self.patch_level)\n uid = self[['_label_', file_name]].to_frame()\n # uid = uid.rename(columns={file_name: '_uid_'})\n return uid" ]
[ [ "numpy.array", "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AspirinCode/MD-analysis-tools-scripts
[ "dfc0d282c9a844f5b8b1935a3ae74b1aff577ff9" ]
[ "network/calc_bet_centrality.py" ]
[ "import networkx as nx\nimport numpy as np\nimport pickle\n\nG = nx.Graph()\nnode1, node2 = np.loadtxt(graph_input, usecols=(0,1), unpack=True)\n\nfor i in range(len(node1)):\n G.add_edge(node1[i], node2[i])\n\ngraph_num_node = G.number_of_nodes()\nprint(f\"This graph contains {graph_num_node} nodes. \")\n\ngraph_num_edge = G.number_of_edges()\nprint(f\"This graph contains {graph_num_edge} edges. \")\n\nnode_bet_central = nx.betweenness_centrality(G)\npickle.dump(node_bet_central, open(\"node_betweeen_centrality.pkl\", 'wb'))\n\nres = np.array([(int(key), node_bet_central[key]) for key in node_bet_central.keys() ])\nres_sorted = res[res[:,0].argsort()]\nax.xaxis.set_minor_locator(MultipleLocator(10))\n\n\npos = dict(zip(idx.astype(int), np.column_stack((x, y, z))))\n\npos = {}\nfor i in range(len(idx)):\n pos[str(int(idx[i]))] = (x[i], y[i], z[i])\n\nfor key in pos.keys():\n position[key] = {'posi': pos[key]}\n\nnx.set_node_attributes(G, poistion)\n\npos = nx.get_node_attributes(G, 'posi')\nn = G.number_of_nodes()\n\ndegrees = [val for (node, val) in G.degree()]\n\nedge_max = max(degrees)\ncolors = [plt.cm.plasma(degrees[i]/edge_max) for i in range(n)]\n\nwith plt.style.context(('ggplot')):\n fig = plt.figure(figsize=(10,7))\n ax = Axes3D(fig)\n\n for key, value in pos.items():\n xi = value[0]\n yi = value[1]\n zi = value[2]\n\n ax.scatter(xi, yi, zi, c=colors[key], s=20+20*G.degree(key), edgecolors='k', alpha=0.7)\n\n for i, j in enumerate(G.edges()):\n x = np.array((pos[j[0]][0], pos[j[1]][0]))\n y = np.array((pos[j[0]][1], pos[j[1]][1]))\n z = np.array((pos[j[0]][2], pos[j[1]][2]))\n ax.plot(x, y, z, c='black', alpha=0.5)\n\n ax.view_init(30, angle)\n ax.set_axis_off()\n\n plt.show()\n\n return\n\n\n" ]
[ [ "numpy.array", "numpy.loadtxt", "numpy.column_stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
teddy-lu/yolov4flask
[ "20e825cc67faf3adca3bdbe3c40d30662d39c59f" ]
[ "predict.py" ]
[ "import base64\nimport time\n\nimport cv2\nimport os\n\nimport numpy as np\n\n\ndef dect(picb64):\n # picPath = r'./27210923000000.jpeg'\n model_label = r'./cfg/obj.names'\n LABELS = open(model_label).read().strip().split('\\n')\n num_class = len(LABELS)\n\n np.random.seed(28)\n COLORS = np.random.randint(0, 255, size=(num_class, 3), dtype='uint8')\n # cv2.cv2.imread()\n # img = cv2.imread(picPath)\n img_decode = base64.b64decode(picb64)\n img = cv2.imdecode(np.fromstring(img_decode, np.uint8), cv2.IMREAD_COLOR)\n\n # filename = picPath.split('/')[-1]\n # name = filename.split('.')[0]\n (H, W) = img.shape[:2]\n\n cfgFile = './cfg/yolov4.cfg'\n darknetModel = './cfg/yolo-obj.weights'\n\n net = cv2.dnn.readNetFromDarknet(cfgFile=cfgFile, darknetModel=darknetModel)\n ln = net.getLayerNames()\n ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]\n\n blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layerOutputs = net.forward(ln)\n end = time.time()\n boxes = []\n confidences = []\n classIDs = []\n # 置信度大于.5的边界框数据保留下来\n confidence_thre = 0.5\n # 非最大抑制的阈值。\n nms_thre = 0.3\n\n for output in layerOutputs:\n for detection in output:\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n\n if confidence > confidence_thre:\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype(\"int\")\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n classIDs.append(classID)\n\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_thre, nms_thre)\n\n data = {\"counts\": len(idxs), \"msg\": \"本次检测话费了{:.6f}秒来预测一张图片\".format(end - start)}\n listLayer = []\n if len(idxs) > 0:\n for i in idxs.flatten():\n (x, y) = (boxes[i][0], boxes[i][1])\n (w, h) = (boxes[i][2], boxes[i][3])\n\n # 画出来\n color = [int(c) for c in COLORS[classIDs[i]]]\n cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n text = '{}: {:.3f}'.format(LABELS[classIDs[i]], confidences[i])\n (text_w, text_h), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)\n cv2.rectangle(img, (x, y - text_h - baseline), (x + text_w, y), color, -1)\n cv2.putText(img, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)\n\n info = {\n \"label\": LABELS[classIDs[i]],\n \"scores\": confidences[i],\n \"x\": str(x),\n \"y\": str(y),\n \"w\": str(w),\n \"h\": str(h),\n }\n listLayer.append(info)\n\n data[\"data\"] = listLayer\n if os.getenv(\"SAVE_IMG\"):\n cv2.imwrite('./out/res{}.jpg'.format(time.time()), img)\n # res = json.dumps(data)\n\n return data\n" ]
[ [ "numpy.random.seed", "numpy.fromstring", "numpy.argmax", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
code-cullison/gnam
[ "655feedab085d47a0dfe359c97deb2dd6cdf3de7" ]
[ "misc/flatten_test.py" ]
[ "import numpy as np\n\nxyz = np.zeros((5,5,5))\n\nfor ix in range(5):\n for iy in range(5):\n for iz in range(5):\n xyz[ix,iy,iz] = ix + 10*iy + 100*iz\n\nprint(xyz)\n\nxyz = xyz.reshape((5,5,5))\n\nprint(xyz)\n\nxyz = xyz.flatten()\n\nprint(xyz)\n\nxyz = xyz.reshape((5,5,5))\n\nprint(xyz)\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
daynoryamil/ML-Scikit-Learn
[ "72aff95a6c046fcfa70ae21248c3d09b0dc101a8" ]
[ "exercises/6.boosting.py" ]
[ "import pandas as pd\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\nif __name__ == \"__main__\":\n\n dt_heart = pd.read_csv(\"./data/heart.csv\")\n print(dt_heart['target'].describe())\n\n X = dt_heart.drop(['target'], axis=1)\n y = dt_heart['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.35)\n\n boost = GradientBoostingClassifier(n_estimators=50).fit(X_train, y_train)\n boost_pred = boost.predict(X_test)\n print('='*64)\n print(accuracy_score(boost_pred, y_test))\n\n\n" ]
[ [ "sklearn.ensemble.GradientBoostingClassifier", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
bangab/GMVAE
[ "6522f5f4b6afa3f8415a9dacc558670b307c664b" ]
[ "tensorflow/losses/LossFunctions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n---------------------------------------------------------------------\n-- Author: Jhosimar George Arias Figueroa\n---------------------------------------------------------------------\n\nLoss functions used for training our model\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nclass LossFunctions:\n eps = 0.\n\n def binary_cross_entropy(self, real, logits, average=True):\n \"\"\"Binary Cross Entropy between the true and predicted outputs\n loss = (1/n) * -Σ(real*log(predicted) + (1 - real)*log(1 - predicted))\n\n Args:\n real: (array) corresponding array containing the true labels\n logits: (array) corresponding array containing the output logits\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n if self.eps > 0.0:\n max_val = np.log(1.0 - self.eps) - np.log(self.eps)\n logits = tf.clip_by_value(logits, -max_val, max_val)\n loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=real), axis=-1)\n if average:\n return tf.reduce_mean(loss)\n else:\n return loss\n \n\n def mean_squared_error(self, real, predictions, average=True):\n \"\"\"Mean Squared Error between the true and predicted outputs\n loss = (1/n)*Σ(real - predicted)^2\n\n Args:\n real: (array) corresponding array containing the true labels\n predictions: (array) corresponding array containing the predicted labels\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n loss = tf.square(real - predictions)\n if average:\n return tf.reduce_mean(loss)\n else:\n return tf.reduce_sum(loss) \n\n\n def kl_gaussian(self, mean, logVar, average=True):\n \"\"\"KL Divergence between the posterior and a prior gaussian distribution (N(0,1))\n loss = (1/n) * -0.5 * Σ(1 + log(σ^2) - σ^2 - μ^2)\n\n Args:\n mean: (array) corresponding array containing the mean of our inference model\n logVar: (array) corresponding array containing the log(variance) of our inference model\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n loss = -0.5 * tf.reduce_sum(1 + logVar - tf.exp(logVar) - tf.square(mean + self.eps), 1 ) \n if average:\n return tf.reduce_mean(loss)\n else:\n return tf.reduce_sum(loss)\n\n\n def kl_categorical(self, qx, log_qx, k, average=True):\n \"\"\"KL Divergence between the posterior and a prior uniform distribution (U(0,1))\n loss = (1/n) * Σ(qx * log(qx/px)), because we use a uniform prior px = 1/k \n loss = (1/n) * Σ(qx * (log(qx) - log(1/k)))\n\n Args:\n qx: (array) corresponding array containing the probs of our inference model\n log_qx: (array) corresponding array containing the log(probs) of our inference model\n k: (int) number of classes\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n loss = tf.reduce_sum(qx * (log_qx - tf.log(1.0/k)), 1)\n if average:\n return tf.reduce_mean(loss)\n else:\n return 
tf.reduce_sum(loss)\n \n \n def log_normal(self, x, mu, var):\n \"\"\"Logarithm of normal distribution with mean=mu and variance=var\n log(x|μ, σ^2) = loss = -0.5 * Σ log(2π) + log(σ^2) + ((x - μ)/σ)^2\n\n Args:\n x: (array) corresponding array containing the input\n mu: (array) corresponding array containing the mean \n var: (array) corresponding array containing the variance\n\n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n if self.eps > 0.0:\n var = var + self.eps\n return -0.5 * tf.reduce_sum(\n tf.log(2 * np.pi) + tf.log(var) + tf.square(x - mu) / var, axis=-1)\n \n \n def labeled_loss(self, z, z_mu, z_var, z_mu_prior, z_var_prior, average=True):\n \"\"\"Variational loss when using labeled data without considering reconstruction loss\n loss = log q(z|x,y) - log p(z) - log p(y)\n\n Args:\n z: (array) array containing the gaussian latent variable\n z_mu: (array) array containing the mean of the inference model\n z_var: (array) array containing the variance of the inference model\n z_mu_prior: (array) array containing the prior mean of the generative model\n z_var_prior: (array) array containing the prior variance of the generative mode\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n loss = self.log_normal(z, z_mu, z_var) - self.log_normal(z, z_mu_prior, z_var_prior) \n loss = loss - np.log(0.1)\n if average:\n return tf.reduce_mean(loss)\n else:\n return loss\n \n \n def entropy(self, logits, targets, average=True):\n \"\"\"Entropy loss\n loss = (1/n) * -Σ targets*log(predicted)\n\n Args:\n logits: (array) corresponding array containing the logits of the categorical variable\n real: (array) corresponding array containing the true labels\n average: (bool) whether to average the result to obtain a value\n \n Returns:\n output: (array/float) depending on average parameters the result will be the mean\n of all the sample losses or an array with the losses per sample\n \"\"\"\n log_q = tf.nn.log_softmax(logits)\n if average:\n return -tf.reduce_mean(tf.reduce_sum(targets * log_q, 1))\n else:\n return -tf.reduce_sum(targets * log_q, 1)\n\n" ]
[ [ "tensorflow.clip_by_value", "numpy.log", "tensorflow.nn.log_softmax", "tensorflow.reduce_mean", "tensorflow.reduce_sum", "tensorflow.exp", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.log", "tensorflow.square" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Nikhil-Pesaladinne/cando-web
[ "f7f07aefda259dfe4a094b2c929c838539f16599" ]
[ "cando/cando.py" ]
[ "import os, sys, pickle\nimport requests\nimport random\nimport time\nimport operator\nimport math\nimport progressbar\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nimport difflib\nimport matplotlib.pyplot as plt\nimport inspect\nfrom decimal import Decimal\nfrom rdkit import Chem, DataStructs, RDConfig\nfrom rdkit.Chem import AllChem, rdmolops\nfrom sklearn.metrics import pairwise_distances, pairwise_distances_chunked, roc_curve, roc_auc_score, average_precision_score, ndcg_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom scipy.spatial.distance import squareform, cdist\nfrom scipy import stats\n\n\nclass Protein(object):\n \"\"\"!\n An object to represent a protein\n \"\"\"\n def __init__(self, id_, sig):\n ## @var id_ \n # PDB or UniProt ID for the given protein\n self.id_ = id_\n ## @var alt_id\n # Used when a second identifier mapping is available (such as SIFTs project)\n self.alt_id = ''\n ## @var sig \n # List of scores representing each drug interaction with the given protein\n self.sig = sig\n ## @var pathways \n # List of Pathway objects in which the given protein is involved\n self.pathways = []\n ## @var indications\n # List of Indication objects to which the protein is associated\n self.indications = []\n ## @var name\n # str: the common name of the protein (not currently used)\n self.name = ''\n ## @var gene\n # str: the gene name from which the protein is produced\n self.gene = ''\n\n\nclass Compound(object):\n \"\"\"!\n An object to represent a compound/drug\n \"\"\"\n def __init__(self, name, id_, index, status='N/A'):\n ## @var name \n # str: Name of the Compound (e.g., 'caffeine')\n self.name = name\n ## @var id_\n # int: CANDO id from mapping file (e.g., 1, 10, 100, ...)\n self.id_ = id_\n ## @var index\n # int: The order in which the Compound appears in the mapping file (e.g, 1, 2, 3, ...)\n self.index = index\n ## @var status\n # str: The clinical trial status of the compound from DrugBank ('approved' or 'other')\n self.status = status\n ## @var sig\n # list: Signature is essentially a column of the Matrix\n self.sig = []\n ## @var aux_sig\n # list: Potentially temporary signature for things like pathways, where \"c.sig\" needs to be preserved\n self.aux_sig = []\n ## @var indications\n # list: This is every indication the Compound is associated with from the\n # mapping file\n self.indications = []\n ## @var similar\n # list: This is the ranked list of compounds with the most similar interaction signatures\n self.similar = []\n ## @var similar_computed\n # bool: Have the distances of all Compounds to the given Compound been computed?\n self.similar_computed = False\n ## @var similar_sorted\n # bool: Have the most similar Compounds to the given Compound been sorted?\n self.similar_sorted = False\n ## @var cluster_id\n # int: The cluster id this Compound was assigned from clustering method\n self.cluster_id = []\n ## @var adrs\n # list: List of ADRs associated with this Compound\n self.adrs = []\n ## @var alt_ids\n # dict: dict of other ids inputted with compound mapping\n self.alt_ids = {}\n ## @var metabolites\n # list: List of all metabolites from the compound\n self.metabolites = []\n ## @var is_metabolite\n # bool: bool if the drug is a metabolite itself\n self.is_metabolite = False\n ## @var parent\n # Compound: 
Compound object to which this compound is a metabolite\n self.parent = None\n ## @var compounds\n # List Compound: Compound objects to which this compound is associated\n self.compounds = []\n\n def add_indication(self, ind):\n \"\"\"!\n Add an Indication to the list of Indications associated to this Compound\n @param ind object: Indication object to add\n \"\"\"\n self.indications.append(ind)\n\n\nclass Compound_pair(object):\n \"\"\"!\n An object to represent a compound/drug-pair\n \"\"\"\n def __init__(self, name, id_, index):\n ## @var name\n # str: Name of the Compound (e.g., 'caffeine')\n self.name = name\n ## @var id_\n # int: CANDO id from mapping file (e.g., 1, 10, 100, ...)\n self.id_ = id_\n ## @var index\n # int: The order in which the Compound appears in the mapping file (e.g, 1, 2, 3, ...)\n self.index = index\n ## @var sig\n # list: Signature is essentially a column of the Matrix\n self.sig = []\n ## @var aux_sig\n # list: Potentially temporary signature for things like pathways, where \"c.sig\" needs to be preserved\n self.aux_sig = []\n ## @var similar\n # list: This is the ranked list of compounds with the most similar interaction signatures\n self.similar = []\n ## @var similar_computed\n # bool: Have the distances of all Compounds to the given Compound been computed?\n self.similar_computed = False\n ## @var similar_sorted\n # bool: Have the most similar Compounds to the given Compound been sorted?\n self.similar_sorted = False\n ## @var adrs\n # list: List of ADRs associated with this Compound\n self.adrs = []\n\n def add_adr(self, adr):\n \"\"\"!\n Add an ADR to the list of Indications associated to this Compound\n @param ind object: Indication object to add\n \"\"\"\n self.adrs.append(adr)\n\n\nclass Indication(object):\n \"\"\"!\n An object to represent an indication (disease)\n \"\"\"\n def __init__(self, ind_id, name):\n ## @var id_\n # str: MeSH or OMIM ID for the indication from the mapping file\n self.id_ = ind_id\n ## @var name\n # str: Name for the indication from the mapping file\n self.name = name\n ## @var compounds\n # list: Every associated compound object from the mapping file\n self.compounds = []\n ## @var pathways\n # list: Every pathway associated to the indication from the mapping file\n self.pathways = []\n ## @var proteins\n # list: Every protein associated to the indication form the mapping file\n self.proteins = []\n ## @var pathogen\n # bool: Whether or not this indication is caused by a pathogen\n self.pathogen = None\n\n\nclass Pathway(object):\n \"\"\"!\n An object to represent a pathway\n \"\"\"\n def __init__(self, id_):\n ## @var proteins\n # list: Protein objects associated with the given Pathway\n self.proteins = []\n ## @var id_\n # str: Identification for the given Pathway\n self.id_ = id_\n ## @var indications\n # list: Indication objects associated with the given Pathway\n self.indications = []\n\n\nclass ADR(object):\n \"\"\"!\n An object to represent an adverse reaction\n \"\"\"\n def __init__(self, id_, name):\n ## @var id_\n # str: Identification for the given ADR\n self.id_ = id_\n ## @var name\n # str: Name of the given ADR\n self.name = name\n ## @var compounds\n # list: Compound objects associated with the given ADR\n self.compounds = []\n ## @var compounds\n # List: Compound object pairs (tuples) associated with the given ADR\n self.compound_pairs = []\n\n\nclass CANDO(object):\n \"\"\"!\n An object to represent all aspects of CANDO (compounds, indications, matrix, etc.)\n To instantiate you need the compound mapping 
(c_map), an\n indication mapping file (i_map), and typically and a compound-protein matrix (matrix=) or\n or precomputed compound-compound distance matrix (read_rmsds=), but those are optional.\n \"\"\"\n def __init__(self, c_map, i_map, matrix='', compound_set='all', compute_distance=False, save_dists='',\n read_dists='', pathways='', pathway_quantifier='max', indication_pathways='', indication_proteins='',\n similarity=False, dist_metric='rmsd', protein_set='', rm_zeros=False, rm_compounds='',\n ddi_compounds='', ddi_adrs='', adr_map='', protein_distance=False, protein_map='', ncpus=1):\n ## @var c_map\n # str: File path to the compound mapping file (relative or absolute)\n self.c_map = c_map\n ## @var i_map \n # str: File path to the indication mapping file (relative or absolute)\n self.i_map = i_map\n ## @var matrix \n # str: File path to the cando matrix file (relative or absolute)\n self.matrix = matrix\n ## @var compound_set\n # str or List str: what compounds to use, such as all, approved, experimental, etc\n self.compound_set = compound_set\n ## @var protein_set\n # str: File path to protein subset file (relative or absolute) \n self.protein_set = protein_set\n ## @var pathways\n # str: File path to pathway file\n self.pathways = []\n self.accuracies = {}\n ## @var compute_distance\n # bool: Calculate the distance for each Compound against all other Compounds using chosen distance metric\n self.compute_distance = compute_distance\n ## @var protein_distance\n # bool: Calculate the distance for each Protein against all other Proteins using chosen distance metric\n self.protein_distance = protein_distance\n self.clusters = {}\n ## @var rm_zeros\n # bool: Remove Compounds with all-zero signatures from CANDO object\n self.rm_zeros = rm_zeros\n ## @var rm_compounds\n # list: Compounds to remove from the CANDO object \n self.rm_compounds = rm_compounds\n self.rm_cmpds = []\n ## @var save_dists\n # bool: Write the calculated distances to file after computation (set compute_distances=True)\n self.save_dists = save_dists\n ## @var read_dists\n # str: File path to pre-computed distance matrix\n self.read_dists = read_dists\n ## @var similarity\n # bool: Use similarity instead of distance\n self.similarity = similarity\n ## @var dist_metric\n # str: Distance metric to be used for computing Compound-Compound distances\n self.dist_metric = dist_metric\n ## @var ncpus\n # int: Number of CPUs used for parallelization\n self.ncpus = int(ncpus)\n ## @var pathway_quantifier\n # str: Method used to quantify a all Pathways\n self.pathway_quantifier = pathway_quantifier\n ## @var indication_pathways\n # str: File path to Indication-Pathway association file\n self.indication_pathways = indication_pathways\n ## @var indication_proteins\n # str: File path to Indication-Protein association file\n self.indication_proteins = indication_proteins\n ## @var adr_map\n # str: File path to ADR mapping file\n self.adr_map = adr_map\n ## @var protein_map\n # str: File path to Protein metadata mapping file\n self.protein_map = protein_map\n ## @var ddi_compounds\n # str: File path to Drug--drug mapping file\n self.ddi_compounds = ddi_compounds\n ## @var ddi_compounds\n # str: File path to Drug--Drug--ADE mapping file\n self.ddi_adrs = ddi_adrs\n\n ## @var proteins\n # List: Protein objects in the platform\n self.proteins = []\n self.protein_id_to_index = {}\n ## @var compounds\n # List: Compound objects in the platform\n self.compounds = []\n self.compound_ids = []\n ## @var compound_pairs\n # List: Compound_pair 
objects in the platform\n self.compound_pairs = []\n self.compound_pair_ids = []\n ## @var indications\n # List: Indication objects in the platform\n self.indications = []\n self.indication_ids = []\n ## @var adrs\n # List: ADR objects in the platform\n self.adrs = []\n self.adr_ids = []\n\n self.short_matrix_path = self.matrix.split('/')[-1]\n self.short_read_dists = read_dists.split('/')[-1]\n self.short_protein_set = protein_set.split('/')[-1]\n self.cmpd_set = rm_compounds.split('/')[-1]\n self.data_name = ''\n\n if self.matrix:\n if self.protein_set:\n self.data_name = self.short_protein_set + '.' + self.short_matrix_path\n elif rm_compounds:\n self.data_name = self.cmpd_set + '.' + self.short_matrix_path\n if self.short_read_dists:\n self.data_name = self.short_read_dists\n\n ignored_set = []\n # create all of the compound objects from the compound map\n with open(c_map, 'r') as c_f:\n lines = c_f.readlines()\n header = lines[0]\n h2i = {}\n for i, h in enumerate(header.strip().split('\\t')):\n h2i[h] = i\n for l in lines[1:]:\n ls = l.strip().split('\\t')\n name = ls[h2i['GENERIC_NAME']]\n id_ = int(ls[h2i['CANDO_ID']])\n db_id = ls[h2i['DRUGBANK_ID']]\n index = id_\n cm = Compound(name, id_, index)\n\n include_cmpd = False\n if self.compound_set == 'all':\n include_cmpd = True\n tags = None\n elif isinstance(self.compound_set, str):\n tags = [self.compound_set]\n elif isinstance(self.compound_set, list):\n tags = self.compound_set\n else:\n tags = None\n print('compound_set flag has wrong input type, please input a string compound category (\"all\", '\n '\"approved\", etc) or a list of categories ([\"approved\", \"experimental\"])')\n quit()\n\n if 'DRUG_GROUPS' in h2i:\n stati = ls[h2i['DRUG_GROUPS']].split(';')\n if tags is not None:\n if len(list(set(tags) & set(stati))) > 0:\n include_cmpd = True\n else:\n ignored_set.append(id_)\n continue\n if 'approved' in stati:\n cm.status = 'approved'\n elif 'metabolite' in stati:\n cm.status = 'other'\n cm.is_metabolite = True\n else:\n cm.status = 'other'\n else:\n if self.compound_set != 'all':\n print('This mapping does not have drug groups/approval status - '\n 'please re-run with compound_set=\"all\".')\n sys.exit()\n cm.status = 'N/A'\n\n if include_cmpd:\n self.compounds.append(cm)\n self.compound_ids.append(id_)\n\n if self.compound_set and len(self.compounds) == 0:\n print('No compounds passed filtering, please check input parameters.')\n quit()\n\n # create the indication objects and add indications to the\n # already created compound objects from previous loop\n # NOTE: if a compound is in the indication mapping file that\n # isn't in the compound mapping file, an error will occur. 
I\n # had to remove those compounds from the indication mapping in\n # order for it to work\n with open(i_map, 'r') as i_f:\n lines = i_f.readlines()\n header = lines[0]\n h2i = {}\n for i, h in enumerate(header.strip().split('\\t')):\n h2i[h] = i\n for l in lines[1:]:\n ls = l.strip().split('\\t')\n c_id = int(ls[h2i['CANDO_ID']])\n if c_id in ignored_set:\n continue\n i_name = ls[h2i['INDICATION_NAME']]\n ind_id = ls[h2i['MESH_ID']]\n cm = self.get_compound(c_id, quiet=True)\n if cm:\n if ind_id in self.indication_ids:\n ind = self.get_indication(ind_id)\n ind.compounds.append(cm)\n else:\n ind = Indication(ind_id, i_name)\n ind.compounds.append(cm)\n self.indications.append(ind)\n self.indication_ids.append(ind.id_)\n cm.add_indication(ind)\n\n # add proteins, add signatures and such to compounds\n if self.protein_set:\n uniprots = []\n with open(self.protein_set, 'r') as psf:\n lines = psf.readlines()\n for line in lines:\n uni = line.strip()\n uniprots.append(uni)\n\n if matrix:\n if matrix[-4:] == '.fpt':\n print('The matrix file {} is in the old fpt format -- please '\n 'convert to tsv with the following line of code:'.format(matrix))\n print('>> Matrix({}, convert_to_tsv=True)'.format(matrix))\n quit()\n print('Reading signatures from matrix...')\n\n with open(matrix, 'r') as m_f:\n m_lines = m_f.readlines()\n if self.protein_set:\n print('Editing signatures according to proteins in {}...'.format(self.protein_set))\n targets, pdct_rev = self.uniprot_set_index(self.protein_set)\n new_i = 0\n matches = [0, 0]\n for l_i in range(len(m_lines)):\n vec = m_lines[l_i].strip().split('\\t')\n name = vec[0]\n if name in targets:\n scores = list(map(float, vec[1:]))\n if len(scores) != len(self.compounds):\n print('The number of compounds in {} does not match the '\n 'number of values in {} -- quitting.'.format(self.c_map, self.matrix))\n quit()\n p = Protein(name, scores)\n try:\n alt = pdct_rev[name]\n p.alt_id = alt\n matches[0] += 1\n except KeyError:\n matches[1] += 1\n self.proteins.append(p)\n self.protein_id_to_index[name] = new_i\n for i in range(len(scores)):\n s = scores[i]\n self.compounds[i].sig.append(s)\n new_i += 1\n else:\n continue\n print('\\tDirect UniProt matches:\\t{}\\n\\tDirect PDB matches: \\t{}'\n '\\n\\tNew signature length: \\t{}'.format(matches[1], matches[0], sum(matches)))\n if not sum(matches):\n print('Sorry, the input proteins did not match any proteins in the input matrix -- quitting.')\n quit()\n else:\n for l_i in range(len(m_lines)):\n vec = m_lines[l_i].strip().split('\\t')\n name = vec[0]\n scores = list(map(float, vec[1:]))\n if len(scores) != len(self.compounds):\n print('The number of compounds in {} does not match the '\n 'number of values in {} -- quitting.'.format(self.c_map, self.matrix))\n quit()\n p = Protein(name, scores)\n self.proteins.append(p)\n self.protein_id_to_index[name] = l_i\n for i in range(len(scores)):\n s = scores[i]\n self.compounds[i].sig.append(s)\n print('Done reading signatures.\\n')\n if pathways:\n print('Reading pathways...')\n if self.indication_pathways:\n print('Reading indication-pathway associations...')\n path_ind = {}\n with open(indication_pathways, 'r') as ipf:\n for l in ipf:\n ls = l.strip().split('\\t')\n pw = ls[0]\n ind_ids = ls[1:]\n path_ind[pw] = ind_ids\n\n with open(pathways, 'r') as pf:\n for l in pf:\n ls = l.strip().split('\\t')\n pw = ls[0]\n ps = ls[1:]\n if not ps:\n continue\n PW = Pathway(pw)\n self.pathways.append(PW)\n for p in ps:\n try:\n pi = self.protein_id_to_index[p]\n pro = 
self.proteins[pi]\n pro.pathways.append(PW)\n PW.proteins.append(pro)\n except KeyError:\n pass\n\n if self.indication_pathways:\n try:\n ind_ids = path_ind[pw]\n for ind_id in ind_ids:\n try:\n ind = self.get_indication(ind_id)\n except LookupError:\n continue\n PW.indications.append(ind)\n ind.pathways.append(PW)\n except KeyError:\n continue\n if not indication_pathways:\n self.quantify_pathways()\n print('Done reading pathways.')\n \n if self.ddi_compounds:\n print(\"Reading compound-compound associations...\")\n ddi = pd.read_csv(ddi_compounds, sep='\\t')\n for x in ddi.index:\n c1 = self.get_compound(int(ddi.loc[x,'CANDO_ID-1']))\n c2 = self.get_compound(int(ddi.loc[x,'CANDO_ID-2']))\n if c2 not in c1.compounds:\n c1.compounds.append(c2)\n if c1 not in c2.compounds:\n c2.compounds.append(c1)\n print('Done reading compound-compound associations.\\n')\n\n if self.ddi_adrs:\n print(\"Reading compound pair-adverse events associations...\")\n ddi = pd.read_csv(ddi_adrs,sep='\\t')\n # Create a unique set of tuples using CANDO IDs for compound pairs\n idss = list(zip(ddi.loc[:,'CANDO_ID-1'].values.tolist(),ddi.loc[:,'CANDO_ID-2'].values.tolist()))\n print(\" {} compound pair-adverse event associations.\".format(len(idss)))\n idss = list(set(idss))\n # Iterate through list of CANDO ID tuples\n for ids in idss: \n if ids in self.compound_pair_ids:\n cm_p = self.get_compound_pair(ids)\n elif (ids[1],ids[0]) in self.compound_pair_ids:\n cm_p = self.get_compound_pair((ids[1],ids[0]))\n else:\n names = (self.get_compound(ids[0]).name,self.get_compound(ids[1]).name)\n cm_p = Compound_pair(names, ids, ids)\n self.compound_pairs.append(cm_p)\n self.compound_pair_ids.append(ids)\n # Pull list of ADRs for this compound pair\n adrs = ddi.loc[(ddi['CANDO_ID-1']==ids[0]) & (ddi['CANDO_ID-2']==ids[1])]\n # Iterate through ADRs for this compound pair \n for x in adrs.index:\n #ADRs\n adr_name = ddi.loc[x,'CONDITION_MESH_NAME']\n adr_id = ddi.loc[x,'CONDITION_MESH_ID']\n if adr_id in self.adr_ids:\n adr = self.get_adr(adr_id)\n else:\n adr = ADR(adr_id,adr_name)\n self.adrs.append(adr)\n self.adr_ids.append(adr.id_)\n # Add comppund pair to ADR and vice versa\n cm_p.add_adr(adr)\n adr.compound_pairs.append(cm_p)\n print(\" {} compound pairs.\".format(len(self.compound_pairs)))\n print(\" {} adverse events.\".format(len(self.adrs)))\n print('Done reading compound pair-adverse event associations.\\n')\n \n '''\n for x in ddi.itertuples():\n #ADRs\n #adr_name = ddi.loc[x,'EVENT_NAME']\n adr_name = x[6]\n #adr_id = ddi.loc[x,'EVENT_UMLS_ID']\n adr_id = x[5]\n if adr_id in self.adr_ids:\n adr = self.get_adr(adr_id)\n else:\n adr = ADR(adr_id,adr_name)\n self.adrs.append(adr)\n self.adr_ids.append(adr.id_)\n # Compound pair\n ids = (int(x[1]),int(x[3]))\n #ids = (int(ddi.loc[x,'CANDO_ID-1']),int(ddi.loc[x,'CANDO_ID-2']))\n if ids in self.compound_pair_ids:\n cm_p = self.get_compound_pair(ids)\n elif (ids[1],ids[0]) in self.compound_pair_ids:\n cm_p = self.get_compound_pair((ids[1],ids[0]))\n else:\n #names = (x[1],x[3])\n names = (self.get_compound(ids[0]).name,self.get_compound(ids[1]).name)\n cm_p = Compound_pair(names, ids, ids)\n self.compound_pairs.append(cm_p)\n self.compound_pair_ids.append(ids)\n # Add comppund pair to ADR and vice versa\n cm_p.add_adr(adr)\n adr.compound_pairs.append(cm_p)\n print('Done reading compound-compound adverse event associations.\\n')\n '''\n '''\n print(\"Generating compound pairs...\")\n for i in range(len(self.compounds)):\n c1 = self.compounds[i]\n for j in 
range(i,len(self.compounds)):\n if i == j:\n continue\n c2 = self.compounds[j]\n names = (c1.name,c2.name)\n ids = (c1.id_,c2.id_)\n idxs = (c1.id_,c2.id_)\n cm_p = Compound_pair(names,ids,idxs)\n self.compound_pairs.append(cm_p)\n self.compound_pair_ids.append(ids)\n print(\"Done generating compound pairs.\\n\")\n '''\n print(\"Generating compound-compound signatures...\")\n for cm_p in self.compound_pairs:\n c1 = self.get_compound(cm_p.id_[0])\n c2 = self.get_compound(cm_p.id_[1])\n # Add signatures??\n cm_p.sig = [i+j for i,j in zip(c1.sig,c2.sig)]\n # max, min, mult?\n print(\"Done generating compound-compound signatures.\\n\")\n\n if self.indication_proteins:\n print('Reading indication-gene associations...')\n with open(indication_proteins, 'r') as igf:\n for l in igf:\n ls = l.strip().split('\\t')\n ind_id = ls[0]\n genes = ls[1].split(\";\")\n for p in genes:\n try:\n pi = self.protein_id_to_index[p]\n pro = self.proteins[pi]\n ind = self.get_indication(ind_id)\n ind.proteins.append(pro)\n pro.indications.append(ind)\n except KeyError:\n pass\n except LookupError:\n pass\n print('Done reading indication-gene associations.')\n\n if read_dists:\n print('Reading {} distances...'.format(self.dist_metric))\n with open(read_dists, 'r') as rrs:\n lines = rrs.readlines()\n for i in range(len(lines)):\n c1 = self.compounds[i]\n scores = lines[i].strip().split('\\t')\n if len(scores) != len(self.compounds):\n print('The number of compounds in {} does not match the '\n 'number of values in {} -- quitting.'.format(self.c_map, self.matrix))\n quit()\n for j in range(len(scores)):\n if i == j:\n continue\n else:\n s = float(scores[j])\n if similarity:\n s = 1 - s\n c1.similar.append((self.compounds[j], s))\n for c in self.compounds:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_computed = True\n c.similar_sorted = True\n print('Done reading {} distances.\\n'.format(self.dist_metric))\n\n # if compute distance is true, generate similar compounds for each\n if compute_distance and not read_dists:\n if self.pathways and not self.indication_pathways and not ddi_adrs:\n print('Computing distances using global pathway signatures...')\n for c in self.compounds:\n self.generate_similar_sigs(c, aux=True)\n \n # Still cleaning this code up.\n # Memory issues with full Twosides is a huge limitation\n elif ddi_adrs:\n print('Computing {} distances for compound pairs...'.format(self.dist_metric))\n # put all compound_pair signatures into 2D-array\n snp = [self.compound_pairs[i].sig for i in range(0, len(self.compound_pairs))]\n snp = np.array(snp) # convert to numpy form\n \n # call pairwise_distances, speed up with custom RMSD function and parallelism\n if self.dist_metric == \"rmsd\":\n distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(((u - v) ** 2).mean()), n_jobs=self.ncpus)\n distance_matrix = squareform(distance_matrix)\n #elif self.dist_metric in ['cosine']:\n # distance_matrix = cosine_dist(snp)\n # distance_matrix = squareform(distance_matrix, checks=False)\n elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:\n for i in range(len(self.compound_pairs)):\n print(\"{} of {}\".format(i+1,len(self.compound_pairs)))\n dists = cdist([snp[i]], snp, dist_metric)[0]\n self.compound_pairs[i].similar = dict(zip(self.compound_pairs, dists))\n self.compound_pairs[i].similar.pop(i)\n self.compound_pairs[i].similar_computed = True\n \n\n\n '''\n distance_matrix = 
pairwise_distances_chunked(snp, metric=self.dist_metric, \n force_all_finite=False,\n n_jobs=self.ncpus)\n print(\"pairwise is done.\")\n '''\n #distance_matrix = np.concatenate(list(distance_matrix), axis=0) \n #print(\"concat is done.\")\n \n #distance_matrix = pairwise_distances(snp, metric=self.dist_metric,\n # force_all_finite=False,\n # n_jobs=self.ncpus)\n \n # Removed checks in case the diagonal is very small (close to zero) but not zero.\n #distance_matrix = squareform(distance_matrix, checks=False)\n #print(\"squareform is done.\")\n \n #i = 0\n #cp_ids = [i.id_ for i in self.compound_pairs]\n #for cp in self.compound_pairs:\n #for i in range(len(self.compound_pairs)):\n '''\n for x in distance_matrix:\n for y in x:\n cp = self.compound_pairs[i]\n print(\"{} of {}\".format(i+1,len(self.compound_pairs)))\n cp.similar = dict(zip(self.compound_pairs, y))\n # Remove self similar\n del cp.similar[cp]\n # Completed simialr calc\n cp.similar_computed = True\n '''\n #print(distance_matrix[i])\n #dists = cdist([snp[i]], snp, dist_metric)[0]\n # Let us try dicts instead of list of tuples\n #self.compound_pairs[i].similar = dict(zip(self.compound_pairs, dists))\n #del self.compound_pairs[i].similar[self.compound_pairs[i]]\n #self.compound_pairs[i].similar = list(zip(self.compound_pairs, dists))\n #self.compound_pairs[i].similar = list(zip(self.compound_pairs, distance_matrix[i]))\n #self.compound_pairs[i].similar.pop(i)\n #distance_matrix = np.delete(distance_matrix, 0, 0)\n #cp.similar = dict(zip(cp_ids, distance_matrix[i]))\n \n # Sort similar\n #cp.similar = {k: v for k,v in sorted(cp.similar.items(), key=operator.itemgetter(1))} \n #cp.similar_sorted = True\n #i+=1\n #del distance_matrix\n \n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n exit()\n '''\n # step through the condensed matrix - add RMSDs to Compound.similar lists\n nc = len(self.compound_pairs)\n print(nc)\n n = 0\n for i in range(nc):\n for j in range(i, nc):\n c1 = self.compound_pairs[i]\n c2 = self.compound_pairs[j]\n if i == j:\n continue\n print(\"got both pairs\")\n r = distance_matrix[n]\n print(r)\n c1.similar.append((c2, r))\n c2.similar.append((c1, r))\n n += 1\n '''\n print('Done computing {} distances.\\n'.format(self.dist_metric))\n \n # sort the dists after saving (if desired)\n print('Sorting {} distances...'.format(self.dist_metric))\n i = 1\n for cp in self.compound_pairs:\n print(\"{} of {}\".format(i,len(self.compound_pairs)))\n cp.similar = {k: v for k,v in sorted(cp.similar.items(), key=operator.itemgetter(1))} \n #cp.similar = {k: v for k, v in sorted(cp.similar.items(), key=lambda item: item[1])} \n cp.similar_sorted = True\n i+=1\n print('Done sorting {} distances.\\n'.format(self.dist_metric))\n '''\n for c in self.compound_pairs:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_computed = True\n c.similar_sorted = True\n '''\n\n else:\n print('Computing {} distances...'.format(self.dist_metric))\n # put all compound signatures into 2D-array\n signatures = []\n for i in range(0, len(self.compounds)):\n signatures.append(self.compounds[i].sig)\n snp = np.array(signatures) # convert to numpy form\n # call pairwise_distances, speed up with custom RMSD function and parallelism\n if self.dist_metric == \"rmsd\":\n distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(np.mean((u - v)**2)), n_jobs=self.ncpus)\n distance_matrix = squareform(distance_matrix)\n elif self.dist_metric 
in ['correlation', 'euclidean', 'cityblock', 'cosine']:\n distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False, n_jobs=self.ncpus)\n #distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False, n_jobs=self.ncpus)\n # Removed checks in case the diagonal is very small (close to zero) but not zero.\n distance_matrix = squareform(distance_matrix, checks=False)\n #elif self.dist_metric in ['cosine']:\n # distance_matrix = cosine_dist(snp)\n # distance_matrix = squareform(distance_matrix, checks=False)\n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n exit()\n\n # step through the condensed matrix - add RMSDs to Compound.similar lists\n nc = len(self.compounds)\n n = 0\n for i in range(nc):\n for j in range(i, nc):\n c1 = self.compounds[i]\n c2 = self.compounds[j]\n if i == j:\n continue\n r = distance_matrix[n]\n c1.similar.append((c2, r))\n c2.similar.append((c1, r))\n n += 1\n print('Done computing {} distances.\\n'.format(self.dist_metric))\n \n if self.save_dists:\n def dists_to_str(cmpd, ci):\n o = []\n for si in range(len(cmpd.similar)):\n if ci == si:\n if self.similarity:\n o.append('1.0')\n else:\n o.append('0.0')\n s = cmpd.similar[si]\n o.append(str(s[1]))\n if len(o) < len(self.compounds):\n o.append('0.0')\n o = \"\\t\".join(o)\n o = o + '\\n'\n return o\n\n print('Saving {} distances...'.format(self.dist_metric))\n '''\n if adr_ddi:\n with open(self.save_dists, 'w') as srf:\n for ci in range(len(self.compound_pairs)):\n c = self.compound_pairs[ci]\n srf.write(dists_to_str(c, ci))\n else:\n with open(self.save_dists, 'w') as srf:\n for ci in range(len(self.compounds)):\n c = self.compounds[ci]\n srf.write(dists_to_str(c, ci))\n '''\n with open(self.save_dists, 'w') as srf:\n for ci in range(len(self.compounds)):\n c = self.compounds[ci]\n srf.write(dists_to_str(c, ci))\n print('Done saving {} distances.\\n'.format(self.dist_metric))\n\n if rm_compounds:\n print('Removing undesired compounds in {}...'.format(rm_compounds))\n with open(rm_compounds, 'r') as rcf:\n self.rm_cmpds = [int(line.strip().split('\\t')[0]) for line in rcf]\n self.compounds = [c for c in self.compounds if c.id_ not in self.rm_cmpds]\n for c in self.compounds:\n c.similar = [s for s in c.similar if s[0].id_ not in self.rm_cmpds]\n c.compounds = [s for s in c.compounds if s.id_ not in self.rm_cmpds]\n if self.matrix:\n for p in self.proteins:\n p.sig = [y for x, y in enumerate(p.sig) if x not in self.rm_cmpds]\n print('Done removing undesired compounds.\\n')\n\n # sort the RMSDs after saving (if desired)\n for c in self.compounds:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_computed = True\n c.similar_sorted = True\n\n if self.rm_zeros:\n print('Removing compounds with all-zero signatures...')\n\n def check_sig(sig):\n for s in sig:\n if s != 0.0:\n return True\n return False\n\n non_zero_compounds = []\n for c in self.compounds:\n if check_sig(c.sig):\n non_zero_compounds.append(c)\n self.compounds = non_zero_compounds\n print('Done removing compounds with all-zero signatures.\\n')\n\n if self.rm_zeros or self.rm_compounds:\n print('Filtering indication mapping...')\n for ind in self.indications:\n ind.compounds = [cmpd for cmpd in ind.compounds if cmpd.id_ not in self.rm_cmpds]\n print('Done filtering indication mapping.\\n')\n\n if compute_distance and not read_dists and (rm_compounds or rm_zeros):\n if self.pathways and not 
self.indication_pathways:\n print('Recomputing distances using global pathway signatures...')\n for c in self.compounds:\n self.generate_similar_sigs(c, aux=True)\n else:\n print('Recomputing {} distances...'.format(self.dist_metric))\n # put all compound signatures into 2D-array\n signatures = []\n for i in range(0, len(self.compounds)):\n signatures.append(self.compounds[i].sig)\n snp = np.array(signatures) # convert to numpy form\n # call pairwise_distances, speed up with custom RMSD function and parallelism\n if self.dist_metric == \"rmsd\":\n distance_matrix = pairwise_distances(snp, metric=lambda u, v: np.sqrt(np.mean((u - v)**2)),\n n_jobs=self.ncpus)\n distance_matrix = squareform(distance_matrix)\n elif self.dist_metric in ['correlation', 'euclidean', 'cityblock', 'cosine']:\n distance_matrix = pairwise_distances_chunked(snp, metric=self.dist_metric, \n force_all_finite=False,\n n_jobs=self.ncpus)\n distance_matrix = np.concatenate(list(distance_matrix), axis=0) \n #distance_matrix = pairwise_distances(snp, metric=self.dist_metric, force_all_finite=False,\n # n_jobs=self.ncpus)\n # Removed checks in case the diagonal is very small (close to zero) but not zero.\n distance_matrix = squareform(distance_matrix, checks=False)\n elif self.dist_metric in ['cosine']:\n distance_matrix = cosine_dist(snp)\n distance_matrix = squareform(distance_matrix, checks=False)\n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n exit()\n\n # step through the condensed matrix - add RMSDs to Compound.similar lists\n nc = len(self.compounds)\n n = 0\n for i in range(nc):\n for j in range(i, nc):\n c1 = self.compounds[i]\n c2 = self.compounds[j]\n if i == j:\n continue\n r = distance_matrix[n]\n c1.similar.append((c2, r))\n c2.similar.append((c1, r))\n n += 1\n print('Done recomputing {} distances.\\n'.format(self.dist_metric))\n\n if adr_map:\n print('Reading ADR mapping file...')\n with open(adr_map, 'r') as amf:\n lines = amf.readlines()\n header = lines[0]\n h2i = {}\n for i, h in enumerate(header.strip().split('\\t')):\n h2i[h] = i\n prev_id = -1\n lcount = 0\n for l in lines[1:]:\n ls = l.strip().split('\\t')\n adr_name = ls[h2i['CONDITION_MESH_NAME']]\n adr_id = ls[h2i['CONDITION_MESH_ID']]\n c_id = int(ls[h2i['CANDO_ID']])\n #adr_name = ls[h2i['condition_concept_name']]\n #c_id = int(ls[h2i['drug_cando_id']])\n #adr_id = ls[h2i['condition_meddra_id']]\n\n if c_id == -1:\n continue\n if prev_id == c_id:\n pass\n else:\n cmpd = self.get_compound(c_id, quiet=True)\n if cmpd is not None:\n prev_id = c_id\n else:\n # cmpd is not in CANDO - prevents from crashing\n continue\n try:\n adr = self.get_adr(adr_id)\n adr.compounds.append(cmpd)\n cmpd.adrs.append(adr)\n except LookupError:\n adr = ADR(adr_id, adr_name)\n adr.compounds.append(cmpd)\n cmpd.adrs.append(adr)\n self.adrs.append(adr)\n print('Read {} ADRs.'.format(len(self.adrs)))\n\n if protein_map:\n print('Reading Protein mapping file...')\n prot_df = pd.read_csv(protein_map,sep='\\t',index_col=0)\n for i in prot_df.index:\n p = self.get_protein(i)\n p.name = prot_df['uniprotRecommendedName'][i]\n p.gene = prot_df['geneName'][i]\n\n def search_compound(self, name, n=5):\n \"\"\"!\n Print closest Compound names/IDs for input search str\n @param name str: Compound name\n @param n int: Number of outputted compounds\n @return Returns None\n \"\"\"\n id_d = {}\n\n def return_names(x):\n id_d[x.name] = x.id_\n return x.name\n\n name = name.strip().lower().replace(' ', '_')\n cando_drugs = list(map(return_names, 
self.compounds))\n nms = difflib.get_close_matches(name, cando_drugs, n=n, cutoff=0.5)\n print('id\\tname')\n for nm in nms:\n print(\"{}\\t{}\".format(id_d[nm], nm))\n\n def get_compound(self, cmpd_id, quiet=False):\n \"\"\"!\n Get Compound object from Compound id or fuzzy match to Compound name\n @param cmpd_id int or str: Compound id or Compound name\n @return Returns object: Compound object or None if no exact match is found\n \"\"\"\n if type(cmpd_id) is int:\n for c in self.compounds:\n if c.id_ == cmpd_id:\n return c\n if not quiet:\n print(\"{0} not in {1}\".format(cmpd_id, self.c_map))\n return None\n elif type(cmpd_id) is str:\n id_d = {}\n\n def return_names(x):\n id_d[x.name] = x.id_\n return x.name\n\n cando_drugs = list(map(return_names, self.compounds))\n name = cmpd_id.strip().lower().replace(' ', '_')\n if name not in cando_drugs:\n print('\"{}\" is not in our mapping, here are the 5 closest results:'.format(name))\n self.search_compound(name, n=5)\n return None\n else:\n return self.get_compound(id_d[name])\n\n def get_compound_pair(self, ids):\n \"\"\"!\n Get Compound_pair object from Compound_pair id\n @param id_ int: Compound_pair id\n @return Returns object: Compound_pair object\n \"\"\"\n for c in self.compound_pairs:\n if c.id_ == ids:\n return c\n elif c.id_ == (ids[1],ids[0]):\n return c\n print(\"{0} not in {1}\".format(ids, self.ddi_adrs))\n return None\n\n def get_protein(self, protein_id):\n \"\"\"!\n Get Protein object from Protein id\n @param protein_id str: Protein name\n @return Returns object: Protein object\n \"\"\"\n if len(self.proteins) == 0 or not self.matrix:\n print('No matrix/proteins loaded -- quitting.')\n quit()\n for p in self.proteins:\n if p.id_ == protein_id:\n return p\n\n def get_indication(self, ind_id):\n \"\"\"!\n Get Indication object from Indication id\n @param ind_id str: Indication id\n @return Returns object: Indication object\n \"\"\"\n for i in self.indications:\n if i.id_ == ind_id:\n return i\n print('{} not in {}'.format(ind_id, self.i_map))\n raise LookupError\n\n def get_pathway(self, id_):\n \"\"\"!\n Get Pathway object from Pathway id\n @param id_ str: Pathway id\n @return Returns object: Pathway object\n \"\"\"\n for p in self.pathways:\n if p.id_ == id_:\n return p\n raise LookupError\n\n def get_adr(self, id_):\n \"\"\"!\n Get ADR (adverse drug reaction) from ADR id\n \n @param id_ str: ADR id\n @return Returns object: ADR object\n \"\"\"\n for a in self.adrs:\n if a.id_ == id_:\n return a\n raise LookupError\n\n def search_indication(self, name, n=5):\n \"\"\"!\n Print closest MeSH IDs for Indication name\n @param name str: Indication name\n @param n int: Number of outputted indications\n @return Returns None\n \"\"\"\n id_d = {}\n\n def return_names(x):\n id_d[x.name] = x.id_\n return x.name\n\n name = name.strip()\n cando_inds = list(map(return_names, self.indications))\n exact_matches = []\n for ci in cando_inds:\n if name in ci:\n exact_matches.append(ci)\n if exact_matches:\n print('Matches exactly containing {}:'.format(name))\n print('id \\tname')\n for em in exact_matches:\n print(\"{}\\t{}\".format(id_d[em], em))\n print()\n nms = difflib.get_close_matches(name, cando_inds, n=n, cutoff=0.3)\n print('Matches using string distance:')\n print('id \\tname')\n for nm in nms:\n print(\"{}\\t{}\".format(id_d[nm], nm))\n\n def top_targets(self, cmpd, n=10, negative=False, save_file=''):\n \"\"\"!\n Get the top scoring protein targets for a given compound\n @param cmpd Compound or int: Compound object or int id_ for 
which to print targets\n @param n int: number of top targets to print/return\n @param negative int: if the interaction scores are negative (stronger) energies\n @param save_file str: output file for results\n @return Returns list: list of tuples (protein id_, score)\n \"\"\"\n # print the list of the top targets\n if type(cmpd) is Compound:\n pass\n elif type(cmpd) is int:\n cmpd = self.get_compound(cmpd)\n else:\n print('Please enter a Compound object or integer id_ for a compound -- quitting.')\n quit()\n all_interactions = []\n sig = cmpd.sig\n for i in range(len(sig)):\n s = sig[i]\n p = self.proteins[i]\n all_interactions.append((p, s))\n if negative:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])\n else:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]\n if save_file:\n o = open(save_file,'w')\n o.write('rank\\tscore\\tindex\\tid\\tgene\\tname\\n')\n print('Compound is {}'.format(cmpd.name))\n print('rank\\tscore\\tindex\\tid\\tgene\\tname')\n for si in range(n):\n pr = interactions_sorted[si][0]\n print('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(si+1, round(interactions_sorted[si][1], 3),\n self.proteins.index(pr), pr.id_, pr.gene, pr.name))\n if save_file:\n o.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(si+1, round(interactions_sorted[si][1], 3),\n self.proteins.index(pr), pr.id_, pr.gene, pr.name))\n print()\n if save_file:\n o.close()\n return interactions_sorted[0:n]\n\n def common_targets(self, cmpds_file, n=10, negative=False, save_file=''):\n \"\"\"!\n Get the consensus top scoring protein targets for a set of compounds\n @param cmpds_file str: File containing a list of Compound IDs for which to search common targets\n @param n int: number of top targets to print/return\n @param negative int: if the interaction scores are negative (stronger) energies\n @param save_file str: save results to file name\n @return Returns list: list of tuples (protein id_, score)\n \"\"\"\n cs_df = pd.read_csv(cmpds_file,sep='\\t',header=None)\n sum_sig = [0]*len(self.get_compound(0).sig)\n for ci in cs_df.itertuples(index=False):\n try:\n s = self.get_compound(int(ci[0])).sig\n except:\n print(\"{} does not exist in the current drug library.\\n\".format(ci[0]))\n continue\n sum_sig = [i+j for i,j in zip(sum_sig,s)]\n # print the list of the top targets\n all_interactions = []\n for i in range(len(sum_sig)):\n s = sum_sig[i]\n p = self.proteins[i]\n all_interactions.append((p, s))\n if negative:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])\n else:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]\n if save_file:\n o = open(save_file,'w')\n o.write('rank\\tscore\\tindex\\tid\\tgene\\tname\\n')\n print('rank\\tscore\\tindex\\tid\\tgene\\tname')\n for si in range(n):\n pr = interactions_sorted[si][0]\n print('{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(si+1, round(interactions_sorted[si][1], 3),\n self.proteins.index(pr), pr.id_, pr.gene, pr.name))\n if save_file:\n o.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(si+1, round(interactions_sorted[si][1], 3),\n self.proteins.index(pr), pr.id_, pr.gene, pr.name))\n print()\n if save_file:\n o.close()\n return interactions_sorted[0:n]\n\n def virtual_screen(self, protein, n=10, negative=False, compound_set='all', save_file=''):\n \"\"\"!\n Get the top scoring compounds for a given protein\n @param protein Protein int or str: Protein (object, int index, or str id_) of which to screen for top scores\n @param n int: number of top compounds to print/return\n @param negative 
int: if the interaction scores are negative (stronger) energies\n @param compound_set str: use all Compounds ('all') or only approved Compounds ('approved')\n @param save_file str: save results to file name\n @return Returns None\n \"\"\"\n if type(protein) is Protein:\n prot = protein\n elif type(protein) is int:\n prot = self.proteins[protein]\n elif type(protein) is str:\n for p in self.proteins:\n if p.id_ == protein:\n prot = p\n\n # print the list of the top targets\n all_interactions = []\n sig = prot.sig\n for i in range(len(sig)):\n s = sig[i]\n c_id = self.compounds[i].id_\n #if c_id in self.rm_cmpds:\n # continue\n all_interactions.append((c_id, s))\n if negative:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])\n else:\n interactions_sorted = sorted(all_interactions, key=lambda x: x[1])[::-1]\n print('Protein is {}'.format(prot.id_))\n if save_file:\n o = open(save_file,'w')\n o.write('rank\\tscore\\tid\\tapproved\\tname\\n')\n print('rank\\tscore\\tid\\tapproved\\tname')\n printed = 0\n si = 0\n while printed < n:\n c = self.get_compound(interactions_sorted[si][0])\n #c = self.compounds[interactions_sorted[si][0]]\n if compound_set == 'approved':\n if c.status == 'approved':\n print('{}\\t{}\\t{}\\t{} \\t{}'.format(printed+1, round(interactions_sorted[si][1], 3), c.id_,\n 'true', c.name))\n if save_file:\n o.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(printed+1, round(interactions_sorted[si][1], 3), c.id_,\n 'true', c.name))\n printed += 1\n else:\n print('{}\\t{}\\t{}\\t{} \\t{}'.format(printed+1, round(interactions_sorted[si][1], 3),\n c.id_, str(c.status == 'approved').lower(),\n c.name))\n if save_file:\n o.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(printed+1, round(interactions_sorted[si][1], 3),\n c.id_, str(c.status == 'approved').lower(),\n c.name))\n printed += 1\n si += 1\n print()\n if save_file:\n o.close()\n return\n\n def uniprot_set_index(self, prots):\n \"\"\"!\n Gather proteins from input matrix that map to UniProt IDs from 'protein_set=' param\n @param prots list: UniProt IDs (str)\n @return Returns list: Protein chains (str) matching input UniProt IDs\n \"\"\"\n pre = os.path.dirname(__file__) + \"/data/v2.2+/\"\n if not os.path.exists('{}/mappings/pdb_2_uniprot.csv'.format(pre)):\n print('Downloading UniProt to PDB mapping file...')\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/pdb_2_uniprot.csv'\n dl_file(url, '{}/mappings/pdb_2_uniprot.csv'.format(pre))\n pdct = {}\n pdct_rev = {}\n with open('{}/mappings/pdb_2_uniprot.csv'.format(pre), 'r') as u2p:\n for l in u2p.readlines()[1:]:\n spl = l.strip().split(',')\n pdb = spl[0] + spl[1]\n uni = spl[2]\n try:\n if pdb not in pdct[uni]:\n pdct[uni].append(pdb)\n except KeyError:\n pdct[uni] = [pdb]\n pdct_rev[pdb] = uni\n targets = []\n with open(prots, 'r') as unisf:\n for lp in unisf:\n prot = lp.strip()\n targets.append(prot)\n #pdct_rev[prot] = lp.strip().upper()\n try:\n targets += pdct[lp.strip().upper()]\n except KeyError:\n pass\n return targets, pdct_rev\n\n def generate_similar_sigs(self, cmpd, sort=False, proteins=[], aux=False):\n \"\"\"!\n For a given compound, generate the similar compounds using distance of sigs.\n @param cmpd object: Compound object\n @param sort bool: Sort the list of similar compounds\n @param proteins list: Protein objects to identify a subset of the Compound signature\n @param aux bool: Use an auxiliary signature (default: False)\n @return Returns list: Similar Compounds to the given Compound\n \"\"\"\n # find index of query compound, 
collect signatures for both\n q = 0\n c_sig = []\n if proteins is None:\n c_sig = cmpd.sig\n elif proteins:\n for pro in proteins:\n index = self.protein_id_to_index[pro.id_]\n c_sig.append(cmpd.sig[index])\n else:\n if aux:\n c_sig = cmpd.aux_sig\n else:\n c_sig = cmpd.sig\n ca = np.array([c_sig])\n\n other_sigs = []\n for ci in range(len(self.compounds)):\n c = self.compounds[ci]\n if cmpd.id_ == c.id_:\n q = ci\n other = []\n if proteins is None:\n other_sigs.append(c.sig)\n elif proteins:\n for pro in proteins:\n index = self.protein_id_to_index[pro.id_]\n other.append(c.sig[index])\n other_sigs.append(other)\n else:\n if aux:\n other_sigs.append(c.aux_sig)\n else:\n other_sigs.append(c.sig)\n oa = np.array(other_sigs)\n \n # call cdist, speed up with custom RMSD function\n if self.dist_metric == \"rmsd\":\n distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)\n elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:\n distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n\n cmpd.similar = []\n # step through the cdist list - add RMSDs to Compound.similar list\n n = len(self.compounds)\n for i in range(n):\n c2 = self.compounds[i]\n if i == q:\n continue\n d = distances[0][i]\n cmpd.similar.append((c2, d))\n n += 1\n\n if sort:\n sorted_scores = sorted(cmpd.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n cmpd.similar = sorted_scores\n cmpd.similar_computed = True\n cmpd.similar_sorted = True\n return sorted_scores\n else:\n cmpd.similar_computed = True\n return cmpd.similar\n\n def generate_similar_sigs_cp(self, cmpd_pair, sort=False, proteins=[], aux=False):\n \"\"\"!\n For a given compound pair, generate the similar compound pairs using distance of sigs.\n @param cmpd_pair object: Compound_pair object\n @param sort bool: Sort the list of similar compounds\n @param proteins list: Protein objects to identify a subset of the Compound signature\n @param aux bool: Use an auxiliary signature (default: False)\n @return Returns list: Similar Compounds to the given Compound\n \"\"\"\n # find index of query compound, collect signatures for both\n q = 0\n cp_sig = []\n if proteins is None:\n cp_sig = cmpd_pair.sig\n elif proteins:\n for pro in proteins:\n index = self.protein_id_to_index[pro.id_]\n cp_sig.append(cmpd_pair.sig[index])\n else:\n if aux:\n cp_sig = cmpd_pair.aux_sig\n else:\n cp_sig = cmpd_pair.sig\n ca = np.array([cp_sig])\n\n other_sigs = []\n for ci in range(len(self.compound_pairs)):\n cp = self.compound_pairs[ci]\n if cmpd_pair.id_ == cp.id_:\n q = ci\n other = []\n if proteins is None:\n other_sigs.append(cp.sig)\n elif proteins:\n for pro in proteins:\n index = self.protein_id_to_index[pro.id_]\n other.append(cp.sig[index])\n other_sigs.append(other)\n else:\n if aux:\n other_sigs.append(cp.aux_sig)\n else:\n other_sigs.append(cp.sig)\n oa = np.array(other_sigs)\n \n # call cdist, speed up with custom RMSD function\n if self.dist_metric == \"rmsd\":\n distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)\n elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:\n distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n\n cmpd_pair.similar = []\n # step through the cdist list - add RMSDs to Compound.similar list\n n = 
len(self.compound_pairs)\n for i in range(n):\n c2 = self.compound_pairs[i]\n if i == q:\n continue\n d = distances[0][i]\n cmpd_pair.similar.append((c2, d))\n n += 1\n\n if sort:\n sorted_scores = sorted(cmpd_pair.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n cmpd_pair.similar = sorted_scores\n cmpd_pair.similar_computed = True\n cmpd_pair.similar_sorted = True\n return sorted_scores\n else:\n cmpd_pair.similar_computed = True\n return cmpd_pair.similar\n\n def generate_some_similar_sigs(self, cmpds, sort=False, proteins=[], aux=False):\n \"\"\"!\n For a given list of compounds, generate the similar compounds based on dist of sigs\n This is pathways/genes for all intents and purposes\n @param cmpds list: Compound objects\n @param sort bool: Sort similar compounds for each Compound\n @param proteins list: Protein objects to identify a subset of the Compound signature\n @param aux bool: Use an auxiliary signature (default: False)\n @return Returns list: Similar Compounds to the given Compound\n \"\"\"\n q = [cmpd.id_ for cmpd in cmpds]\n \n if proteins is None:\n ca = [cmpd.sig for cmpd in cmpds]\n oa = [cmpd.sig for cmpd in self.compounds]\n elif proteins:\n index = [self.protein_id_to_index[pro.id_] for pro in proteins]\n ca = [[cmpd.sig[i] for i in index] for cmpd in cmpds]\n oa = [[cmpd.sig[i] for i in index] for cmpd in self.compounds]\n else:\n if aux:\n ca = [cmpd.aux_sig for cmpd in cmpds]\n oa = [cmpd.aux_sig for cmpd in self.compounds]\n else:\n ca = [cmpd.sig for cmpd in cmpds]\n oa = [cmpd.sig for cmpd in self.compounds]\n ca = np.asarray(ca)\n oa = np.asarray(oa)\n \n # call cdist, speed up with custom RMSD function\n if self.dist_metric == \"rmsd\":\n distances = pairwise_distances(ca, oa, lambda u, v: np.sqrt(np.mean((u - v) ** 2)), n_jobs=self.ncpus)\n elif self.dist_metric in ['cosine', 'correlation', 'euclidean', 'cityblock']:\n distances = pairwise_distances(ca, oa, self.dist_metric, n_jobs=self.ncpus)\n else:\n print(\"Incorrect distance metric - {}\".format(self.dist_metric))\n\n # step through the cdist list - add RMSDs to Compound.similar list\n n = len(self.compounds)\n for j in range(len(cmpds)):\n cmpds[j].similar = []\n for i in range(n):\n c2 = self.compounds[i]\n id2 = c2.id_\n if id2 == q[j]:\n continue\n d = distances[j][i]\n cmpds[j].similar.append((c2, d))\n\n if sort:\n sorted_scores = sorted(cmpds[j].similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n cmpds[j].similar = sorted_scores\n cmpds[j].similar_computed = True\n cmpds[j].similar_sorted = True\n else:\n cmpds[j].similar_computed = True\n\n def quantify_pathways(self, indication=None):\n \"\"\"!\n Uses the pathway quantifier defined in the CANDO instantiation to make a\n pathway signature for all pathways in the input file (NOTE: does not compute distances)\n @param indication object: Indication object\n @return Returns None\n \"\"\"\n pq = self.pathway_quantifier\n if pq == 'max':\n func = max\n elif pq == 'sum':\n func = sum\n elif pq == 'avg':\n func = np.average\n elif pq == 'proteins':\n if not self.indication_pathways:\n print('Pathway quantifier \"proteins\" should only be used in combination with a '\n 'pathway-disease mapping (indication_pathways), quitting.')\n quit()\n func = None\n else:\n print('Please enter a proper pathway quantify method, quitting.')\n func = None\n quit()\n\n # this is a recursive function for checking if the pathways have proteins\n def check_proteins(paths):\n pl = [] # list of pathways with >1 protein\n n = 0\n for path in 
paths:\n if len(path.proteins) > 0:\n pl.append(path)\n n += 1\n if n > 0:\n return pl\n else:\n print('The associated pathways for this indication ({}) do not have enough proteins, '\n 'using all pathways'.format(indication.id_))\n return check_proteins(self.pathways)\n\n if indication:\n if len(indication.pathways) == 0:\n print('Warning: {} does not have any associated pathways - using all pathways'.format(indication.name))\n pws = self.pathways\n else:\n pws = check_proteins(indication.pathways)\n else:\n pws = check_proteins(self.pathways)\n\n for ci in range(len(self.compounds)):\n pw_sig_all = []\n c = self.compounds[ci]\n for pw in pws:\n if len(pw.proteins) == 0:\n print('No associated proteins for pathway {}, skipping'.format(pw.id_))\n continue\n pw_sig = []\n for p in pw.proteins:\n ch = p.id_\n ch_i = self.protein_id_to_index[ch]\n pw_sig.append(c.sig[ch_i])\n\n if pq == 'proteins':\n pw_sig_all += pw_sig\n else:\n pw_sig_all.append(pw_sig)\n if pq != 'proteins':\n c.aux_sig = list(map(func, pw_sig_all))\n else:\n c.aux_sig = pw_sig_all\n\n def results_analysed(self, f, metrics, effect_type):\n \"\"\"!\n Creates the results analysed named file for the benchmarking and\n computes final avg indication accuracies\n @param f str: File path for results analysed named\n @param metrics list: Cutoffs used for the benchmarking protocol\n @param effect_type str: Defines the effect as either an Indication (disease) or ADR (adverse reaction)\n @return Returns dct: dict of accuracies at each cutoff\n \"\"\"\n fo = open(f, 'w')\n effects = list(self.accuracies.keys())\n # Write header\n fo.write(\"{0}_id\\tcmpds_per_{0}\\ttop10\\ttop25\\ttop50\\ttop100\\ttopAll\\ttop1%\\t\"\n \"top5%\\ttop10%\\ttop50%\\ttop100%\\t{0}_name\\n\".format(effect_type))\n effects_sorted = sorted(effects, key=lambda x: (len(x[0].compounds), x[0].id_))[::-1]\n l = len(effects)\n final_accs = {}\n for m in metrics:\n final_accs[m] = 0.0\n for effect, c in effects_sorted:\n fo.write(\"{0}\\t{1}\\t\".format(effect.id_, c))\n accs = self.accuracies[(effect, c)]\n for m in metrics:\n n = accs[m]\n y = str(n / c * 100)[0:4]\n fo.write(\"{}\\t\".format(y))\n\n final_accs[m] += n / c / l\n fo.write(\"{}\\n\".format(effect.name))\n fo.close()\n return final_accs\n\n def canbenchmark(self, file_name, indications=[], continuous=False, bottom=False,\n ranking='standard', adrs=False):\n \"\"\"!\n Benchmarks the platform based on compound similarity of those approved for the same diseases\n @param file_name str: Name to be used for the various results files (e.g. file_name=test --> summary_test.tsv)\n @param indications list or str: List of Indication ids to be benchmarked, otherwise all will be used.\n @param continuous bool: Use the percentile of distances from the similarity matrix as the benchmarking cutoffs\n @param bottom bool: Reverse the ranking (descending) for the benchmark\n @param ranking str: What ranking method to use for the compounds. This really only affects ties. 
(standard,\n modified, and ordinal)\n @param adrs bool: ADRs are used as the Compounds' phenotypic effects instead of Indications\n @return Returns None\n \"\"\"\n\n if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):\n print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')\n exit()\n\n if not self.indication_proteins and not self.indication_pathways:\n if not self.compounds[0].similar_sorted:\n for c in self.compounds:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_sorted = True\n\n if not os.path.exists('./results_analysed_named'):\n print(\"Directory 'results_analysed_named' does not exist, creating directory\")\n os.system('mkdir results_analysed_named')\n if not os.path.exists('./raw_results'):\n print(\"Directory 'raw_results' does not exist, creating directory\")\n os.system('mkdir raw_results')\n\n ra_named = 'results_analysed_named/results_analysed_named-' + file_name + '.tsv'\n ra = 'raw_results/raw_results-' + file_name + '.csv'\n summ = 'summary-' + file_name + '.tsv'\n ra_out = open(ra, 'w')\n\n def effect_type():\n if adrs:\n return 'ADR'\n else:\n return 'disease'\n\n def competitive_standard_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] > r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n def competitive_modified_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] >= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive modified ranking code\n def competitive_modified(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] <= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive standard ranking code\n def competitive_standard(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] < r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n def filter_indications(ind_set):\n if not os.path.exists('v2.0/mappings/group_disease-top_level.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/mappings/group_disease-top_level.tsv'\n dl_file(url, 'v2.0/mappings/group_disease-top_level.tsv')\n path_ids = ['C01', 'C02', 'C03']\n with open('v2.0/mappings/group_disease-top_level.tsv', 'r') as fgd:\n for l in fgd:\n ls = l.strip().split('\\t')\n if ls[1] in path_ids:\n ind = self.get_indication(ls[0])\n ind.pathogen = True\n if ind_set == 'pathogen':\n return [indx for indx in self.indications if indx.pathogen]\n elif ind_set == 'human':\n return [indx for indx in self.indications if not indx.pathogen]\n else:\n print('Please enter proper indication set, options include \"pathogen\", \"human\", or \"all\".')\n quit()\n\n effect_dct = {}\n ss = []\n c_per_effect = 0\n\n if isinstance(indications, list) and len(indications) >= 1:\n effects = list(map(self.get_indication, indications))\n elif isinstance(indications, list) and len(indications) == 0 and not adrs:\n effects = self.indications\n elif adrs:\n effects = self.adrs\n else:\n if isinstance(indications, str):\n if indications == 'all':\n effects = self.indications\n else:\n effects = filter_indications(indications)\n\n def cont_metrics():\n all_v = []\n for c in self.compounds:\n for s in c.similar:\n if s[1] != 0.0:\n all_v.append(s[1])\n avl = len(all_v)\n all_v_sort = sorted(all_v)\n # for tuple 10, have to add the '-1' for index out of range reasons\n metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),\n (4, 
all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),\n (7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),\n (10, all_v_sort[int(avl/1.0)-1])]\n return metrics\n\n x = (len(self.compounds)) / 100.0 # changed this...no reason to use similar instead of compounds\n # had to change from 100.0 to 100.0001 because the int function\n # would chop off an additional value of 1 for some reason...\n if continuous:\n metrics = cont_metrics()\n else:\n metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),\n (6, int(x*1.0001)), (7, int(x*5.0001)), (8, int(x*10.0001)),\n (9, int(x*50.0001)), (10, int(x*100.0001))]\n\n if continuous:\n ra_out.write(\"compound_id,{}_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),\"\n \"1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),\"\n \"50%({:.3f}),100%({:.3f}),value\\n\".format(effect_type(), metrics[0][1], metrics[1][1],\n metrics[2][1], metrics[3][1], metrics[4][1],\n metrics[5][1], metrics[6][1], metrics[7][1],\n metrics[8][1], metrics[9][1]))\n else:\n ra_out.write(\"compound_id,{}_id,top10,top25,top50,top100,\"\n \"topAll,top1%,top5%,top10%,top50%,top100%,rank\\n\".format(effect_type()))\n\n for effect in effects:\n count = len(effect.compounds)\n if count < 2:\n continue\n if not adrs:\n if self.indication_pathways:\n if len(effect.pathways) == 0:\n print('No associated pathways for {}, skipping'.format(effect.id_))\n continue\n elif len(effect.pathways) < 1:\n #print('Less than 5 associated pathways for {}, skipping'.format(effect.id_))\n continue\n c_per_effect += count\n effect_dct[(effect, count)] = {}\n for m in metrics:\n effect_dct[(effect, count)][m] = 0.0\n # retrieve the appropriate proteins/pathway indices here, should be\n # incorporated as part of the ind object during file reading\n vs = []\n if self.pathways:\n if self.indication_pathways:\n if self.pathway_quantifier == 'proteins':\n for pw in effect.pathways:\n for p in pw.proteins:\n if p not in vs:\n vs.append(p)\n else:\n self.quantify_pathways(indication=effect)\n\n # Retrieve the appropriate protein indices here, should be\n # incorporated as part of the ind object during file reading\n if self.indication_proteins:\n dg = []\n for p in effect.proteins:\n if p not in dg:\n dg.append(p)\n\n cmpds = effect.compounds\n if self.pathways:\n if self.indication_pathways:\n if self.pathway_quantifier == 'proteins':\n if not vs:\n print('Warning: protein list empty for {}, using all proteins'.format(effect.id_))\n self.generate_some_similar_sigs(cmpds, sort=True, proteins=None, aux=True)\n else:\n self.generate_some_similar_sigs(cmpds, sort=True, proteins=vs, aux=True)\n else:\n self.generate_some_similar_sigs(cmpds, sort=True, aux=True)\n elif self.indication_proteins:\n if len(dg) < 2:\n self.generate_some_similar_sigs(cmpds, sort=True, proteins=None)\n else:\n self.generate_some_similar_sigs(cmpds, sort=True, proteins=dg)\n # call c.generate_similar_sigs()\n # use the proteins/pathways specified above\n\n for c in effect.compounds:\n for cs in c.similar:\n if adrs:\n if effect in cs[0].adrs:\n cs_dist = cs[1]\n else:\n continue\n else:\n if effect in cs[0].indications:\n cs_dist = cs[1]\n else:\n continue\n\n value = 0.0 \n if continuous:\n value = cs_dist\n elif bottom:\n if ranking == 'modified':\n value = competitive_modified_bottom(c.similar, cs_dist)\n elif ranking == 'standard':\n value = competitive_standard_bottom(c.similar, cs_dist)\n elif ranking == 'ordinal':\n value = 
c.similar.index(cs)\n else:\n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n elif ranking == 'modified':\n value = competitive_modified(c.similar, cs_dist)\n elif ranking == 'standard':\n value = competitive_standard(c.similar, cs_dist)\n elif ranking == 'ordinal':\n value = c.similar.index(cs)\n else: \n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n \n if adrs:\n s = [str(c.index), effect.name]\n else:\n s = [str(c.index), effect.id_]\n for x in metrics:\n if value <= x[1]:\n effect_dct[(effect, count)][x] += 1.0\n s.append('1')\n else:\n s.append('0')\n if continuous:\n s.append(str(value))\n else:\n s.append(str(int(value)))\n ss.append(s)\n break\n\n self.accuracies = effect_dct\n final_accs = self.results_analysed(ra_named, metrics, effect_type())\n ss = sorted(ss, key=lambda xx: int(xx[0]))\n top_pairwise = [0.0] * 10\n for s in ss:\n if s[2] == '1':\n top_pairwise[0] += 1.0\n if s[3] == '1':\n top_pairwise[1] += 1.0\n if s[4] == '1':\n top_pairwise[2] += 1.0\n if s[5] == '1':\n top_pairwise[3] += 1.0\n if s[6] == '1':\n top_pairwise[4] += 1.0\n if s[7] == '1':\n top_pairwise[5] += 1.0\n if s[8] == '1':\n top_pairwise[6] += 1.0\n if s[9] == '1':\n top_pairwise[7] += 1.0\n if s[10] == '1':\n top_pairwise[8] += 1.0\n if s[11] == '1':\n top_pairwise[9] += 1.0\n sj = ','.join(s)\n sj += '\\n'\n ra_out.write(sj)\n ra_out.close()\n\n cov = [0] * 10\n for effect, c in list(self.accuracies.keys()):\n accs = self.accuracies[effect, c]\n for m_i in range(len(metrics)):\n v = accs[metrics[m_i]]\n if v > 0.0:\n cov[m_i] += 1\n\n if continuous:\n headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',\n '10%ile', '20%ile', '33%ile', '50%ile', '100%ile']\n else:\n headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compounds)),\n 'top1%', 'top5%', 'top10%', 'top50%', 'top100%']\n # Create average indication accuracy list in percent\n ia = []\n for m in metrics:\n ia.append(final_accs[m] * 100.0)\n # Create average pairwise accuracy list in percent\n pa = [(x * 100.0 / len(ss)) for x in top_pairwise]\n # Indication coverage\n cov = map(int, cov)\n # Append 3 lists to df and write to file\n with open(summ, 'w') as sf:\n sf.write(\"\\t\" + '\\t'.join(headers) + '\\n')\n ast = \"\\t\".join(map(str, [format(x, \".3f\") for x in ia]))\n pst = \"\\t\".join(map(str, [format(x, \".3f\") for x in pa]))\n cst = \"\\t\".join(map(str, cov)) + '\\n'\n sf.write('aia\\t{}\\napa\\t{}\\nic\\t{}\\n'.format(ast, pst, cst))\n \n # pretty print the average indication accuracies\n cut = 0\n print(\"\\taia\")\n for m in metrics:\n print(\"{}\\t{:.3f}\".format(headers[cut], final_accs[m] * 100.0))\n cut += 1\n print('\\n')\n\n def canbenchmark_associated(self, file_name, indications=[], continuous=False, ranking='standard'):\n \"\"\"!\n Benchmark only the compounds in the indication mapping, aka get rid of \"noisy\" compounds.\n This function returns the filtered CANDO object in the event that you want to explore further.\n @param file_name str: Name to be used for the variosu results files (e.g. file_name=test --> summary_test.tsv)\n @param indications list: List of Indication ids to be used for this benchmark, otherwise all will be used.\n @param continuous bool: Use the percentile of distances from the similarity matrix as the benchmarking cutoffs\n @param ranking str: What ranking method to use for the compounds. 
This really only affects ties.\n (standard, modified, and ordinal)\n @return Returns None\n \"\"\"\n print(\"Making CANDO copy with only benchmarking-associated compounds\")\n cp = CANDO(self.c_map, self.i_map, self.matrix, compound_set=self.compound_set)\n good_cs = []\n good_ids = []\n for ind in cp.indications:\n if len(ind.compounds) >= 2:\n for c in ind.compounds:\n if c.id_ not in good_ids:\n good_cs.append(c)\n good_ids.append(c.id_)\n cp.compounds = good_cs\n \n print('Computing {} distances...'.format(self.dist_metric))\n\n for c in cp.compounds:\n cp.generate_similar_sigs(c, sort=True)\n good_sims = []\n for s in c.similar:\n if s[0].id_ not in good_ids:\n pass\n else:\n good_sims.append(s)\n c.similar = good_sims\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_computed = True\n c.similar_sorted = True\n \n print('Done computing {} distances.\\n'.format(self.dist_metric))\n \n cp.canbenchmark(file_name=file_name, indications=indications, continuous=continuous, ranking=ranking)\n\n def canbenchmark_bottom(self, file_name, indications=[], ranking='standard'):\n \"\"\"!\n Benchmark the reverse ranking of similar compounds as a control.\n @param file_name str: Name to be used for the variosu results files (e.g. file_name=test --> summary_test.tsv)\n @param indications list: List of Indication ids to be used for this benchmark, otherwise all will be used.\n @param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,\n modified, and ordinal)\n @return Returns None\n \"\"\"\n print(\"Making CANDO copy with reversed compound ordering\")\n cp = CANDO(self.c_map, self.i_map, self.matrix)\n \n print('Computing {} distances...'.format(self.dist_metric))\n \n for ic in range(len(cp.compounds)):\n cp.generate_similar_sigs(cp.compounds[ic], sort=True)\n sorted_scores = sorted(cp.compounds[ic].similar, key=lambda x: x[1])[::-1]\n cp.compounds[ic].similar = sorted_scores\n cp.compounds[ic].similar_computed = True\n cp.similar_sorted = True\n \n print('Done computing {} distances.\\n'.format(self.dist_metric))\n \n cp.canbenchmark(file_name=file_name, indications=indications, ranking=ranking, bottom=True)\n\n def canbenchmark_ndcg(self, file_name):\n \"\"\"!\n Benchmark using the normalized discounted cumulative gain metric\n @param file_name str: Name to be used for the results files (file_name=test --> summary_ndcg-test.tsv)\n @return Returns None\n \"\"\"\n def dcg(l,k):\n dcg = [((2**x)-1)/(math.log2(i+1)) for i,x in enumerate(l[:k],1)]\n return np.sum(dcg)\n\n k_s = [10,25,50,100,len(self.compounds),0.01*len(self.compounds),0.05*len(self.compounds),0.10*len(self.compounds),0.50*len(self.compounds),len(self.compounds)]\n i_accs = {}\n c_accs = {}\n nz_counts = {}\n for k in range(len(k_s)):\n i_accs[k] = {}\n c_accs[k] = []\n nz_counts[k] = 0\n for ind in self.indications:\n if len(ind.compounds) < 2:\n continue\n approved_ids = [i.id_ for i in ind.compounds]\n acc = {}\n for k in range(len(k_s)):\n acc[k] = []\n for c in ind.compounds:\n if not c.similar_sorted:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_sorted = True\n c_ideal = [0]*len(c.similar)\n for x in range(len(approved_ids)):\n c_ideal[x] = 1\n c_rank = []\n for x in c.similar:\n if x[0].id_ in approved_ids:\n c_rank.append(1)\n else:\n c_rank.append(0)\n for k in range(len(k_s)):\n 
acc[k].append(dcg(c_rank,int(k_s[k]))/dcg(c_ideal,int(k_s[k])))\n c_accs[k].append((c.id_,ind.id_,dcg(c_rank,int(k_s[k]))/dcg(c_ideal,int(k_s[k]))))\n for k in range(len(k_s)):\n i_accs[k][ind.id_] = (ind,np.mean(acc[k]))\n for k in range(len(k_s)):\n # Non-zero ndcg\n i_accs_nz = [i_accs[k][x][1] for x in i_accs[k] if i_accs[k][x][1] > 0.0]\n nz_counts[k] = len(i_accs_nz)\n # Write NDCG results per indication in results_analysed_named\n if not os.path.exists('./results_analysed_named/'):\n os.system('mkdir results_analysed_named')\n with open(\"results_analysed_named/results_analysed_named_ndcg-{}.tsv\".format(file_name), 'w') as o:\n o.write(\"disease_id\\tcmpds_per_disease\\ttop10\\ttop25\\ttop50\\ttop100\\ttop{}\\ttop1%\\ttop5%\\ttop10%\\ttop50%\\ttop100%\\tdisease_name\\n\".format(len(self.compounds)))\n for x in i_accs[0]:\n o.write(\"{}\\t{}\".format(i_accs[0][x][0].id_,len(i_accs[0][x][0].compounds)))\n for k in range(len(k_s)):\n o.write(\"\\t{:.3f}\".format(i_accs[k][x][1]))\n o.write(\"\\t{}\\n\".format(i_accs[0][x][0].name))\n # Write NDCG results per compound-indication pair in raw_results\n if not os.path.exists('./raw_results/'):\n os.system('mkdir raw_results')\n with open(\"raw_results/raw_results_ndcg-{}.csv\".format(file_name), 'w') as o:\n o.write(\"compound_id,disease_id,top10,top25,top50,top100,top{},top1%,top5%,top10%,top50%,top100%\\n\".format(len(self.compounds)))\n for x in range(len(c_accs[0])):\n o.write(\"{},{}\".format(c_accs[0][x][0],c_accs[0][x][1]))\n for k in range(len(k_s)):\n o.write(\",{:.3f}\".format(c_accs[k][x][2]))\n o.write(\"\\n\")\n # Write a summary file for NDCG\n with open(\"summary_ndcg-{}.tsv\".format(file_name), 'w') as o:\n o.write(\"\\ttop10\\ttop25\\ttop50\\ttop100\\ttop{}\\ttop1%\\ttop5%\\ttop10%\\ttop50%\\ttop100%\\n\".format(len(self.compounds)))\n o.write(\"ai-ndcg\")\n for k in range(len(k_s)):\n o.write(\"\\t{:.3f}\".format(np.mean(list(zip(*i_accs[k].values()))[1])))\n o.write(\"\\n\")\n o.write(\"ap-ndcg\")\n for k in range(len(k_s)):\n o.write(\"\\t{:.3f}\".format(np.mean(list(zip(*c_accs[k]))[2])))\n o.write(\"\\n\")\n o.write(\"ic-ndcg\")\n for k in range(len(k_s)):\n o.write(\"\\t{}\".format(int(nz_counts[k])))\n o.write(\"\\n\")\n #print(\"NDCG averaged over {} indications = {}\".format(len(i_accs),np.mean(list(zip(*i_accs.values()))[1])))\n #print(\"Pairwise NDCG averaged over {} compound-indication pairs = {}\".format(len(c_accs),np.mean(list(zip(*c_accs))[3])))\n\n def canbenchmark_cluster(self, n_clusters=5):\n \"\"\"!\n Benchmark using k-means clustering\n @param n_clusters int: Number of clusters for k-means\n @return Returns None\n \"\"\"\n def cluster_kmeans(cmpds):\n def f(x):\n return x.sig\n\n def g(x):\n return x.indications\n\n def h(x):\n return x.id_\n\n sigs = np.array(list(map(f, cmpds)))\n pca = PCA(n_components=10).fit(sigs)\n sigs = pca.transform(sigs)\n inds = np.array(list(map(g, cmpds)))\n ids = np.array(list(map(h, cmpds)))\n sigs_train, sigs_test, inds_train, inds_test, ids_train, ids_test = train_test_split(sigs, inds, ids,\n test_size=0.20,\n random_state=1)\n clusters = KMeans(n_clusters, random_state=1).fit(sigs_train)\n return clusters, sigs_test, inds_train, inds_test, ids_train, ids_test\n\n # Calculate the K means clusters for all compound signatures\n cs, sigs_test, inds_train, inds_test, ids_train, ids_test = cluster_kmeans(self.compounds)\n labels = cs.labels_\n\n # Determine how many compounds are in each cluster\n # Plot the results and output the mean, median, and range\n c_clusters = 
[0] * n_clusters\n for l in labels:\n c_clusters[l] += 1\n '''\n all_clusters = range(n_clusters)\n plt.scatter(all_clusters,c_clusters)\n plt.text(1, 1, \"Average cluster size = {}\".format(np.mean(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)\n plt.text(1, 1, \"Median cluster size = {}\".format(np.median(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)\n plt.text(1, 1, \"Range of cluster sizes = {}\".format(np.min(c_clusters), np.max(c_clusters)), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)\n plt.savefig(\"cluster_size.png\")\n '''\n # Map the labels for each compound to the cluster_id for each compound object\n for ci in range(len(labels)):\n self.compounds[ids_train[ci]].cluster_id = labels[ci]\n\n total_acc = 0.0\n total_count = 0\n\n # Calculate the benchmark accuracy by\n # mimicking classic benchmark -- leave one out\n # and recapture at least one for each indication-drug pair\n for i in range(len(sigs_test)):\n lab = cs.predict(sigs_test[i].reshape(1,-1))\n for ind in inds_test[i]:\n for c in range(len(inds_train)):\n done = False\n for ind_train in inds_train[c]:\n if ind.name == ind_train.name and lab[0] == labels[c]:\n total_acc+=1.0\n done = True\n break\n if done:\n break\n total_count += 1\n\n print(\"Number of cluster = {}\".format(n_clusters))\n print(\"Mean cluster size = {}\".format(np.mean(c_clusters)))\n print(\"Median cluster size = {}\".format(np.median(c_clusters)))\n print(\"Range of cluster sizes = [{},{}]\".format(np.min(c_clusters), np.max(c_clusters)))\n print(\"% Accuracy = {}\".format(total_acc / total_count * 100.0))\n\n def compounds_analysed(self, f, metrics):\n fo = open(f, 'w')\n cmpds = list(self.accuracies.keys())\n cmpds_sorted = sorted(cmpds, key=lambda x: (len(x[0].compounds), x[0].id_))[::-1]\n l = len(cmpds)\n final_accs = {}\n for m in metrics:\n final_accs[m] = 0.0\n for cmpd, c in cmpds_sorted:\n fo.write(\"{0}\\t{1}\\t\".format(cmpd.id_, c))\n accs = self.accuracies[(cmpd,c)]\n for m in metrics:\n n = accs[m]\n y = str(n / c * 100)[0:4]\n fo.write(\"{}\\t\".format(y))\n\n final_accs[m] += n / c / l\n fo.write(\"|\\t{}\\n\".format(cmpd.name))\n fo.close()\n return final_accs\n\n def canbenchmark_compounds(self, file_name, adrs=[], continuous=False,\n bottom=False, ranking='standard'):\n \"\"\"!\n Benchmarks the platform based on compound similarity of those known to interact with other compounds.\n @param file_name str: Name to be used for the various results files (e.g. file_name=test --> summary_test.tsv)\n @param adrs list: List of ADR ids to be used for this benchmark, otherwise all will be used.\n @param continuous bool: Use the percentile of distances from the similarity matrix as the cutoffs for\n benchmarking\n @param bottom bool: Reverse the ranking (descending) for the benchmark\n @param ranking str: What ranking method to use for the compounds. This really only affects ties. 
(standard,\n modified, and ordinal)\n @return Returns None\n \"\"\"\n if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):\n print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')\n exit()\n\n if not self.indication_proteins and not self.indication_pathways:\n if not self.compounds[0].similar_sorted:\n for c in self.compounds:\n sorted_scores = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n c.similar = sorted_scores\n c.similar_sorted = True\n\n if not os.path.exists('./results_analysed_named'):\n print(\"Directory 'results_analysed_named' does not exist, creating directory\")\n os.system('mkdir results_analysed_named')\n if not os.path.exists('./raw_results'):\n print(\"Directory 'raw_results' does not exist, creating directory\")\n os.system('mkdir raw_results')\n\n ra_named = 'results_analysed_named/results_analysed_named_' + file_name + '-cmpds.tsv'\n ra = 'raw_results/raw_results_' + file_name + '-cmpds.csv'\n summ = 'summary_' + file_name + '-cmpds.tsv'\n ra_out = open(ra, 'w')\n\n def effect_type():\n if adrs:\n return 'ADR'\n else:\n return 'disease'\n\n def competitive_standard_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] > r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n def competitive_modified_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] >= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive modified ranking code\n def competitive_modified(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] <= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive standard ranking code\n def competitive_standard(sims, r):\n rank = 0\n for sim in sims:\n if sim[1] < r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n cmpd_dct = {}\n ss = []\n c_per_cmpd = 0\n\n def cont_metrics():\n all_v = []\n for c in self.compounds:\n for s in c.similar:\n if s[1] != 0.0:\n all_v.append(s[1])\n avl = len(all_v)\n all_v_sort = sorted(all_v)\n # for tuple 10, have to add the '-1' for index out of range reasons\n metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),\n (4, all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),\n (7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),\n (10, all_v_sort[int(avl/1.0)-1])]\n return metrics\n\n x = (len(self.compounds)) / 100.0 # changed this...no reason to use similar instead of compounds\n # had to change from 100.0 to 100.0001 because the int function\n # would chop off an additional value of 1 for some reason...\n if continuous:\n metrics = cont_metrics()\n else:\n metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),\n (6, int(x*1.0001)), (7, int(x*5.0001)), (8, int(x*10.0001)),\n (9, int(x*50.0001)), (10, int(x*100.0001))]\n\n if continuous:\n ra_out.write(\"compound_id,compound_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),\"\n \"1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),\"\n \"50%({:.3f}),100%({:.3f}),value\\n\".format(metrics[0][1], metrics[1][1],\n metrics[2][1], metrics[3][1], metrics[4][1],\n metrics[5][1], metrics[6][1], metrics[7][1],\n metrics[8][1], metrics[9][1]))\n else:\n ra_out.write(\"compound_id,compound_id,top10,top25,top50,top100,\"\n \"topAll,top1%,top5%,top10%,top50%,top100%,rank\\n\")\n\n for cmpd in self.compounds:\n count = len(cmpd.compounds)\n if count < 2:\n continue\n 
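# c_per_cmpd accumulates the number of associated compounds across benchmarked compounds;\n # cmpd_dct maps each (compound, count) pair to a hit count for every cutoff in metrics.\n 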
c_per_cmpd += count\n cmpd_dct[(cmpd, count)] = {}\n for m in metrics:\n cmpd_dct[(cmpd, count)][m] = 0.0\n # retrieve the appropriate proteins/pathway indices here, should be\n # incorporated as part of the ind object during file reading\n vs = []\n\n for c in cmpd.compounds:\n for cs in c.similar:\n if cs[0] in cmpd.compounds:\n #if cmpd in cs[0].compounds:\n cs_rmsd = cs[1]\n else:\n continue\n\n value = 0.0\n if continuous:\n value = cs_rmsd\n elif bottom:\n if ranking == 'modified':\n value = competitive_modified_bottom(c.similar, cs_rmsd)\n elif ranking == 'standard':\n value = competitive_standard_bottom(c.similar, cs_rmsd)\n elif ranking == 'ordinal':\n value = c.similar.index(cs)\n else:\n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n elif ranking == 'modified':\n value = competitive_modified(c.similar, cs_rmsd)\n elif ranking == 'standard':\n value = competitive_standard(c.similar, cs_rmsd)\n elif ranking == 'ordinal':\n value = c.similar.index(cs)\n else:\n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n\n s = [str(c.index), str(cmpd.id_)]\n for x in metrics:\n if value <= x[1]:\n cmpd_dct[(cmpd, count)][x] += 1.0\n s.append('1')\n else:\n s.append('0')\n if continuous:\n s.append(str(value))\n else:\n s.append(str(value))\n ss.append(s)\n break\n\n self.accuracies = cmpd_dct\n final_accs = self.compounds_analysed(ra_named, metrics)\n ss = sorted(ss, key=lambda xx: int(xx[0]))\n top_pairwise = [0.0] * 10\n for s in ss:\n if s[2] == '1':\n top_pairwise[0] += 1.0\n if s[3] == '1':\n top_pairwise[1] += 1.0\n if s[4] == '1':\n top_pairwise[2] += 1.0\n if s[5] == '1':\n top_pairwise[3] += 1.0\n if s[6] == '1':\n top_pairwise[4] += 1.0\n if s[7] == '1':\n top_pairwise[5] += 1.0\n if s[8] == '1':\n top_pairwise[6] += 1.0\n if s[9] == '1':\n top_pairwise[7] += 1.0\n if s[10] == '1':\n top_pairwise[8] += 1.0\n if s[11] == '1':\n top_pairwise[9] += 1.0\n sj = ','.join(s)\n sj += '\\n'\n ra_out.write(sj)\n ra_out.close()\n\n cov = [0] * 10\n for cmpd, c in list(self.accuracies.keys()):\n accs = self.accuracies[cmpd, c]\n for m_i in range(len(metrics)):\n v = accs[metrics[m_i]]\n if v > 0.0:\n cov[m_i] += 1\n\n if continuous:\n headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',\n '10%ile', '20%ile', '33%ile', '50%ile', '100%ile']\n else:\n headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compounds)),\n 'top1%', 'top5%', 'top10%', 'top50%', 'top100%']\n # Create average indication accuracy list in percent\n ia = []\n for m in metrics:\n ia.append(final_accs[m] * 100.0)\n # Create average pairwise accuracy list in percent\n pa = [(x * 100.0 / len(ss)) for x in top_pairwise]\n # Indication coverage\n cov = map(int, cov)\n # Append 3 lists to df and write to file\n with open(summ, 'w') as sf:\n sf.write(\"\\t\" + '\\t'.join(headers) + '\\n')\n ast = \"\\t\".join(map(str, [format(x, \".3f\") for x in ia]))\n pst = \"\\t\".join(map(str, [format(x, \".3f\") for x in pa]))\n cst = \"\\t\".join(map(str, cov)) + '\\n'\n sf.write('aia\\t{}\\napa\\t{}\\nic\\t{}\\n'.format(ast, pst, cst))\n\n # pretty print the average indication accuracies\n cut = 0\n print(\"\\taia\")\n for m in metrics:\n print(\"{}\\t{:.3f}\".format(headers[cut], final_accs[m] * 100.0))\n cut += 1\n print('\\n')\n\n def canbenchmark_ddi(self, file_name, adrs=[], continuous=False,\n bottom=False, ranking='standard'):\n \"\"\"!\n Benchmarks the platform based on compound pairs known to cause ADRs\n @param file_name str: Name to be used for the results 
files (file_name=test --> summary_test-ddi_adr.tsv)\n @param continuous bool: Use the percentile of distances from the similarity matrix as the cutoffs for\n benchmarking\n @param bottom bool: Reverse the ranking (descending) for the benchmark\n @param ranking str: What ranking method to use for the compounds. This really only affects ties. (standard,\n modified, and ordinal)\n @return Returns None\n \"\"\"\n\n adrs = True\n '''\n if (continuous and self.indication_pathways) or (continuous and self.indication_proteins):\n print('Continuous benchmarking and indication-based signatures are not compatible, quitting.')\n exit()\n '''\n if not self.indication_proteins and not self.indication_pathways:\n if not self.compound_pairs[0].similar_sorted:\n #if not self.compound_pairs[0].similar_sorted and not associated:\n for cm_p in self.compound_pairs:\n cm_p.similar = {k: v for k, v in sorted(cm_p.similar.items(), key=lambda item: item[1])} \n #sorted_scores = sorted(cm_p.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n #cm_p.similar = sorted_scores\n cm_p.similar_sorted = True\n\n if not os.path.exists('./results_analysed_named'):\n print(\"Directory 'results_analysed_named' does not exist, creating directory\")\n os.system('mkdir results_analysed_named')\n if not os.path.exists('./raw_results'):\n print(\"Directory 'raw_results' does not exist, creating directory\")\n os.system('mkdir raw_results')\n\n ra_named = 'results_analysed_named/results_analysed_named_' + file_name + '-ddi_adr.tsv'\n ra = 'raw_results/raw_results_' + file_name + '-ddi_adr.csv'\n summ = 'summary_' + file_name + '-ddi_adr.tsv'\n ra_out = open(ra, 'w')\n\n def effect_type():\n if adrs:\n return 'ADR'\n else:\n return 'disease'\n\n def competitive_standard_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sims[sim] > r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n def competitive_modified_bottom(sims, r):\n rank = 0\n for sim in sims:\n if sims[sim] >= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive modified ranking code\n def competitive_modified(sims, r):\n rank = 0\n for sim in sims:\n if sims[sim] <= r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n # Competitive standard ranking code\n def competitive_standard(sims, r):\n rank = 0\n for sim in sims:\n if sims[sim] < r:\n rank += 1.0\n else:\n return rank\n return len(sims)\n\n effect_dct = {}\n ss = []\n c_per_effect = 0\n\n if adrs:\n effects = self.adrs\n else:\n effects = self.indications\n\n def cont_metrics():\n all_v = []\n for c in self.compound_pairs:\n for c_sim in c.similar:\n c_dist = c.similar[c_sim]\n if c_dist != 0.0:\n all_v.append(c_dist)\n avl = len(all_v)\n all_v_sort = sorted(all_v)\n # for tuple 10, have to add the '-1' for index out of range reasons\n metrics = [(1, all_v_sort[int(avl/1000.0)]), (2, all_v_sort[int(avl/400.0)]), (3, all_v_sort[int(avl/200.0)]),\n (4, all_v_sort[int(avl/100.0)]), (5, all_v_sort[int(avl/20.0)]), (6, all_v_sort[int(avl/10.0)]),\n (7, all_v_sort[int(avl/5.0)]), (8, all_v_sort[int(avl/3.0)]), (9, all_v_sort[int(avl/2.0)]),\n (10, all_v_sort[int(avl/1.0)-1])]\n return metrics\n\n x = (len(self.compound_pairs)) / 100.0 # changed this...no reason to use similar instead of compounds\n # had to change from 100.0 to 100.0001 because the int function\n # would chop off an additional value of 1 for some reason...\n if continuous:\n metrics = cont_metrics()\n else:\n metrics = [(1, 10), (2, 25), (3, 50), (4, 100), (5, int(x*100.0001)),\n (6, int(x*1.0001)), 
(7, int(x*5.0001)), (8, int(x*10.0001)),\n (9, int(x*50.0001)), (10, int(x*100.0001))]\n\n if continuous:\n ra_out.write(\"compound_id,{}_id,0.1%({:.3f}),0.25%({:.3f}),0.5%({:.3f}),\"\n \"1%({:.3f}),5%({:.3f}),10%({:.3f}),20%({:.3f}),33%({:.3f}),\"\n \"50%({:.3f}),100%({:.3f}),value\\n\".format(effect_type(), metrics[0][1], metrics[1][1],\n metrics[2][1], metrics[3][1], metrics[4][1],\n metrics[5][1], metrics[6][1], metrics[7][1],\n metrics[8][1], metrics[9][1]))\n else:\n ra_out.write(\"compound_id,{}_id,top10,top25,top50,top100,\"\n \"topAll,top1%,top5%,top10%,top50%,top100%,rank\\n\".format(effect_type()))\n\n print(\"Running canbenchmark...\")\n for effect in effects:\n count = len(effect.compound_pairs)\n if count < 2:\n continue\n if not adrs:\n if self.indication_pathways:\n if len(effect.pathways) == 0:\n print('No associated pathways for {}, skipping'.format(effect.id_))\n continue\n elif len(effect.pathways) < 1:\n #print('Less than 5 associated pathways for {}, skipping'.format(effect.id_))\n continue\n c_per_effect += count\n effect_dct[(effect, count)] = {}\n for m in metrics:\n effect_dct[(effect, count)][m] = 0.0\n # retrieve the appropriate proteins/pathway indices here, should be\n # incorporated as part of the ind object during file reading\n vs = []\n if self.pathways:\n if self.indication_pathways:\n if self.pathway_quantifier == 'proteins':\n for pw in effect.pathways:\n for p in pw.proteins:\n if p not in vs:\n vs.append(p)\n else:\n self.quantify_pathways(indication=effect)\n\n # Retrieve the appropriate protein indices here, should be\n # incorporated as part of the ind object during file reading\n if self.indication_proteins:\n dg = []\n for p in effect.proteins:\n if p not in dg:\n dg.append(p)\n\n c = effect.compound_pairs\n if self.pathways:\n if self.indication_pathways:\n if self.pathway_quantifier == 'proteins':\n if not vs:\n print('Warning: protein list empty for {}, using all proteins'.format(effect.id_))\n self.generate_some_similar_sigs(c, sort=True, proteins=None, aux=True)\n else:\n self.generate_some_similar_sigs(c, sort=True, proteins=vs, aux=True)\n else:\n self.generate_some_similar_sigs(c, sort=True, aux=True)\n elif self.indication_proteins:\n if len(dg) < 2:\n self.generate_some_similar_sigs(c, sort=True, proteins=None)\n else:\n self.generate_some_similar_sigs(c, sort=True, proteins=dg)\n # call c.generate_similar_sigs()\n # use the proteins/pathways specified above\n\n for c in effect.compound_pairs:\n for c_sim in c.similar:\n c_dist = c.similar[c_sim]\n if adrs:\n if effect not in c_sim.adrs:\n continue\n else:\n if effect not in c_sim.indications:\n continue\n\n value = 0.0\n if continuous:\n value = c_dist\n elif bottom:\n if ranking == 'modified':\n value = competitive_modified_bottom(c.similar, c_dist)\n elif ranking == 'standard':\n value = competitive_standard_bottom(c.similar, c_dist)\n elif ranking == 'ordinal':\n #value = c.similar.index(cs)\n value = list(c.similar).index(c_sim)\n else:\n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n elif ranking == 'modified':\n value = competitive_modified(c.similar, c_dist)\n elif ranking == 'standard':\n value = competitive_standard(c.similar, c_dist)\n elif ranking == 'ordinal':\n #value = c.similar.index(cs)\n value = list(c.similar).index(c_sim)\n else:\n print(\"Ranking function {} is incorrect.\".format(ranking))\n exit()\n\n if adrs:\n s = [str(c.index), effect.name]\n else:\n s = [str(c.index), effect.id_]\n for x in metrics:\n if value <= x[1]:\n 
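# Hit at this cutoff: credit the per-effect accumulator and mark '1' in the raw-results row.\n 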
effect_dct[(effect, count)][x] += 1.0\n s.append('1')\n else:\n s.append('0')\n if continuous:\n s.append(str(value))\n else:\n s.append(str(int(value)))\n ss.append(s)\n break\n\n self.accuracies = effect_dct\n final_accs = self.results_analysed(ra_named, metrics, effect_type())\n ss = sorted(ss, key=lambda xx: xx[0])\n #ss = sorted(ss, key=lambda xx: int(xx[0]))\n top_pairwise = [0.0] * 10\n for s in ss:\n if s[2] == '1':\n top_pairwise[0] += 1.0\n if s[3] == '1':\n top_pairwise[1] += 1.0\n if s[4] == '1':\n top_pairwise[2] += 1.0\n if s[5] == '1':\n top_pairwise[3] += 1.0\n if s[6] == '1':\n top_pairwise[4] += 1.0\n if s[7] == '1':\n top_pairwise[5] += 1.0\n if s[8] == '1':\n top_pairwise[6] += 1.0\n if s[9] == '1':\n top_pairwise[7] += 1.0\n if s[10] == '1':\n top_pairwise[8] += 1.0\n if s[11] == '1':\n top_pairwise[9] += 1.0\n sj = ','.join(s)\n sj += '\\n'\n ra_out.write(sj)\n ra_out.close()\n\n cov = [0] * 10\n for effect, c in list(self.accuracies.keys()):\n accs = self.accuracies[effect, c]\n for m_i in range(len(metrics)):\n v = accs[metrics[m_i]]\n if v > 0.0:\n cov[m_i] += 1\n\n if continuous:\n headers = ['0.1%ile', '.25%ile', '0.5%ile', '1%ile', '5%ile',\n '10%ile', '20%ile', '33%ile', '50%ile', '100%ile']\n else:\n headers = ['top10', 'top25', 'top50', 'top100', 'top{}'.format(len(self.compound_pairs)),\n 'top1%', 'top5%', 'top10%', 'top50%', 'top100%']\n # Create average indication accuracy list in percent\n ia = []\n for m in metrics:\n ia.append(final_accs[m] * 100.0)\n # Create average pairwise accuracy list in percent\n pa = [(x * 100.0 / len(ss)) for x in top_pairwise]\n # Indication coverage\n cov = map(int, cov)\n # Append 3 lists to df and write to file\n with open(summ, 'w') as sf:\n sf.write(\"\\t\" + '\\t'.join(headers) + '\\n')\n ast = \"\\t\".join(map(str, [format(x, \".3f\") for x in ia]))\n pst = \"\\t\".join(map(str, [format(x, \".3f\") for x in pa]))\n cst = \"\\t\".join(map(str, cov)) + '\\n'\n sf.write('aia\\t{}\\napa\\t{}\\nic\\t{}\\n'.format(ast, pst, cst))\n\n # pretty print the average indication accuracies\n cut = 0\n print(\"\\taia\")\n for m in metrics:\n print(\"{}\\t{:.3f}\".format(headers[cut], final_accs[m] * 100.0))\n cut += 1\n print('\\n')\n\n def ml(self, method='rf', effect=None, benchmark=False, adrs=False, predict=[], threshold=0.5,\n negative='random', seed=42, out=''):\n \"\"\"!\n Create an ML classifier for a specified indication to make drug-disease predictions or all inds for benchmarking\n @param method str: type of machine learning algorithm to use ('rf' or 'log')\n @param effect Indication or ADR: provide a specific Indication or ADR object to train a classifer\n @param benchmark bool: benchmark the ML pipeline by training a classifier with LOOCV for each Indication or ADR\n @param adrs bool: if the models are trained with ADRs instead of Indications\n @param predict list: provide a list of Compound objects to classify with the model (only used in\n combination with effect=Indication/ADR object)\n @param threshold float: decision threshold for positive vs negative classification\n @param negative str: choose random negative samples (default) or 'inverse' for most opposite signatures\n @param seed int: choose a seed for reproducibility\n @param out str: file name extension for the output of benchmark (note: must have benchmark=True)\n @return Returns None\n \"\"\"\n\n if method in ['1csvm', 'svm']:\n print('SVMs are currently unsupported by this version of cando.py. 
Please choose \"log\" or \"rf\" - quitting.')\n quit()\n\n if out:\n if not os.path.exists('./raw_results/'):\n os.system('mkdir raw_results')\n if not os.path.exists('./results_analysed_named/'):\n os.system('mkdir results_analysed_named')\n\n paired_negs = {}\n\n # gather approved compound signatures for training\n def split_cs(efct, cmpd=None):\n mtrx = []\n for cm in efct.compounds:\n if cmpd:\n if cm.id_ == cmpd.id_:\n continue\n if self.indication_proteins:\n if len(efct.proteins) >= 3:\n eps = []\n for ep in efct.proteins:\n ep_index = self.protein_id_to_index[ep.id_]\n eps.append(cm.sig[ep_index])\n mtrx.append(eps)\n else:\n mtrx.append(cm.sig)\n return mtrx, [1] * len(mtrx)\n\n def choose_negatives(efct, neg_set=negative, s=None, hold_out=None, avoid=[], test=None):\n if neg_set == 'inverse':\n if not self.compute_distance and not self.read_dists:\n print('Please compute all compound-compound distances before using inverse_negatives().\\n'\n 'Re-run with \"compute_distance=True\" or read in pre-computed distance file \"read_dists=\"'\n 'in the CANDO object instantiation -- quitting.')\n quit()\n negatives = []\n used = avoid\n\n def pick_first_last(cmpd, s):\n if neg_set == 'inverse':\n r = int(len(self.compounds) / 2)\n shuffled = [cx[0].id_ for cx in cmpd.similar][::-1][0:r]\n else:\n shuffled = [cx.id_ for cx in self.compounds]\n if s:\n random.seed(s)\n random.shuffle(shuffled)\n else:\n s = random.randint(0, len(self.compounds) - 1)\n random.seed(s)\n random.shuffle(shuffled)\n for si in range(len(shuffled)):\n n = shuffled[si]\n if n in used:\n continue\n inv = self.get_compound(n)\n if inv not in efct.compounds:\n if n not in used:\n paired_negs[cmpd] = inv\n return inv\n\n if test:\n inv = pick_first_last(c, s)\n return inv\n\n for ce in efct.compounds:\n if hold_out:\n if ce.id_ == hold_out.id_:\n continue\n inv = pick_first_last(ce, s)\n if self.indication_proteins:\n if len(efct.proteins) >= 3:\n eps = []\n for ep in efct.proteins:\n ep_index = self.protein_id_to_index[ep.id_]\n eps.append(inv.sig[ep_index])\n negatives.append(eps)\n else:\n negatives.append(inv.sig)\n used.append(inv.id_)\n return negatives, [0] * len(negatives), used\n\n def model(meth, samples, labels, params=None, seed=None):\n if meth == 'rf':\n m = RandomForestClassifier(n_estimators=100, random_state=seed)\n m.fit(samples, labels)\n return m\n elif meth == 'svm':\n m = svm.SVC(kernel='rbf', gamma='scale', degree=3, random_state=seed)\n m.fit(samples, labels)\n return m\n elif meth == '1csvm':\n keep = []\n for i in range(len(samples)):\n if labels[i] == 1:\n keep.append(samples[i])\n m = svm.OneClassSVM(kernel='poly', gamma='scale', degree=2)\n m.fit(keep)\n return m\n elif meth == 'log':\n m = LogisticRegression(penalty='l2', solver='newton-cg', random_state=seed)\n m.fit(samples, labels)\n return m\n else:\n print(\"Please enter valid machine learning method ('rf', '1csvm', 'log', or 'svm')\")\n quit()\n\n if benchmark:\n if adrs:\n effects = sorted(self.adrs, key=lambda x: (len(x.compounds), x.id_))[::-1]\n else:\n effects = sorted(self.indications, key=lambda x: (len(x.compounds), x.id_))[::-1]\n if out:\n frr = open('./raw_results/raw_results_ml_{}'.format(out), 'w')\n frr.write('Compound,Effect,Prob,Neg,Neg_prob\\n')\n fran = open('./results_analysed_named/results_analysed_named_ml_{}'.format(out), 'w')\n fsum = open('summary_ml-{}'.format(out), 'w')\n else:\n if len(effect.compounds) < 1:\n print('No compounds associated with {} ({}), quitting.'.format(effect.name, effect.id_))\n quit()\n 
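# A single effect also needs at least three associated proteins when indication-specific protein signatures are used.\n 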
elif self.indication_proteins and len(effect.proteins) <= 2:\n print('Less than 3 proteins associated with {} ({}), quitting.'.format(effect.name, effect.id_))\n effects = [effect]\n\n rf_scores = []\n for e in effects:\n if len(e.compounds) < 2:\n continue\n if self.indication_proteins:\n if not len(e.proteins) >= 3:\n continue\n tp_fn = [0, 0]\n fp_tn = [0, 0]\n for c in e.compounds:\n pos = split_cs(e, cmpd=c)\n negs = choose_negatives(e, s=seed, hold_out=c, avoid=[])\n already_used = negs[2]\n train_samples = np.array(pos[0] + negs[0])\n train_labels = np.array(pos[1] + negs[1])\n mdl = model(method, train_samples, train_labels, seed=seed)\n test_neg = choose_negatives(e, s=seed, avoid=already_used, test=c)\n if self.indication_proteins:\n eps_pos = []\n eps_neg = []\n for ep in e.proteins:\n ep_index = self.protein_id_to_index[ep.id_]\n eps_pos.append(c.sig[ep_index])\n eps_neg.append(test_neg.sig[ep_index])\n pred = mdl.predict_proba(np.array([eps_pos]))\n pred_neg = mdl.predict_proba(np.array([eps_neg]))\n else:\n pred = mdl.predict_proba(np.array([c.sig]))\n pred_neg = mdl.predict_proba(np.array([test_neg.sig]))\n pos_class = list(mdl.classes_).index(1)\n\n if pred[0][pos_class] > threshold:\n tp_fn[0] += 1\n else:\n tp_fn[1] += 1\n if pred_neg[0][pos_class] > threshold:\n fp_tn[0] += 1\n else:\n fp_tn[1] += 1\n if benchmark and out:\n frr.write('{},{},{},{},{}\\n'.format(c.id_, e.id_, pred[0][pos_class],\n test_neg.id_, pred_neg[0][pos_class]))\n\n # predict whether query drugs are associated with this indication\n if predict:\n print('Indication: {}'.format(e.name))\n print('Leave-one-out cross validation: TP={}, FP={}, FN={}, TN={}, Acc={:0.3f}'.format(\n tp_fn[0], fp_tn[0], tp_fn[1], fp_tn[1], 100 * ((tp_fn[0]+fp_tn[1]) / (float(len(e.compounds))*2))))\n negs = choose_negatives(e, s=seed)\n pos = split_cs(e)\n train_samples = np.array(pos[0] + negs[0])\n train_labels = np.array(pos[1] + negs[1])\n mdl = model(method, train_samples, train_labels, seed=seed)\n print('\\tCompound\\tProb')\n for c in predict:\n inv = choose_negatives(effect, s=seed, test=c, avoid=negs[2])\n if self.indication_proteins:\n eps_pos = []\n eps_neg = []\n for ep in e.proteins:\n ep_index = self.protein_id_to_index[ep.id_]\n eps_pos.append(c.sig[ep_index])\n eps_neg.append(test_neg.sig[ep_index])\n pred = mdl.predict_proba(np.array([eps_pos]))\n pred_neg = mdl.predict_proba(np.array([test_neg.sig]))\n else:\n pred = mdl.predict_proba(np.array([c.sig]))\n pred_inv = mdl.predict_proba(np.array([inv.sig]))\n pos_class = list(mdl.classes_).index(1)\n\n print('\\t{}\\t{:0.3f}'.format(c.name, pred[0][pos_class]))\n #print('\\t{}\\t{:0.3f}\\t(random negative of {})'.format(inv.name, pred_inv[0][pos_class], c.name))\n\n # append loocv results to combined list\n rf_scores.append((e, tp_fn, fp_tn))\n\n sm = [0, 0, 0, 0]\n if benchmark:\n for rf_score in rf_scores:\n efct = rf_score[0]\n tfp = rf_score[1]\n ffp = rf_score[2]\n acc = (tfp[0] + ffp[1]) / (float(len(efct.compounds) * 2))\n sm[0] += len(efct.compounds)\n sm[1] += acc\n sm[2] += (acc * len(efct.compounds))\n if acc > 0.5:\n sm[3] += 1\n if out:\n fran.write('{}\\t{}\\t{}\\t{}\\t{:0.3f}\\t{}\\n'.format(efct.id_, len(efct.compounds),\n tfp[0], tfp[1], 100 * acc, efct.name))\n if out:\n fsum.write('aia\\t{:0.3f}\\n'.format(100 * (sm[1]/len(rf_scores))))\n fsum.write('apa\\t{:0.3f}\\n'.format(100 * (sm[2] / sm[0])))\n fsum.write('ic\\t{}\\n'.format(sm[3]))\n\n print('aia\\t{:0.3f}'.format(100 * (sm[1]/len(rf_scores))))\n print('apa\\t{:0.3f}'.format(100 * 
(sm[2] / sm[0])))\n print('ic\\t{}'.format(sm[3]))\n return\n\n def raw_results_roc(self, rr_files, labels, save='roc-raw_results.pdf'):\n\n if len(labels) != len(rr_files):\n print('Please enter a label for each input raw results file '\n '({} files, {} labels).'.format(len(rr_files), len(labels)))\n quit()\n\n n_per_d = {}\n dt = {}\n ds = {}\n metrics = {}\n truth = []\n scores = []\n for rr_file in rr_files:\n for l in open(rr_file, 'r').readlines()[1:]:\n ls = l.strip().split(',')\n pp = float(ls[2])\n truth.append(1)\n scores.append(pp)\n\n np = float(ls[4])\n truth.append(0)\n scores.append(np)\n if ls[1] not in n_per_d:\n n_per_d[ls[1]] = 1\n else:\n n_per_d[ls[1]] += 1\n pr = average_precision_score(truth, scores)\n fpr, tpr, thrs = roc_curve(truth, scores)\n area = roc_auc_score(truth, scores)\n dt[rr_file] = truth\n ds[rr_file] = scores\n metrics[rr_file] = [fpr, tpr, thrs, area, pr]\n\n plt.figure()\n lw = 2\n for rr_file in rr_files:\n i = rr_files.index(rr_file)\n [fpr, tpr, thrs, area, pr] = metrics[rr_file]\n plt.plot(fpr, tpr, lw=lw, label='{} (AUC-ROC={}, AUPR={})'.format(labels[i], format(area, '.3f'),\n format(pr, '.3f')))\n plt.plot([0, 1], [0, 1], lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.legend(loc=\"lower right\", prop={'size': 8})\n if save:\n plt.savefig(save, dpi=300)\n plt.show()\n\n def canpredict_denovo(self, method='count', threshold=0.0, topX=10, ind_id=None, proteins=None,\n minimize=None, consensus=True, cmpd_set='all', save=''):\n \"\"\"!\n This function is used for predicting putative therapeutics for an indication\n of interest by summing/counting the number of interactions above a certain input interaction\n threshold for all proteins or a specified subset of proteins. An indication can be specified to\n mark drugs associated with that indication in the output. The threshold will vary based on the\n values of the input matrix. Method can be 'count' (score1), which ranks compounds based on the\n number of interactions above the threshold, 'sum' (score2), which ranks the compounds based on the\n highest total sum for interaction scores above the threshold (these two are highly correlated but can\n differ for larger sets of proteins or lower thresholds), 'min', which first ranks by 'count' then re-ranks\n based on the summed interactions with the proteins in the input 'minimize' list - this list should contain\n proteins IDs towards which the user wants low interaction scores - or 'diff', which ranks by the difference of\n sums and the summed scores from off-targets in 'minimize'. A fifth option is 'targets', which inspects\n and outputs the top protein interactions on an individual basis without summing/counting per drug (the\n output format differs from the other two options). If indication_proteins flag is used for\n the CANDO object instantiation, the proteins associated with the input indication will automatically\n be used. Otherwise, the 'proteins=' input can be used. The output can be saved to a file specified\n by 'save='. 
If ind_id is used, compounds associated with the indication will be included and marked\n in the output for comparison.\n @param method str: 'sum', 'count', or 'targets'\n @param threshold float: a interaction score cutoff to use (ignores values for sum/count less than threshold)\n @param topX int: top number of predicted Compounds to be printed/saved\n @param ind_id str: an indication id for marking drug output/ specifying protein set\n @param proteins List str: list of protein IDs to use from the matrix\n @param minimize List str: list of protein IDs to treat as 'off targets' to avoid, ranking\n @param consensus bool: if True, only compounds with score1 >= 2 will be printed\n @param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')\n @param save str: name of a file to save results\n @return Returns None\n \"\"\"\n\n if ind_id:\n ind = self.get_indication(ind_id)\n c_dct = {}\n top_hits = []\n min_hits = []\n if self.indication_proteins and ind_id:\n indices = []\n for p in ind.proteins:\n indices.append(self.protein_id_to_index[p.id_])\n elif proteins:\n indices = []\n for p in proteins:\n if type(p) is str:\n indices.append(self.protein_id_to_index[p])\n elif type(p) is int:\n indices.append(p)\n elif type(p) is Protein:\n indices.append(self.protein_id_to_index[p.id_])\n else:\n indices = range(len(self.proteins))\n if minimize is None:\n minimize = []\n for c in self.compounds:\n ss = 0.0\n count = 0\n min_ss = 0.0\n min_count = 0\n for pi in indices:\n si = float(c.sig[pi])\n p = self.proteins[pi]\n if si >= threshold:\n if p.id_ in minimize:\n min_ss += si\n min_count += 1\n top_hits.append((p.id_, c, si, False))\n else:\n ss += si\n count += 1\n top_hits.append((p.id_, c, si, True))\n if ind_id:\n already_approved = ind in c.indications\n else:\n already_approved = False # Not relevant since there is no indication\n c_dct[c.id_] = [ss, count, already_approved, min_ss, min_count]\n\n if method == 'sum':\n sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], x[1][1]))[::-1]\n elif method == 'count':\n sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][0]))[::-1]\n elif method == 'min':\n sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][1], x[1][3]*-1))[::-1]\n elif method == 'diff':\n sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0] - x[1][3]))[::-1]\n elif method == 'targets':\n sp = sorted(top_hits, key=lambda x: x[2])[::-1]\n print('target \\tscore\\toff_target\\tid\\tapproved\\tname')\n if save:\n fo = open(save, 'w')\n fo.write('target \\tscore\\toff_target\\tid\\tapproved\\tname\\n')\n for s in sp:\n co = s[1]\n if cmpd_set == 'approved':\n if co.status == 'approved' or (co in ind.compounds):\n pass\n else:\n continue\n st = '{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(s[0].ljust(8), round(s[2], 3), co.id_,\n str(s[3]).lower().ljust(10),\n (str(co.status == 'approved').lower()).ljust(8), co.name)\n print(st)\n fo.write(st + '\\n')\n return\n\n else:\n sorted_x = []\n print('Please enter a valid ranking method -- quitting.')\n quit()\n if save:\n fo = open(save, 'w')\n fo.write('rank\\tscore1\\tscore2\\toffhits\\tdiff\\tid\\tapproved\\tname\\n')\n print(\"Printing the {} highest predicted compounds...\\n\".format(topX))\n i = 0\n print('rank\\tscore1\\tscore2\\toffhits\\tdiff\\tid\\tapproved\\tname')\n for p in enumerate(sorted_x):\n if i >= topX != -1:\n break\n else:\n if consensus and p[1][1][1] <= 1:\n if i == 0:\n print('\\n\\tFAILED - there are no compounds with score1 >= 2 -- change the\\n'\n '\\targuments to include 
\"consensus=False\" to print results with\\n'\n '\\tscore1 == 1, or lower the threshold.\\n')\n break\n co = self.get_compound(p[1][0])\n if cmpd_set == 'approved':\n if co.status != 'approved':\n if ind_id:\n if co in ind.compounds:\n pass\n else:\n continue\n else:\n continue\n if p[1][1][2]:\n diff = str(round(p[1][1][0] - p[1][1][3], 3))[0:7].ljust(7)\n st = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(i + 1, p[1][1][1], str(round(p[1][1][0], 3))[0:7],\n str(round(p[1][1][3], 3))[0:7].ljust(7), diff, co.id_,\n (str(co.status == 'approved').lower() + '+').ljust(8),\n co.name)\n else:\n diff = str(round(p[1][1][0] - p[1][1][3], 3))[0:7].ljust(7)\n st = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(i + 1, p[1][1][1], str(round(p[1][1][0], 3))[0:7],\n str(round(p[1][1][3], 3))[0:7].ljust(7), diff, co.id_,\n (str(co.status == 'approved').lower()).ljust(8),\n co.name)\n print(st)\n i += 1\n if save:\n fo.write(st + '\\n')\n return\n\n def canpredict_compounds(self, ind_id, n=10, topX=10, consensus=True, keep_associated=False, cmpd_set='all',\n save=''):\n \"\"\"!\n This function is used for predicting putative therapeutics for an indication\n of interest using a homology-based approach. Input an ind_id id and for each of the\n associated compounds, it will generate the similar compounds (based on distance) and add\n them to a dictionary with a value of how many times it shows up (enrichment). If a\n compound not approved for the indication of interest keeps showing\n up, that means it is similar in signature to the drugs that are\n ALREADY approved for the indication, so it may be a target for repurposing.\n Control how many similar compounds to consider with the argument 'n'. In the output, 'score1'\n refers to the number of times the compound shows up in the top 'n' drugs associated with\n the indication and 'score2' is the average of the ranks for 'score1' (note: 'score2' <= 'n').\n \n @param ind_id str: Indication id\n @param n int: top number of similar Compounds to be used for each Compound associated with the given Indication\n @param topX int: top number of predicted Compounds to be printed\n @param consensus bool: if True, only compounds with at least 2 votes will be printed\n @param keep_associated bool: Print Compounds that are already approved/associated for the Indication\n @param cmpd_set str: specify the compound set to use ('all', 'approved', or 'other')\n @param save str: name of a file to save results\n @return Returns None\n \"\"\"\n\n if int(topX) == -1:\n topX = len(self.compounds)-1\n if int(n) == -1:\n n = len(self.compounds)-1\n\n i = self.indication_ids.index(ind_id)\n ind = self.indications[i]\n print(\"{0} compounds found for {1} --> {2}\".format(len(ind.compounds), ind.id_, ind.name))\n\n if self.pathways:\n if self.indication_pathways:\n self.quantify_pathways(ind)\n else:\n self.quantify_pathways()\n for c in ind.compounds:\n if c.similar_computed:\n continue\n if self.pathways:\n self.generate_similar_sigs(c, aux=True, sort=True)\n elif self.indication_proteins:\n self.generate_similar_sigs(c, sort=True, proteins=ind.proteins)\n else:\n self.generate_similar_sigs(c, sort=True)\n\n print(\"Generating compound predictions using top{} most similar compounds...\\n\".format(n))\n c_dct = {}\n for c in ind.compounds:\n c2_i = 0\n c_count = 0\n while c_count < n:\n c2 = c.similar[c2_i]\n if c2[0].status != 'approved' and cmpd_set == 'approved':\n c2_i += 1\n continue\n if c2[1] == 0.0:\n c2_i += 1\n continue\n already_approved = ind in c2[0].indications\n k = 
c2[0].id_\n if k not in c_dct:\n c_dct[k] = [1, already_approved, c_count]\n else:\n c_dct[k][0] += 1\n c_dct[k][2] += c_count\n c2_i += 1\n c_count += 1\n\n sorted_x = sorted(c_dct.items(), key=lambda x: (x[1][0], (-1 * (x[1][2] / x[1][0]))))[::-1]\n i = 0\n if save:\n fo = open(save, 'w')\n fo.write('rank\\tscore1\\tscore2\\tprobability\\tid\\tapproved\\tname\\n')\n else:\n print('rank\\tscore1\\tscore2\\tprobability\\tid\\tapproved\\tname')\n hg_dct = {}\n for p in enumerate(sorted_x):\n if i >= topX != -1:\n break\n co = self.get_compound(p[1][0])\n if cmpd_set == 'approved':\n if co.status != 'approved':\n continue\n if not keep_associated and p[1][1][1]:\n continue\n if consensus and p[1][1][0] <= 1:\n if i == 0:\n print('\\n\\tFAILED - there are no compounds with score1 >= 2 -- change the\\n'\n '\\targuments to include \"consensus=False\" to print results with\\n'\n '\\tscore1 == 1, and/or increase \"n\". \\n')\n break\n if p[1][1][0] in hg_dct:\n prb = hg_dct[p[1][1][0]]\n else:\n prb_success = 1 / (len(self.compounds) - 1) * n\n prb = '%.2e' % Decimal(1.0 - stats.binom.cdf(p[1][1][0], len(ind.compounds), prb_success))\n hg_dct[p[1][1][0]] = prb\n if p[1][1][1]:\n st = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(i + 1, p[1][1][0], round(p[1][1][2] / p[1][1][0], 1),\n prb.ljust(11), co.id_,\n (str(co.status == 'approved').lower() + '*').ljust(8), co.name)\n else:\n st = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(i + 1, p[1][1][0], round(p[1][1][2] / p[1][1][0], 1),\n prb.ljust(11), co.id_,\n (str(co.status == 'approved').lower()).ljust(8), co.name)\n if save:\n fo.write(st + '\\n')\n else:\n print(st)\n i += 1\n print('\\n')\n\n def canpredict_indications(self, cmpd, n=10, topX=10, consensus=True, sorting='prob', save=''):\n \"\"\"!\n This function is the inverse of canpredict_compounds. Input a compound\n of interest cando_cmpd (or a novel protein signature of interest new_sig)\n and the most similar compounds to it will be computed. 
The indications\n associated with the top n most similar compounds to the query compound will\n be examined to see if any are repeatedly enriched.\n @param cmpd Compound: Compound object to be used\n @param n int: top number of similar Compounds to be used for prediction\n @param topX int: top number of predicted Indications to be printed\n @param consensus bool: if True, only indications with at least 2 votes will be printed\n @param sorting str: whether to sort the indications by probability ('prob') or score ('score')\n @param save str: path to file to save the output\n @return Returns None\n \"\"\"\n if n == -1:\n n = len(self.compounds)-1\n if topX == -1:\n topX = len(self.indications)\n\n if type(cmpd) is Compound:\n cmpd = cmpd\n elif type(cmpd) is int:\n cmpd = self.get_compound(cmpd)\n print(\"Using CANDO compound {}\".format(cmpd.name))\n print(\"Compound has id {} and index {}\".format(cmpd.id_, cmpd.index))\n print(\"Comparing signature to all CANDO compound signatures...\")\n self.generate_similar_sigs(cmpd, sort=True)\n print(\"Generating indication predictions using top{} most similar compounds...\".format(n))\n i_dct = {}\n for c in cmpd.similar[0:n]:\n for ind in c[0].indications:\n if ind.id_ not in i_dct:\n i_dct[ind.id_] = [1, len(ind.compounds)]\n else:\n i_dct[ind.id_][0] += 1\n\n i2p_dct = {}\n for ik in i_dct:\n [k, n_app] = i_dct[ik]\n if consensus and k == 1:\n continue\n prb = 1.0 - stats.hypergeom.cdf(k, len(self.compounds) - 1, n_app, n)\n i2p_dct[ik] = (k, prb)\n\n if consensus and len(i2p_dct) == 0:\n print('\\n\\tFAILED - there are no compounds with score1 >= 2 -- change the\\n'\n '\\targuments to include \"consensus=False\" to print results with\\n'\n '\\tscore1 == 1, and/or increase \"n\".\\n')\n quit()\n\n if sorting == 'score':\n sorted_x = sorted(list(i2p_dct.items()), key=lambda x: x[1][0], reverse=True)\n elif sorting == 'prob':\n sorted_x = sorted(list(i2p_dct.items()), key=lambda x: x[1][1], reverse=False)\n else:\n sorted_x = []\n print('Please enter proper sorting method: \"prob\" or \"score\" -- quitting.')\n quit()\n\n if save:\n fo = open(save, 'w')\n print(\"Saving the {} highest predicted indications...\\n\".format(topX))\n fo.write(\"rank\\tprobability\\tscore\\tind_id\\tindication\\n\")\n else:\n print(\"Printing the {} highest predicted indications...\\n\".format(topX))\n print(\"rank\\tprobability\\tscore\\tind_id \\tindication\")\n n_print = topX if len(sorted_x) >= topX else len(sorted_x)\n for i in range(n_print):\n indd = self.get_indication(sorted_x[i][0])\n prb = '%.2e' % Decimal(sorted_x[i][1][1])\n if save:\n fo.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(i+1, prb, sorted_x[i][1][0], indd.id_, indd.name))\n else:\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(i+1, prb.ljust(11), sorted_x[i][1][0], indd.id_, indd.name))\n if save:\n fo.close()\n print('')\n\n def canpredict_adr(self, cmpd, n=10, topX=10, consensus=True, sorting='prob', save=''):\n \"\"\"!\n This function is the inverse of canpredict_compounds. Input a compound\n of interest cando_cmpd (or a novel protein signature of interest new_sig)\n and the most similar compounds to it will be computed. 
The ADRs\n associated with the top n most similar compounds to the query compound will\n be examined to see if any are repeatedly enriched.\n @param cmpd Compound: Compound object to be used\n @param n int: top number of similar Compounds to be used for prediction\n @param topX int: top number of predicted Indications to be printed\n @param consensus bool: if True, only ADRs with at least 2 votes will be printed\n @param sorting str: whether to sort the ADRs by probability ('prob') or score ('score')\n @param save str: path to file to save output\n @return Returns None\n \"\"\"\n if n == -1:\n n = len(self.compounds)-1\n if topX == -1:\n topX = len(self.adrs)\n\n if type(cmpd) is Compound:\n cmpd = cmpd\n elif type(cmpd) is int:\n cmpd = self.get_compound(cmpd)\n print(\"Using CANDO compound {}\".format(cmpd.name))\n print(\"Compound has id {} and index {}\".format(cmpd.id_, cmpd.index))\n print(\"Comparing signature to all CANDO compound signatures...\")\n self.generate_similar_sigs(cmpd, sort=True)\n print(\"Generating ADR predictions using top{} most similar compounds...\".format(n))\n a_dct = {}\n for c in cmpd.similar[0:n]:\n for adr in c[0].adrs:\n if adr.id_ not in a_dct:\n a_dct[adr.id_] = [1, len(adr.compounds)]\n else:\n a_dct[adr.id_][0] += 1\n\n a2p_dct = {}\n for ik in a_dct:\n [k, n_app] = a_dct[ik]\n if consensus and k == 1:\n continue\n prb = 1.0 - stats.hypergeom.cdf(k, len(self.compounds) - 1, n_app, n)\n a2p_dct[ik] = (k, prb)\n\n if consensus and len(a2p_dct) == 0:\n print('\\n\\tFAILED - there are no compounds with score1 >= 2 -- change the\\n'\n '\\targuments to include \"consensus=False\" to print results with\\n'\n '\\tscore1 == 1, and/or increase \"n\".\\n')\n quit()\n\n if sorting == 'score':\n sorted_x = sorted(list(a2p_dct.items()), key=lambda x: x[1][0], reverse=True)\n elif sorting == 'prob':\n sorted_x = sorted(list(a2p_dct.items()), key=lambda x: x[1][1], reverse=False)\n else:\n sorted_x = []\n print('Please enter proper sorting method: \"prob\" or \"score\" -- quitting.')\n quit()\n\n if save:\n fo = open(save, 'w')\n print(\"Saving the {} highest predicted ADRs...\\n\".format(topX))\n fo.write(\"rank\\tprobability\\tscore\\tadr_id\\tadr\\n\")\n else:\n print(\"Printing the {} highest predicted ADRs...\\n\".format(topX))\n print(\"rank\\tprobability\\tscore\\tadr_id \\tadr\")\n n_print = topX if len(sorted_x) >= topX else len(sorted_x)\n for i in range(n_print):\n adrr = self.get_adr(sorted_x[i][0])\n prb = '%.2e' % Decimal(sorted_x[i][1][1])\n if save:\n fo.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(i+1, prb, sorted_x[i][1][0], adrr.id_, adrr.name))\n else:\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(i+1, prb.ljust(11), sorted_x[i][1][0], adrr.id_, adrr.name))\n if save:\n fo.close()\n print('')\n\n def canpredict_ddi_cmpds(self, cmpd, n=10, topX=10, save=''):\n \"\"\"!\n Input a compound of interest cando_cmpd and the most similar compounds to it will be computed\n and outputted as potential drug-drug-interactions.\n @param cmpd Compound: Compound object to be used\n @param n int: top number of similar Compounds to be used for prediction\n @param topX int: top number of predicted Drug-drug Interactions to be printed\n @return Returns None\n \"\"\"\n if n == -1:\n n = len(self.compounds)-1\n if topX == -1:\n topX = len(self.compounds)-1\n\n if type(cmpd) is Compound:\n cmpd = cmpd\n elif type(cmpd) is int:\n cmpd = self.get_compound(cmpd)\n print(\"Using CANDO compound {}\".format(cmpd.name))\n print(\"Compound has id {} and index {}\".format(cmpd.id_, 
cmpd.index))\n print(\"Comparing signature to all CANDO compound signatures...\")\n self.generate_similar_sigs(cmpd, sort=True)\n print(\"Generating interaction predictions using top{} most similar compounds...\".format(n))\n i_dct = {}\n for c in cmpd.similar[0:n]:\n for itx in c[0].compounds:\n if itx.id_ not in i_dct:\n i_dct[itx.id_] = 1\n else:\n i_dct[itx.id_] += 1\n sorted_x = sorted(i_dct.items(), key=operator.itemgetter(1), reverse=True)\n if save:\n fo = open(save, 'w')\n print(\"Saving the {} highest predicted compounds...\\n\".format(topX))\n fo.write(\"rank\\tscore\\tcmpd_id\\tcompound\\n\")\n else:\n print(\"Printing the {} highest predicted compounds...\\n\".format(topX))\n print(\"rank\\tscore\\tcmpd_id \\tcompound\")\n topX = min(topX,len(sorted_x))\n for i in range(topX):\n itxd = self.get_compound(sorted_x[i][0])\n if save:\n fo.write(\"{}\\t{}\\t{}\\t{}\\n\".format(i+1, sorted_x[i][1], itxd.id_, itxd.name))\n else:\n print(\"{}\\t{}\\t{}\\t{}\".format(i+1, sorted_x[i][1], itxd.id_, itxd.name))\n if save:\n fo.close()\n print('')\n\n def canpredict_ddi_adrs(self, cmpd_pair, n=10, topX=10, save=''):\n \"\"\"!\n Similarly to canpredict_adrs(), input a compound pair of interest (cmpd_pair)\n and the most similar compound pairs to it will be computed. The ADRs associated\n with the top n most similar compound pairs to the query pair will be examined\n to see if any are repeatedly enriched.\n @param cmpd_pair Compound_pair: Compound_pair object to be used\n @param n int: top number of similar Compounds to be used for prediction\n @param topX int: top number of predicted Indications to be printed\n @return Returns None\n \"\"\"\n if n == -1:\n n = len(self.compound_pairs)-1\n if topX == -1:\n topX = len(self.adrs)\n\n if type(cmpd_pair) is Compound_pair:\n cmpd_pair = cmpd_pair\n elif type(cmpd_pair) is tuple:\n cmpd = self.get_compound_pair(cmpd_pair)\n if type(cmpd_pair) is tuple:\n c1 = self.get_compound(cmpd_pair[0])\n c2 = self.get_compound(cmpd_pair[1])\n cmpd_pair = Compound_pair((c1.name,c2.name),cmpd_pair,cmpd_pair)\n self.compound_pairs.append(cmpd_pair)\n self.compound_pair_ids.append(cmpd_pair.id_)\n cmpd_pair.sig = [i+j for i,j in zip(c1.sig,c2.sig)]\n print(\"Using CANDO compound pair {}\".format(cmpd_pair.name))\n print(\"Compound pair has id {} and index {}\".format(cmpd_pair.id_, cmpd_pair.index))\n print(\"Comparing signature to all CANDO compound pair signatures...\")\n self.generate_similar_sigs_cp(cmpd_pair, sort=True)\n print(\"Generating ADR predictions using top{} most similar compound pairs...\".format(n))\n a_dct = {}\n for c in cmpd_pair.similar[0:n]:\n for adr in c[0].adrs:\n if adr.id_ not in a_dct:\n a_dct[adr.id_] = 1\n else:\n a_dct[adr.id_] += 1\n sorted_x = sorted(a_dct.items(), key=operator.itemgetter(1), reverse=True)\n if save:\n fo = open(save, 'w')\n print(\"Saving the {} highest predicted indications...\\n\".format(topX))\n fo.write(\"rank\\tscore\\tadr_id\\tadverse_reaction\\n\")\n else:\n print(\"Printing the {} highest predicted indications...\\n\".format(topX))\n print(\"rank\\tscore\\tadr_id \\tadverse_reaction\")\n for i in range(topX):\n adr = self.get_adr(sorted_x[i][0])\n if save:\n fo.write(\"{}\\t{}\\t{}\\t{}\\n\".format(i+1, sorted_x[i][1], adr.id_, adr.name))\n else:\n print(\"{}\\t{}\\t{}\\t{}\".format(i+1, sorted_x[i][1], adr.id_, adr.name))\n if save:\n fo.close()\n print('')\n\n def similar_compounds(self, cmpd, n=10):\n \"\"\"!\n Computes and prints the top n most similar compounds to an input\n Compound object 
cando_cmpd or input novel signature new_sig\n @param cmpd Compound: Compound object\n @param n int: top number of similar Compounds to be used for prediction\n @return Returns None\n \"\"\"\n if type(cmpd) is Compound:\n cmpd = cmpd\n elif type(cmpd) is int:\n cmpd = self.get_compound(cmpd)\n print(\"Using CANDO compound {}\".format(cmpd.name))\n print(\"Compound has id {} and index {}\".format(cmpd.id_, cmpd.index))\n print(\"Comparing signature to all CANDO compound signatures...\")\n self.generate_similar_sigs(cmpd, sort=True)\n print(\"Printing top{} most similar compounds...\\n\".format(n))\n print(\"rank\\tdist\\tid\\tname\")\n for i in range(n+1):\n print(\"{}\\t{:.3f}\\t{}\\t{}\".format(i+1, cmpd.similar[i][1], cmpd.similar[i][0].id_, cmpd.similar[i][0].name))\n print('\\n')\n return\n\n def add_cmpd(self, new_sig, new_name=''):\n \"\"\"!\n Add a new Compound object to the platform\n \n @param new_sig str: Path to the tab-separated interaction scores\n @param new_name str: Name for the new Compound\n @return Returns None\n \"\"\"\n with open(new_sig, 'r') as nsf:\n n_sig = [0.00] * len(self.proteins)\n for l in nsf:\n [pr, sc] = l.strip().split('\\t')\n pr_i = self.protein_id_to_index[pr]\n n_sig[pr_i] = sc\n i = max([cm.id_ for cm in self.compounds]) + 1\n if not new_name:\n new_name = 'compound_{}'.format(i)\n cmpd = Compound(new_name, i, i)\n cmpd.sig = n_sig\n self.compounds.append(cmpd)\n\n if self.compounds[0].similar_computed or len(self.compounds[0].similar) > 1:\n dists = self.generate_similar_sigs(cmpd, sort=True)\n for c, dist in dists:\n c.similar.append((cmpd, dist))\n c.similar = sorted(c.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n\n print(\"New compound is \" + cmpd.name)\n print(\"New compound has id {} and index {}\".format(cmpd.id_, cmpd.index))\n\n def sigs(self, rm):\n \"\"\"!\n Return a list of all signatures, rm is a list of compound ids you do not want in the list\n @param rm list: List of compound ids to remove from list of signatures\n @return list: List of all signatures\n \"\"\"\n return [x.sig for x in self.proteins if x.id_ not in rm]\n\n def save_dists_to_file(self, f):\n \"\"\"!\n Write calculated distances of all compounds to all compounds to file\n @param f File name to save distances\n \"\"\"\n def dists_to_str(cmpd):\n o = ''\n for s in cmpd.similar:\n o += '{}\\t'.format(s[1])\n o = o + '\\n'\n return o\n\n with open(f, 'w') as srf:\n for c in self.compounds:\n srf.write(dists_to_str(c))\n\n def fusion(self, cando_objs, out_file='', method='sum'):\n \"\"\"!\n This function re-ranks the compounds according to the desired comparison specified by\n 'method' -> currently supports 'min', 'avg', 'mult', and 'sum'\n @param cando_objs list: List of CANDO objects\n @param out_file str: Path to where the result will be written\n @param method str: Method of fusion to be used (e.g., sum, mult, etc.)\n @return Returns CANDO object\n \"\"\"\n print(\"Fusing CANDO objects using \" + method)\n cnd = CANDO(self.c_map, self.i_map)\n if self.rm_cmpds:\n cnd.compounds = self.compounds\n cnd.indications = self.indications\n for c in cnd.compounds:\n c.similar = []\n c.sig = []\n dn = [self.data_name]\n for obj in cando_objs:\n dn.append(obj.data_name)\n cnd.data_name = \"-\".join(dn) + '-' + method\n cid_to_ranks = {}\n for c in self.compounds:\n cid_to_ranks[c.id_] = {}\n sims = c.similar\n for i in range(len(sims)):\n cid_to_ranks[c.id_][sims[i][0].id_] = [i]\n for cando_obj in cando_objs:\n for c2 in cando_obj.compounds:\n sims2 = 
c2.similar\n for j in range(len(sims2)):\n cid_to_ranks[c2.id_][sims2[j][0].id_].append(j)\n for c3 in cnd.compounds:\n ranks_dct = cid_to_ranks[c3.id_]\n for c4 in cnd.compounds:\n if c4.id_ == c3.id_:\n continue\n ranks = ranks_dct[c4.id_]\n if method == 'min':\n c3.similar.append((c4, float(min(ranks))))\n if method == 'sum':\n c3.similar.append((c4, float(sum(ranks))))\n if method == 'avg':\n c3.similar.append((c4, (float(sum(ranks))) / len(ranks)))\n if method == 'mult':\n m = 1.0\n for r in ranks:\n m *= r\n c3.similar.append((c4, m))\n if out_file:\n with open(out_file, 'w') as fo:\n for co in cnd.compounds:\n s = list(map(str, [x[1] for x in co.similar]))\n fo.write('\\t'.join(s) + '\\n')\n for cf in cnd.compounds:\n sorted_scores = sorted(cf.similar, key=lambda x: x[1] if not math.isnan(x[1]) else 100000)\n cf.similar = sorted_scores\n cf.similar_computed = True\n cf.similar_sorted = True\n return cnd\n\n def normalize(self):\n \"\"\"!\n Normalize the distance scores to between [0,1]. Simply divides all scores by the largest distance\n between any two compounds.\n @return Returns None\n \"\"\"\n if len(self.compounds[0].similar) == 0:\n print('Similar scores not computed yet -- quitting')\n return\n\n mx = 0\n for c in self.compounds:\n for s in c.similar:\n if s[1] > mx:\n mx = s[1]\n\n print('Max value is {}'.format(mx))\n\n def norm(x):\n v = x[1] / mx\n return x[0], v\n\n for c in self.compounds:\n c.similar = list(map(norm, c.similar))\n return\n\n def cando_methods(self):\n return []\n\n def compounds_drugs_methods(self):\n return ['canpredict_compounds']\n\n def indications_methods(self):\n return ['canpredict_indications', 'canbenchmark', 'canbenchmark_associated', 'canbenchmark_bottom', 'canbenchmark_cluster',\n 'canbenchmark_compounds', 'canbenchmark_ddi', 'canbenchmark_ndcg']\n\n def adr_methods(self):\n return ['canpredict_adr']\n\n def inspect_method(self, method_name):\n return inspect.getargspec(getattr(CANDO, method_name)).args[1:]\n\n def __str__(self):\n \"\"\"!\n Print stats about the CANDO object\n \"\"\"\n nc = len(self.compounds)\n b = self.compounds[0].similar_computed\n ni = len(self.indications)\n np = len(self.proteins)\n if np:\n return 'CANDO: {0} compounds, {1} proteins, {2} indications\\n' \\\n '\\tMatrix - {3}\\nIndication mapping - {4}\\n' \\\n '\\tDistances computed - {5}'.format(nc, np, ni, self.matrix, self.i_map, b)\n elif self.read_dists:\n return 'CANDO: {0} compounds, {1} indications\\n' \\\n '\\tCompound comparison file - {2}\\n' \\\n '\\tIndication mapping - {3}'.format(nc, ni, self.read_dists, self.i_map)\n else:\n return 'CANDO: {0} compounds, {1} indications\\n' \\\n '\\tIndication mapping - {2}'.format(nc, ni, self.i_map)\n\n\nclass Matrix(object):\n \"\"\"!\n An object to represent a matrix\n Intended for easier handling of matrices.\n Convert between fpt and tsv, as well as distance to similarity (and vice versa)\n \"\"\"\n def __init__(self, matrix_file, dist=False, convert_to_tsv=False):\n ## @var matrix_file\n # str: Path to file with interaction scores\n self.matrix_file = matrix_file\n ## @var dist\n # bool: if the matrix_file is an dist file\n self.dist = dist\n ## @var convert_to_tsv\n # bool: Convert old matrix format (.fpt) to .tsv\n self.convert_to_tsv = convert_to_tsv\n ## @var proteins\n # list: Proteins in the Matrix\n self.proteins = []\n ## @var values\n # list: Values in the Matrix\n self.values = []\n\n def pro_name(l):\n name = l[0]\n curr = l[1]\n index = 1\n while curr != ' ':\n name += curr\n index += 1\n curr = 
l[index]\n return name\n\n if not dist:\n with open(matrix_file, 'r') as f:\n lines = f.readlines()\n if convert_to_tsv:\n if matrix_file[-4:] == '.fpt':\n out_file = '.'.join(matrix_file.split('.')[:-1]) + '.tsv'\n else:\n out_file = matrix_file + '.tsv'\n of = open(out_file, 'w')\n for l_i in range(len(lines)):\n name = pro_name(lines[l_i])\n scores = []\n i = 24\n while i < len(lines[l_i]):\n score = lines[l_i][i:i + 5]\n i += 8\n scores.append(score)\n self.proteins.append(name)\n self.values.append(list(map(float, scores)))\n of.write(\"{0}\\t{1}\\n\".format(name, '\\t'.join(scores)))\n of.close()\n else:\n for l_i in range(len(lines)):\n vec = lines[l_i].strip().split('\\t')\n if len(vec) < 2:\n print('The matrix file {} is in the old fpt format -- please '\n 'convert to tsv with the following line of code:'.format(self.matrix_file))\n print('-> Matrix(\"{}\", convert_to_tsv=True) <-'.format(self.matrix_file))\n quit()\n name = vec[0]\n scores = vec[1:]\n self.proteins.append(name)\n self.values.append(list(map(float, scores)))\n else:\n with open(matrix_file, 'r') as rrs:\n lines = rrs.readlines()\n for i in range(len(lines)):\n scores = list(map(float, lines[i].strip().split('\\t')))\n self.values.append(scores)\n\n def convert(self, out_file):\n \"\"\"!\n Convert similarity matrix to distance matrix or vice versa. The\n first value in the matrix will determine the type of conversion\n (0.0 means distance to similarity, 1.0 means similarity to distance).\n @param out_file str: File path to which write the converted matrix.\n @return Returns None\n \"\"\"\n if self.values[0][0] == 0.0:\n metric = 'd'\n elif self.values[0][0] == 1.0:\n metric = 's'\n else:\n metric = None\n print('The first value is not 0.0 or 1.0; '\n 'please ensure the matrix is generated properly')\n quit()\n\n def to_dist(s):\n return 1 - s\n\n def to_sim(d):\n return 1 / (1 + d)\n\n of = open(out_file, 'w')\n if metric == 'd':\n for vs in self.values:\n vs = list(map(to_sim, vs))\n of.write(\"{}\\n\".format('\\t'.join(list(map(str, vs)))))\n else:\n if metric == 's':\n for vs in self.values:\n vs = list(map(to_dist, vs))\n of.write(\"{}\\n\".format('\\t'.join(list(map(str, vs)))))\n of.close()\n\n def normalize(self, outfile, dimension='drugs', method='avg'):\n \"\"\"!\n Normalize the interaction scores across drugs (default) or proteins (not implemented yet).\n @param outfile str: File path to which is written the converted matrix.\n @param dimension str: which vector to normalize - either 'drugs' to normalize all\n scores within the proteomic vector or 'proteins' to normalize for a protein against\n all drug scores.\n @param method str: normalize by the average or max within the vectors\n @return Returns None\n \"\"\"\n # dimensions include drugs or features (e.g. 
\"proteins\")\n # methods are average ('avg') or max ('max')\n dvs = {} # drug vectors\n cc = 0\n if dimension == 'drugs':\n for vec in self.values:\n for vi in range(len(vec)):\n if cc == 0:\n dvs[vi] = []\n dvs[vi].append(vec[vi])\n cc += 1\n\n new_dvecs = []\n for i in range(len(dvs)):\n vec = dvs[i]\n if method == 'avg':\n norm_val = np.average(vec)\n elif method == 'max':\n norm_val = max(vec)\n else:\n print('Please enter a proper normalization method: \"max\" or \"avg\"')\n quit()\n\n def norm(x):\n if norm_val == 0:\n return 0.0\n else:\n return x/norm_val\n\n new_dvecs.append(list(map(norm, vec)))\n\n pvs = {}\n for dvi in range(len(new_dvecs)):\n for p in range(len(self.proteins)):\n try:\n pvs[p].append(new_dvecs[dvi][p])\n except KeyError:\n pvs[p] = [new_dvecs[dvi][p]]\n\n with open(outfile, 'w') as fo:\n for p in range(len(self.proteins)):\n fo.write('{}\\t{}\\n'.format(self.proteins[p], '\\t'.join(list(map(str, pvs[p])))))\n\ndef single_interaction(c_id, p_id, v=\"v2.2\", fp=\"rd_ecfp4\", vect=\"int\", \n dist=\"dice\", org=\"nrpdb\", bs=\"coach\", \n c_cutoff=0.0, p_cutoff=0.0, percentile_cutoff=0.0, \n i_score=\"P\", nr_ligs=True, approved_only=False, lig_name=False, \n lib_path='',prot_path=''):\n\n def print_time(s):\n if s >= 60:\n m = s / 60.0\n s -= m * 60.0\n if m >= 60.0:\n h = m / 60.0\n m -= h * 60.0\n print(\"Interaciton calculation took {:.0f} hr {:.0f} min {:.0f} s to finish.\".format(h, m, s))\n else:\n print(\"Interaciton calculation took {:.0f} min {:.0f} s to finish.\".format(m, s))\n else:\n print(\"Interaciton calculation took {:.0f} s to finish.\".format(s))\n\n print(\"Calculating BANDOCK interaction...\")\n start = time.time()\n\n c_id = int(c_id)\n\n pre = os.path.dirname(__file__) + \"/data/v2.2+/\"\n lig_path = \"{}/ligs/fps\".format(pre)\n if not lib_path:\n cmpd_path = \"{}/cmpds/fps-{}\".format(pre,v)\n map_path = \"{}/mappings\".format(pre)\n else:\n cmpd_path = \"{0}/{1}/cmpds/fps-{1}\".format(lib_path,v)\n map_path = \"{0}/{1}/mappings\".format(lib_path,v)\n\n # Remove redundant ligands from full list\n # Especially important for percentile calculations\n if nr_ligs:\n if not os.path.exists(\"{}/mappings/nr_ligs.csv\".format(pre)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'\n dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))\n nr_ligs = pd.read_csv(\"{}/mappings/nr_ligs.csv\".format(pre),header=None)\n nr_ligs = nr_ligs[0].values.flatten()\n\n # Download protein matrix if it does not exist\n if not prot_path:\n if not os.path.exists(\"{}/prots/{}-{}.tsv\".format(pre,org,bs)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)\n dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))\n p_matrix = pd.read_csv(\"{}/prots/{}-{}.tsv\".format(pre,org,bs),sep='\\t',header=None,index_col=0)\n else:\n p_matrix = pd.read_csv(\"{}/{}-{}.tsv\".format(prot_path,org,bs),sep='\\t',header=None,index_col=0)\n\n \n # Create dictionary of lists\n # Keys == proteins\n # Values == list of predicted bs + bs scores\n p_dict = {}\n for p in p_matrix.itertuples():\n p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))\n \n try:\n p_dict = {p_id: p_dict[p_id]}\n except:\n print(\"{} does not exist in protein library\".format(p_id))\n sys.exit()\n\n if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:\n print(\"{} is not an applicable interaction score.\".format(i_score))\n return\n\n if not os.path.exists(\"{}/{}-{}_vect.pickle\".format(cmpd_path,fp,vect)):\n 
url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)\n dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))\n\n if not os.path.exists(\"{}/{}-{}_vect.pickle\".format(lig_path,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)\n dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))\n\n # Load compound and ligand fingerprint pickles\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:\n c_fps = pickle.load(f)\n with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:\n l_fps = pickle.load(f)\n\n try:\n check = c_fps[c_id]\n except:\n print(\"{} does not exist in compound library\".format(c_id))\n sys.exit()\n\n print(\"Interaction between {} and {}.\".format(c_id,p_id))\n\n score = calc_scores(c_id,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name)\n print(\"Interaction score between {} and {} = {}\".format(c_id,p_id,score[1][0]))\n\n end = time.time()\n print_time(end-start) \n \n return(score[1][0])\n\n\ndef generate_matrix(v=\"v2.2\", fp=\"rd_ecfp4\", vect=\"int\", dist=\"dice\", org=\"nrpdb\", bs=\"coach\", c_cutoff=0.0,\n p_cutoff=0.0, percentile_cutoff=0.0, i_score=\"P\", out_file='', out_path=\".\", nr_ligs=True,\n approved_only=False, lig_name=False, lib_path='', prot_path='', ncpus=1):\n \"\"\"!\n Generate a matrix using our in-house protocol BANDOCK.\n @param v str: version to use (supports v2.2 - v2.5)\n @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)\n @param vect str: integer \"int\" or binary \"bit\" vector for fingerprint\n @param dist str: use Sorenson-Dice \"dice\" for vect=\"int\" and Tanimoto \"tani\" for vect=\"bit\"\n @param org str: protein library to use ('nrpdb' or 'homo_sapien')\n @param bs str: the method to use, just use \"coach\"\n @param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring\n @param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring\n @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols\n @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')\n @param out_file str: filename of the output matrix\n @param out_path str: path to the output matrix\n @param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)\n @param approved_only bool: use only approved drugs to create the matrix\n @param lig_name bool: output the ligand chosen for the compound-protein interaction score instead of the score\n @param lib_path str: specify a local compound fingerprint set for custom analyses\n @param prot_path str: specify a local protein library for custom analyses\n @param ncpus int: number of cores to run on\n @return Returns None\n \"\"\"\n\n def print_time(s):\n if s >= 60:\n m = s / 60.0\n s -= m * 60.0\n if m >= 60.0:\n h = m / 60.0\n m -= h * 60.0\n print(\"Matrix generation took {:.0f} hr {:.0f} min {:.0f} s to finish.\".format(h, m, s))\n else:\n print(\"Matrix generation took {:.0f} min {:.0f} s to finish.\".format(m, s))\n else:\n print(\"Matrix generation took {:.0f} s to finish.\".format(s))\n\n print(\"Generating CANDO matrix...\")\n start = time.time()\n\n pre = os.path.dirname(__file__) + \"/data/v2.2+/\"\n lig_path = \"{}/ligs/fps\".format(pre)\n if not lib_path:\n cmpd_path = \"{}/cmpds/fps-{}\".format(pre,v)\n map_path = 
\"{}/mappings\".format(pre)\n else:\n cmpd_path = \"{0}/{1}/cmpds/fps-{1}\".format(lib_path,v)\n map_path = \"{0}/{1}/mappings\".format(lib_path,v)\n\n if out_file == '':\n if percentile_cutoff != 0.0:\n if approved_only:\n out_file = \"{}-{}-{}-{}-{}-percentile{}-p{}-{}-approved.tsv\".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)\n else:\n out_file = \"{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv\".format(fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)\n else:\n if approved_only:\n out_file = \"{}-{}-{}-{}-{}-c{}-p{}-{}-approved.tsv\".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)\n else:\n out_file = \"{}-{}-{}-{}-{}-c{}-p{}-{}.tsv\".format(fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)\n\n if not out_path and not lib_path:\n out_path = '{}/matrices/{}'.format(pre,v)\n elif not out_path and lib_path:\n out_path = '{}/{}/matrices'.format(lib_path,v)\n os.makedirs(out_path, exist_ok=True)\n\n # Remove redundant ligands from full list\n # Especially important for percentile calculations\n if nr_ligs:\n if not os.path.exists(\"{}/mappings/nr_ligs.csv\".format(pre)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'\n dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))\n nr_ligs = pd.read_csv(\"{}/mappings/nr_ligs.csv\".format(pre),header=None)\n nr_ligs = nr_ligs[0].values.flatten()\n\n # Download protein matrix if it does not exist\n if not prot_path:\n if not os.path.exists(\"{}/prots/{}-{}.tsv\".format(pre,org,bs)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)\n dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))\n p_matrix = pd.read_csv(\"{}/prots/{}-{}.tsv\".format(pre,org,bs),sep='\\t',header=None,index_col=0)\n else:\n p_matrix = pd.read_csv(\"{}/{}-{}.tsv\".format(prot_path,org,bs),sep='\\t',header=None,index_col=0)\n\n \n # Create dictionary of lists\n # Keys == proteins\n # Values == list of predicted bs + bs scores\n p_dict = {}\n for p in p_matrix.itertuples():\n p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))\n\n if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:\n print(\"{} is not an applicable interaction score.\".format(i_score))\n return\n\n if not os.path.exists(\"{}/{}-{}_vect.pickle\".format(cmpd_path,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)\n dl_file(url, '{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect))\n\n if not os.path.exists(\"{}/{}-{}_vect.pickle\".format(lig_path,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)\n dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))\n\n # Load compound and ligand fingerprint pickles\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'rb') as f:\n c_fps = pickle.load(f)\n with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:\n l_fps = pickle.load(f)\n\n if approved_only:\n if not os.path.exists(\"{}/drugbank-{}-approved.tsv\".format(map_path,v)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)\n dl_file(url, '{}/drugbank-{}-approved.tsv'.format(map_path,v))\n approved_df = pd.read_csv('{}/drugbank-{}-approved.tsv'.format(map_path,v),sep='\\t',index_col=0)\n c_list = approved_df.index\n else:\n c_list = list(c_fps.keys())\n\n if ncpus > 1:\n pool = mp.Pool(ncpus)\n scores = pool.starmap_async(calc_scores, 
[(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]).get()\n pool.close\n pool.join\n else:\n scores = [calc_scores(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs,lig_name) for c in c_list]\n scores = {d[0]:d[1] for d in scores}\n\n mat = pd.DataFrame.from_dict(scores)\n mat.sort_index(axis=1,inplace=True)\n mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)\n \n mat.to_csv(\"{}/{}\".format(out_path,out_file), sep='\\t', index=True, header=False, float_format='%.3f')\n \n end = time.time()\n print(\"Matrix written to {}/{}.\".format(out_path,out_file))\n print_time(end-start) \n\n\ndef calc_scores(c,c_fps,l_fps,p_dict,dist,pscore_cutoff=0.0,cscore_cutoff=0.0,percentile_cutoff=0.0,i_score='P',nr_ligs=[],lig_name=False):\n if i_score in ['dC','dCxP'] or percentile_cutoff != 0.0:\n if dist == 'dice':\n all_scores = DataStructs.BulkDiceSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())\n elif dist == 'tani':\n all_scores = DataStructs.BulkTanimotoSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())\n elif dist == 'cos':\n all_scores = DataStructs.BulkCosineSimilarity(c_fps[c],l_fps.loc[nr_ligs,0].values.tolist())\n if percentile_cutoff != 0.0:\n cscore_cutoff = np.percentile(all_scores,percentile_cutoff)\n scores = []\n for p in p_dict.keys():\n li = [i[0:2] for i in p_dict[p] if i[0] in l_fps.index and float(i[1]) >= pscore_cutoff]\n if li:\n li_bs, li_score = zip(*li)\n li_bs = list(li_bs)\n li_score = list(li_score)\n else:\n li_bs = li_score = []\n x = l_fps.loc[li_bs,0].values.tolist()\n y = l_fps.loc[li_bs].index.tolist()\n # Pscore\n if i_score in ['P','CxP','dCxP','avgP','medP']:\n try:\n if dist == 'dice':\n temp_scores = list(zip(y,DataStructs.BulkDiceSimilarity(c_fps[c],x)))\n elif dist == 'tani':\n temp_scores = list(zip(y,DataStructs.BulkTanimotoSimilarity(c_fps[c],x)))\n elif dist == 'cos':\n temp_scores = list(zip(y,DataStructs.BulkCosineSimilarity(c_fps[c],x)))\n\n #Cscore cutoff\n temp_scores = [i for i in temp_scores if float(i[1]) >= cscore_cutoff]\n\n if i_score == 'dCxP':\n temp_c = max(temp_scores, key = lambda i:i[1])\n if not lig_name:\n c_score = stats.percentileofscore(all_scores,temp_c[1])/100.0\n p_score = li_score[li_bs.index(temp_c[0])]\n scores.append(float(c_score) * float(p_score))\n else:\n scores.append(temp_c[0])\n elif i_score == 'CxP':\n temp_c = max(temp_scores, key = lambda i:i[1])\n if not lig_name:\n c_score = temp_c[1]\n p_score = li_score[li_bs.index(temp_c[0])]\n scores.append(float(c_score) * float(p_score))\n continue\n else:\n scores.append(temp_c[0])\n elif i_score == 'P':\n temp_c = max(temp_scores, key = lambda i:i[1])\n if not lig_name:\n p_score = li_score[li_bs.index(temp_c[0])]\n scores.append(float(p_score))\n else:\n scores.append(temp_c[0])\n elif i_score == 'avgP':\n # Will produce a warning when li_score is empty\n # temp_p will then == nan, so we check for that\n # append 0.00 if True.\n temp_p = np.mean(li_score)\n if not np.isnan(temp_p):\n scores.append(temp_p)\n else:\n scores.append(0.000)\n elif i_score == 'medP':\n temp_p = np.median(li_score)\n if not np.isnan(temp_p):\n scores.append(temp_p)\n else:\n scores.append(0.000)\n except:\n if not lig_name:\n scores.append(0.000)\n else:\n scores.append(\"None\")\n # Cscore\n elif i_score in ['dC','C','avgC','medC']:\n try:\n if dist == 'dice':\n temp_scores = DataStructs.BulkDiceSimilarity(c_fps[c],x)\n elif dist == 'tani':\n temp_scores = 
DataStructs.BulkTanimotoSimilarity(c_fps[c],x)\n elif dist == 'cos':\n temp_scores = DataStructs.BulkCosineSimilarity(c_fps[c],x)\n\n #Cscore cutoff\n temp_scores = [i for i in temp_scores if float(i) >= cscore_cutoff]\n\n if i_score == 'dC':\n temp_c = max(temp_scores)\n if not lig_name:\n scores.append(stats.percentileofscore(all_scores, temp_c) / 100.0)\n else:\n scores.append(li_bs[li_score.index(temp_c)])\n elif i_score == 'C':\n temp_c = max(temp_scores)\n if not lig_name:\n scores.append(temp_c)\n else:\n scores.append(li_bs[li_score.index(temp_c)])\n elif i_score == 'avgC':\n temp_c = np.mean(temp_scores)\n if not np.isnan(temp_c):\n scores.append(temp_c)\n else:\n scores.append(0.000)\n elif i_score == 'medC':\n temp_c = np.median(temp_scores)\n if not np.isnan(temp_c):\n scores.append(temp_c)\n else:\n scores.append(0.000)\n except:\n if not lig_name:\n scores.append(0.000)\n else:\n scores.append(\"None\")\n \n return (c, scores)\n\n\ndef generate_signature(cmpd_file, fp=\"rd_ecfp4\", vect=\"int\", dist=\"dice\", org=\"nrpdb\", bs=\"coach\", c_cutoff=0.0,\n p_cutoff=0.0, percentile_cutoff=0.0, i_score=\"P\", out_file='', out_path=\".\", nr_ligs=True,\n prot_path=''):\n \"\"\"!\n Generate an interaction signature for a query compound using our in-house protocol BANDOCK. Note: the parameters\n for this function MUST MATCH the parameters used to generate the matrix in use. Otherwise, the scores will be\n incompatible.\n @param cmpd_file str: filepath to an input mol file\n @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)\n @param vect str: integer \"int\" or binary \"bit\" vector for fingerprint\n @param dist str: use Sorenson-Dice \"dice\" for vect=\"int\" and Tanimoto \"tani\" for vect=\"bit\"\n @param org str: protein library to use ('nrpdb' or 'homo_sapien')\n @param bs str: the method to use, just use \"coach\"\n @param c_cutoff float: minimum Cscore (Tanimoto/Dice similarity score) to consider for scoring\n @param p_cutoff float: minimum Pscore (binding site score from COACH) to consider for scoring\n @param percentile_cutoff float: %ile cutoff for fingerprint similarity scores in 'dC' scoring protocols\n @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')\n @param out_file str: filename of the output signature\n @param out_path str: path to the output signature\n @param nr_ligs bool: use only the non-redundant set of ligands for 'dC' scoring protocols (recommended)\n @param prot_path str: specify a local protein library for custom analyses\n @return Returns None\n \"\"\"\n def print_time(s):\n if s >= 60:\n m = s / 60.0\n s -= m * 60.0\n if m >= 60.0:\n h = m / 60.0\n m -= h * 60.0\n print(\"Signature generation took {:.0f} hr {:.0f} min {:.0f} s to finish.\".format(h, m, s))\n else:\n print(\"Signature generation took {:.0f} min {:.0f} s to finish.\".format(m, s))\n else:\n print(\"signature generation took {:.0f} s to finish.\".format(s))\n\n print(\"Generating CANDO signature...\")\n start = time.time()\n\n pre = os.path.dirname(__file__) + \"/data/v2.2+/\"\n lig_path = \"{}/ligs/fps/\".format(pre)\n if out_file == '':\n if percentile_cutoff != 0.0:\n out_file = \"{}/cmpd_0-{}-{}-{}-{}-{}-percentile{}-p{}-{}.tsv\".format(out_path,fp,vect,dist,org,bs,percentile_cutoff,p_cutoff,i_score)\n else:\n out_file = \"{}/cmpd_0-{}-{}-{}-{}-{}-c{}-p{}-{}.tsv\".format(out_path,fp,vect,dist,org,bs,c_cutoff,p_cutoff,i_score)\n os.makedirs(out_path, exist_ok=True)\n\n # Remove redundant ligands from full list\n # Especially important 
for percentile calculations\n if nr_ligs:\n if not os.path.exists(\"{}/mappings/nr_ligs.csv\".format(pre)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/nr_ligs.csv'\n dl_file(url, '{}/mappings/nr_ligs.csv'.format(pre))\n nr_ligs = pd.read_csv(\"{}/mappings/nr_ligs.csv\".format(pre),header=None)\n nr_ligs = nr_ligs[0].values.flatten()\n\n # Download protein matrix if it does not exist\n if not prot_path:\n if not os.path.exists(\"{}/prots/{}-{}.tsv\".format(pre,org,bs)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-{}.tsv'.format(org,bs)\n dl_file(url, '{}/prots/{}-{}.tsv'.format(pre,org,bs))\n p_matrix = pd.read_csv(\"{}/prots/{}-{}.tsv\".format(pre,org,bs),sep='\\t',header=None,index_col=0)\n else:\n p_matrix = pd.read_csv(\"{}/{}-{}.tsv\".format(prot_path,org,bs),sep='\\t',header=None,index_col=0)\n \n # Create dictionary of lists\n # Keys == proteins\n # Values == list of predicted bs + bs scores\n p_dict = {}\n for p in p_matrix.itertuples():\n p_dict[p[0]] = list(zip(p[1].split(','),p[2].split(',')))\n\n if i_score not in ['C','dC','P','CxP','dCxP','avgC','medC','avgP','medP']:\n print(\"{} is not an applicable interaction score.\".format(i_score))\n return\n\n nc = Chem.MolFromMolFile(cmpd_file)\n nc = Chem.RemoveHs(nc)\n name = nc.GetProp(\"_Name\")\n\n c_fps = {}\n rad = int(int(fp[7:])/2)\n if fp[3]=='f':\n features = True\n else:\n features = False\n\n if vect=='int':\n c_fps[0] = AllChem.GetMorganFingerprint(nc,rad,useFeatures=features)\n else:\n bits = int(vect[:4])\n c_fps[0] = AllChem.GetMorganFingerprintAsBitVect(nc,rad,useFeatures=features,nBits=bits)\n\n if not os.path.exists(\"{}/{}-{}_vect.pickle\".format(lig_path,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)\n dl_file(url, '{}/{}-{}_vect.pickle'.format(lig_path,fp,vect))\n\n # Load ligand fingerprint pickles\n with open('{}/{}-{}_vect.pickle'.format(lig_path,fp,vect), 'rb') as f:\n l_fps = pickle.load(f)\n\n scores = calc_scores(0,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs)\n #scores = pool.starmap_async(calc_scores, [(c,c_fps,l_fps,p_dict,dist,p_cutoff,c_cutoff,percentile_cutoff,i_score,nr_ligs) for c in c_list]).get()\n scores = {scores[0]:scores[1]}\n\n mat = pd.DataFrame.from_dict(scores)\n mat.sort_index(axis=1,inplace=True)\n mat.rename(index=dict(zip(range(len(p_matrix.index)), p_matrix.index)), inplace=True)\n \n mat.to_csv(\"{}/{}\".format(out_path,out_file), sep='\\t', index=True, header=False, float_format='%.3f')\n \n end = time.time()\n print(\"Signature written to {}/{}.\".format(out_path,out_file))\n print_time(end-start) \n return(mat.iloc[:,0].values)\n\n\ndef add_cmpds(cmpd_list, file_type='smi', fp=\"rd_ecfp4\", vect=\"int\", cmpd_dir=\".\", v=None, map_indications='v2.3'):\n \"\"\"!\n Add new compounds to an existing CANDO Compound library, or create a new Compound library using our in-house protocol\n BANDOCK.\n @param cmpd_list str: filepath to all input compounds\n @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)\n @param vect str: integer \"int\" or binary \"bit\" vector for fingerprint\n @param cmpd_dir str: ??\n @param v str: ??\n @param map_indications str: CANDO version number to string match exact names from compound file to existing ind_map\n @return Returns None\n \"\"\"\n start = time.time()\n pre = os.path.dirname(__file__) + \"/data/v2.2+/\"\n # List of new compounds loaded into df\n ncs = 
pd.read_csv(cmpd_list, sep='\\t', header=None)\n \n vs = ['v2.2', 'v2.3', 'v2.4', 'v2.5', 'test.0']\n if v in vs:\n # Redundant with future lines. \n # Remove future lines and implement them into get_data()\n #get_data(v=v, org=None)\n curr_v = v\n print(\"Adding new compounds to compound library {}...\".format(curr_v))\n t = curr_v.split('.')\n t[-1] = str(int(t[-1])+1)\n new_v = '.'.join(t)\n print(\"New compound library is {}.\".format(new_v))\n \n curr_cmpd_path = \"{}/cmpds/fps-{}/\".format(pre, curr_v)\n if not os.path.exists(\"{}/cmpds/fps-{}/{}-{}_vect.pickle\".format(pre, curr_v, fp, vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(curr_v,\n fp, vect)\n dl_file(url, '{}/cmpds/fps-{}/{}-{}_vect.pickle'.format(pre, curr_v, fp, vect))\n cmpd_path = \"{}/cmpds/fps-{}/\".format(pre, new_v)\n os.makedirs(cmpd_path, exist_ok=True)\n os.system(\"cp {0}/{2}-{3}_vect.pickle {1}/{2}-{3}_vect.pickle\".format(curr_cmpd_path, cmpd_path, fp, vect))\n \n if not os.path.exists(\"{}/mappings/drugbank-{}.tsv\".format(pre, curr_v)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}.tsv'.format(curr_v)\n dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, curr_v))\n \n d_map = pd.read_csv(\"{}/mappings/drugbank-{}.tsv\".format(pre, curr_v), sep='\\t')\n \n if not os.path.exists(\"{}/mappings/drugbank2ctd-{}.tsv\".format(pre, curr_v)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(curr_v)\n dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, curr_v))\n os.system(\"cp {0}/mappings/drugbank2ctd-{1}.tsv {0}/mappings/drugbank2ctd-{2}.tsv\".format(pre, curr_v, new_v))\n\n if not os.path.exists(\"{}/cmpds/fps-{}/inchi_keys.pickle\".format(pre, curr_v)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/inchi_keys.pickle'.format(curr_v)\n dl_file(url, '{}/cmpds/fps-{}/inchi_keys.pickle'.format(pre, curr_v))\n \n with open('{}/inchi_keys.pickle'.format(curr_cmpd_path), 'rb') as f:\n inchi_dict = pickle.load(f)\n cmpd_num = len(inchi_dict)\n\n for c in ncs.itertuples(index=False):\n try:\n if file_type == 'mol':\n nc = Chem.MolFromMolFile(\"{}/{}.mol\".format(cmpd_dir, c[0]))\n name = nc.GetProp(\"_Name\")\n elif file_type == 'smi':\n nc = Chem.MolFromSmiles(\"{}\".format(c[0]))\n name = c[1]\n nc.SetProp(\"_Name\", name)\n nc = Chem.RemoveHs(nc)\n except:\n print(\"{} cannot load this molecule.\".format(c[0]))\n continue\n inchi_key = Chem.MolToInchiKey(nc)\n try:\n match = str(inchi_dict[inchi_key])\n except:\n match = None\n if match:\n print(\" {} is the same as {} - {} in the library\".format(name, int(match),\n d_map.loc[(d_map['CANDO_ID'] == int(match)),\n 'GENERIC_NAME'].values[0], match))\n continue\n else:\n print(\" Adding compound {} - {}\".format(cmpd_num,name))\n \n with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:\n inchi_dict[inchi_key] = cmpd_num\n pickle.dump(inchi_dict, f)\n \n d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],\n columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),\n ignore_index=True)\n rad = int(int(fp[7:])/2)\n if fp[3] == 'f':\n features = True\n else:\n features = False\n\n if vect == 'int':\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n 
pickle.dump(c_fps, f)\n else:\n bits = int(vect[:4])\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n cmpd_num += 1\n elif v and v not in vs:\n new_v = v\n print(\"Creating new compound library {}...\".format(new_v))\n print(\"The library will be built at {}/{}.\".format(os.getcwd(), new_v))\n os.makedirs(new_v, exist_ok=True)\n os.makedirs(\"{}/cmpds\".format(new_v), exist_ok=True)\n os.makedirs(\"{}/mappings\".format(new_v), exist_ok=True)\n cmpd_path = \"{0}/cmpds/fps-{0}/\".format(new_v)\n os.makedirs(cmpd_path, exist_ok=True)\n d_map = pd.DataFrame(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])\n\n cid2name = {}\n cname2inds = {}\n if map_indications:\n if not os.path.exists(\"{}/mappings/drugbank-{}.tsv\".format(pre, map_indications)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \\\n 'drugbank-{}.tsv'.format(map_indications)\n dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre, map_indications))\n if not os.path.exists(\"{}/mappings/drugbank2ctd-{}.tsv\".format(pre, map_indications)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/' \\\n 'drugbank2ctd-{}.tsv'.format(map_indications)\n dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications))\n\n fcm = open('{}/mappings/drugbank-{}.tsv'.format(pre, map_indications), 'r')\n cmls = fcm.readlines()\n fcm.close()\n for cml in cmls[1:]:\n cls = cml.split('\\t')\n cid = cls[0]\n cname = cls[2]\n cid2name[cid] = cname\n\n fim = open('{}/mappings/drugbank2ctd-{}.tsv'.format(pre, map_indications), 'r')\n imls = fim.readlines()\n fim.close()\n for iml in imls[1:]:\n ils = iml.split('\\t')\n cid = ils[0]\n indname = ils[1]\n indid = ils[2]\n cname = cid2name[cid]\n if cname in cname2inds:\n if (indname, indid) not in cname2inds[cname]:\n cname2inds[cname].append((indname, indid))\n else:\n cname2inds[cname] = [(indname, indid)]\n\n cmpd_num = 0\n # Create new fingerprint dict and save it to pickle for future use\n c_fps = {}\n if vect == 'int':\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n else:\n bits = int(vect[:4])\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n # Create new inchi dict\n inchi_dict = {}\n\n if map_indications:\n foind = open(\"{0}/mappings/inds-{0}.tsv\".format(new_v), 'w')\n foind.write('CANDO_ID\\tINDICATION_NAME\\tMESH_ID\\tINDICATION_ID\\n')\n ind2id = {}\n curr_ind_id = 0\n\n for c in ncs.itertuples(index=False):\n try:\n if file_type == 'mol':\n nc = Chem.MolFromMolFile(\"{}/{}.mol\".format(cmpd_dir, c[0]))\n name = nc.GetProp(\"_Name\")\n elif file_type == 'smi':\n nc = Chem.MolFromSmiles(\"{}\".format(c[0]))\n name = c[1]\n nc.SetProp(\"_Name\", name)\n except:\n print(\"{} cannot load this molecule.\".format(c[0]))\n continue\n inchi_key = Chem.MolToInchiKey(nc)\n try:\n match = str(inchi_dict[inchi_key])\n except:\n match = None\n if match:\n print(\" {} is the same as {} - {} in the library\".format(name, int(match),\n d_map.loc[(d_map['CANDO_ID'] == int(match)),\n 'GENERIC_NAME'].values[0], match))\n continue\n else:\n print(\" Adding compound {} - {}\".format(cmpd_num, name))\n \n with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:\n 
inchi_dict[inchi_key] = cmpd_num\n pickle.dump(inchi_dict, f)\n \n d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],\n columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),\n ignore_index=True)\n\n if map_indications:\n if name in cname2inds:\n inds = cname2inds[name]\n for ind in inds:\n if ind in ind2id:\n indid = ind2id[ind]\n else:\n indid = curr_ind_id\n ind2id[ind] = curr_ind_id\n curr_ind_id += 1\n foind.write('{}\\t{}\\t{}\\t{}\\n'.format(cmpd_num, ind[0], ind[1], indid))\n \n rad = int(int(fp[7:])/2)\n if fp[3] == 'f':\n features = True\n else:\n features = False\n\n if vect == 'int':\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n else:\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n cmpd_num += 1\n \n elif not v:\n new_v = \"v0.0\"\n print(\"Creating new compound library {}...\".format(new_v))\n cmpd_path = \"{0}/cmpds/fps-{0}/\".format(new_v)\n os.makedirs(cmpd_path, exist_ok=True)\n d_map = pd.DataFrame(columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS'])\n cmpd_num = 0\n # Create new fingerprint dict and save it to pickle for future use\n c_fps = {}\n if vect=='int':\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'wb') as f:\n pickle.dump(c_fps, f)\n else:\n bits = int(vect[:4])\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path,fp,vect), 'wb') as f:\n pickle.dump(c_fps, f)\n # Create new inchi dict\n inchi_dict = {}\n\n for c in ncs.itertuples(index=False):\n try:\n nc = Chem.MolFromMolFile(\"{}/{}.mol\".format(cmpd_dir, c[0]))\n nc = Chem.RemoveHs(nc)\n except:\n print(\"{} cannot load this molecule.\".format(c[0]))\n continue\n name = nc.GetProp(\"_Name\")\n inchi_key = Chem.MolToInchiKey(nc)\n try:\n match = str(inchi_dict[inchi_key])\n except:\n match = None\n if match:\n print(\" {} is the same as {} - {} in the library\".format(name, int(match),\n d_map.loc[(d_map['CANDO_ID'] == int(match)),\n 'GENERIC_NAME'].values[0], match))\n continue\n else:\n print(\" Adding compound {} - {}\".format(cmpd_num, name))\n \n with open('{}/inchi_keys.pickle'.format(cmpd_path), 'wb') as f:\n inchi_dict[inchi_key] = cmpd_num\n pickle.dump(inchi_dict, f)\n \n d_map = d_map.append(pd.DataFrame([[cmpd_num, 'NA', name, 'other']],\n columns=['CANDO_ID', 'DRUGBANK_ID', 'GENERIC_NAME', 'DRUG_GROUPS']),\n ignore_index=True)\n \n rad = int(int(fp[7:])/2)\n if fp[3] == 'f':\n features = True\n else:\n features = False\n\n if vect == 'int':\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprint(nc, rad, useFeatures=features)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n else:\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'rb') as f:\n c_fps = pickle.load(f)\n c_fps[cmpd_num] = AllChem.GetMorganFingerprintAsBitVect(nc, rad, useFeatures=features, nBits=bits)\n with open('{}/{}-{}_vect.pickle'.format(cmpd_path, fp, vect), 'wb') as f:\n pickle.dump(c_fps, f)\n cmpd_num += 1\n 
os.makedirs(\"{}/mappings\".format(new_v), exist_ok=True)\n d_map.to_csv(\"{0}/mappings/cmpds-{0}.tsv\".format(new_v), sep='\\t', index=False, na_rep='NA')\n print(\"Added compounds to compound library {}.\\n\".format(new_v))\n # Need to add functionality to handle loading a new version created by user.\n\n\ndef cosine_dist(A):\n similarity = np.dot(A, A.T)\n # squared magnitude of preference vectors (number of occurrences)\n square_mag = np.diag(similarity)\n # inverse squared magnitude\n inv_square_mag = 1 / square_mag\n # if it doesn't occur, set it's inverse magnitude to zero (instead of inf)\n inv_square_mag[np.isinf(inv_square_mag)] = 0\n # inverse of the magnitude\n inv_mag = np.sqrt(inv_square_mag)\n # cosine similarity (elementwise multiply by inverse magnitudes)\n cosine = similarity * inv_mag\n cos_sim = cosine.T * inv_mag\n cos_dist = [1-i for i in cos_sim]\n return np.asarray(cos_dist)\n\n\ndef tanimoto_sparse(str1, str2):\n \"\"\"!\n Calculate the tanimoto coefficient for a pair of sparse vectors\n @param str1 str: String of 1s and 0s representing the first compound fingerprint\n @param str2 str: String of 1s and 0s representing the second compound fingerprint\n @return Returns float\n \"\"\"\n n_c = 0.0\n n_a = 0.0\n n_b = 0.0\n for i in range(len(str1)):\n if str1[i] == '1' and str2[i] == '1':\n n_c += 1\n if str1[i] == '1':\n n_a += 1\n if str2[i] == '1':\n n_b += 1\n if n_c + n_a + n_b == 0:\n return 0.000\n return float(n_c/(n_a+n_b-n_c)) \n\n\ndef tanimoto_dense(list1, list2):\n \"\"\"!\n Calculate the tanimoto coefficient for a pair of dense vectors\n @param list1 list: List of positions that have a 1 in first compound fingerprint\n @param list2 list: List of positions that have a 1 in second compound fingerprint\n @return Returns float\n \"\"\"\n c = [common_item for common_item in list1 if common_item in list2]\n return float(len(c))/(len(list1) + len(list2) - len(c))\n\n\ndef get_fp_lig(fp):\n \"\"\"!\n Download precompiled binding site ligand fingerprints using the given fingerprint method.\n @param fp str: Fingerprinting method used to compile each binding site ligand fingerprint\n @return Returns None\n \"\"\"\n pre = os.path.dirname(__file__)\n out_file = '{}/v2.2+/ligs/{}.pickle'.format(pre, fp)\n if not os.path.exists(out_file):\n print('Downloading ligand fingerprints for {}...'.format(fp))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/{}.pickle'.format(fp)\n dl_file(url, out_file)\n print(\"{} ligand fingerprints downloaded.\".format(fp))\n else:\n print(\"{} ligand fingerprints have already been downloaded.\".format(fp))\n print(\"This file can be found at {}\".format(out_file))\n\n\ndef get_data(v=\"v2.2\", org='nrpdb', fp='rd_ecfp4', vect='int'):\n \"\"\"!\n Download CANDO v2.2+ data.\n @param v str: version to use (supports v2.2 - v2.5)\n @param org str: protein library to use ('nrpdb' or 'homo_sapien')\n @param fp str: the chemical fingerprint to use (rd_ecfp4, rd_ecfp10, etc)\n @param vect str: integer \"int\" or binary \"bit\" vector for fingerprint\n @returns Returns None\n \"\"\"\n # Check v and org before moving on\n vs = ['v2.2','v2.3','v2.4','v2.5','v2.6','v2.7','v2.8','test.0']\n orgs = ['all','nrpdb','homo_sapien','cryptococcus','test','tutorial']\n if v not in vs:\n print(\"{} is not a correct version.\".format(v))\n sys.exit()\n if org not in orgs:\n print(\"{} is not a correct organism.\".format(org))\n sys.exit()\n print('Downloading data for {}...'.format(v))\n pre = os.path.dirname(__file__) + \"/data/v2.2+\"\n # 
Dirs\n os.makedirs(pre, exist_ok=True)\n os.makedirs('{}/mappings'.format(pre), exist_ok=True)\n #os.makedirs('{}/matrices'.format(pre), exist_ok=True)\n os.makedirs('{}/prots'.format(pre), exist_ok=True)\n os.makedirs('{}/cmpds'.format(pre), exist_ok=True)\n # Mappings\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}.tsv'.format(v)\n dl_file(url, '{}/mappings/drugbank-{}.tsv'.format(pre,v))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)\n dl_file(url, '{}/mappings/drugbank-{}-approved.tsv'.format(pre, v))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(v)\n dl_file(url, '{}/mappings/drugbank2ctd-{}.tsv'.format(pre,v))\n # Compounds\n if not os.path.exists(\"{}/cmpds/fps-{}/{}-{}_vect.pickle\".format(pre,v,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/cmpds/fps-{}/{}-{}_vect.pickle'.format(v,fp,vect)\n dl_file(url, '{}/cmpds/fps-{}/{}-{}_vect.pickle'.format(pre,v,fp,vect))\n if not os.path.exists(\"{}/ligs/fps/{}-{}_vect.pickle\".format(pre,fp,vect)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/ligs/fps/{}-{}_vect.pickle'.format(fp,vect)\n dl_file(url, '{}/ligs/fps/{}-{}_vect.pickle'.format(pre,fp,vect))\n # Matrices\n '''\n if matrix == 'all':\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv'\n dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv')\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv'\n dl_file(url, 'v2.0/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv')\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-human.tsv'\n dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-human.tsv')\n elif matrix == 'nrpdb':\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv'\n dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-approved_x_nrpdb.tsv')\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv'\n dl_file(url, 'v2.0/matrices/ob_fp4/drugbank-approved_x_nrpdb.tsv')\n elif matrix == 'human':\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/matrices/rd_ecfp4/drugbank-human.tsv'\n dl_file(url, 'v2.0/matrices/rd_ecfp4/drugbank-human.tsv')\n '''\n print('Downloading data for {}...'.format(org))\n # Proteins\n if org=='all':\n for o in orgs[1:]:\n if o=='test' or o=='tutorial':\n continue\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-coach.tsv'.format(o)\n dl_file(url, '{}/prots/{}-coach.tsv'.format(pre,o))\n else:\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/{}-coach.tsv'.format(org)\n dl_file(url, '{}/prots/{}-coach.tsv'.format(pre,org))\n\n '''\n if not os.path.exists('v2.0/cmpds/scores/drugbank-approved-rd_ecfp4.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2/cmpds/scores/drugbank-approved-rd_ecfp4.tsv.gz'\n dl_file(url, 'v2.0/cmpds/scores/drugbank-approved-rd_ecfp4.tsv.gz')\n os.chdir(\"v2.0/cmpds/scores\")\n os.system(\"gunzip -f drugbank-approved-rd_ecfp4.tsv.gz\")\n os.chdir(\"../../..\")\n '''\n print('All data for {} and {} downloaded.'.format(v,org))\n\n\ndef clear_cache():\n \"\"\"!\n Clear files in \"data/\" directory.\n @returns Returns None\n \"\"\"\n pre = os.path.dirname(__file__) + \"/data/\"\n os.system(\"rm -r {}\".format(pre))\n print(\"{} directory has been 
removed.\".format(pre))\n\n\ndef get_tutorial():\n \"\"\"!\n Download data for tutorial.\n @returns Returns None\n \"\"\"\n print('Downloading data for tutorial...')\n pre = os.path.dirname(__file__) + \"/data/v2.2+\"\n if not os.path.exists('tutorial'):\n os.mkdir('tutorial')\n # Example matrix (rd_ecfp4 w/ 64 prots x 2,449 drugs)\n if not os.path.exists('./tutorial/tutorial_matrix-all.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial_matrix-all.tsv'\n dl_file(url, './tutorial/tutorial_matrix-all.tsv')\n if not os.path.exists('./tutorial/tutorial_matrix-approved.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial_matrix-approved.tsv'\n dl_file(url, './tutorial/tutorial_matrix-approved.tsv')\n # Mappings\n if not os.path.exists('./tutorial/cmpds-v2.2.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds-v2.2.tsv'\n dl_file(url, './tutorial/cmpds-v2.2.tsv')\n #if not os.path.exists('./tutorial/cmpds-v2.2-approved.tsv'):\n # url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds-v2.2-approved.tsv'\n # dl_file(url, './tutorial/cmpds-v2.2-approved.tsv')\n if not os.path.exists('./tutorial/cmpds2inds-v2.2.tsv'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/mappings/cmpds2inds-v2.2.tsv'\n dl_file(url, './tutorial/cmpds2inds-v2.2.tsv')\n # Protein scores\n if not os.path.exists('{}/prots/tutorial-coach.tsv'.format(pre)):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/prots/tutorial-coach.tsv'\n dl_file(url, '{}/prots/tutorial-coach.tsv'.format(pre))\n # New compound set\n if not os.path.exists('./tutorial/tki_set-test.smi'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tki_set-test.smi'\n dl_file(url, './tutorial/tki_set-test.smi')\n # New compound\n if not os.path.exists('./tutorial/lm235.mol'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/lmk235.mol'\n dl_file(url, './tutorial/lmk235.mol')\n # Protein subset\n if not os.path.exists('./tutorial/tutorial-bac-prots.txt'):\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/tutorial/tutorial-bac-prots.txt'\n dl_file(url, './tutorial/tutorial-bac-prots.txt')\n print('All data for tutorial downloaded.\\n')\n\n\ndef get_test():\n \"\"\"!\n Download data for test script.\n @returns Returns None\n \"\"\"\n print('Downloading data for test...')\n pre = os.path.dirname(__file__) + \"/data/v2.2+/test\"\n os.makedirs(pre,exist_ok=True)\n #url = 'http://protinfo.compbio.buffalo.edu/cando/data/test/test-cmpd_scores.tsv'\n #dl_file(url, '{}/test-cmpd_scores.tsv'.format(pre))\n #url = 'http://protinfo.compbio.buffalo.edu/cando/data/test/test-prot_scores.tsv'\n #dl_file(url, '{}/test-prot_scores.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds.tsv'\n dl_file(url, '{}/test-cmpds.tsv'.format(pre))\n with open('{}/test-cmpds.tsv'.format(pre), 'r') as f:\n l = []\n f.readline()\n for i in f:\n i = i.split('\\t')[0]\n i = \"{}.mol\".format(i)\n l.append(i)\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds_mol'\n out = '{}/test-cmpds_mol'.format(pre)\n dl_dir(url, out, l)\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-inds.tsv'\n dl_file(url, '{}/test-inds.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-cmpds_mol/8100.mol'\n dl_file(url, '{}/test-cmpds_mol/8100.mol'.format(pre))\n url = 
'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test_set.smi'\n dl_file(url, '{}/tki_set-test.smi'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-uniprot_set.tsv'\n dl_file(url, '{}/test-uniprot_set.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/vina64x.fpt'\n dl_file(url, '{}/vina64x.fpt'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/toy64x.fpt'\n dl_file(url, '{}/toy64x.fpt'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-pathway-prot.tsv'\n dl_file(url, '{}/test-pathway-prot.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-pathway-mesh.tsv'\n dl_file(url, '{}/test-pathway-mesh.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-new_cmpds.tsv'\n dl_file(url, '{}/test-new_cmpds.tsv'.format(pre))\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/test/test-uniprot_set.tsv'\n print('All test data downloaded.\\n')\n\n\ndef dl_dir(url, out, l):\n \"\"\"!\n Function to recursively download a directory.\n Prints the name of the directory and a progress bar.\n @param url str: URL of the dir to be downloaded\n @param out str: Path to where the dir will be downloaded\n @param l list: List of files in dir to be downloaded\n @returns Returns None\n \"\"\"\n if not os.path.exists(out):\n os.makedirs(out)\n else:\n for n in l:\n if not os.path.exists(\"{}/{}\".format(out, n)):\n break\n return\n format_custom_text = progressbar.FormatCustomText(\n '%(f)s',\n dict(\n f='',\n ),\n )\n widgets = [\n format_custom_text,\n ' [', progressbar.DataSize(format='%(scaled)i files',), '] ',\n progressbar.Bar(left='[', right=']'),\n ' [', progressbar.ETA(), '] ',\n ]\n num_bars = len(l)\n bar = progressbar.ProgressBar(max_value=num_bars, widgets=widgets).start()\n i = 0\n for n in l:\n format_custom_text.update_mapping(f=out)\n url2 = \"{}/{}\".format(url, n)\n r = requests.get(url2)\n out_file = \"{}/{}\".format(out, n)\n with open(out_file, 'wb') as f:\n f.write(r.content)\n bar.update(i)\n i += 1\n bar.finish()\n\n\ndef dl_file(url, out_file):\n \"\"\"!\n Function to download a file.\n Prints the name of the file and a progress bar.\n @param url str: URL of the file to be downloaded\n @param out_file str: File path to where the file will be downloaded\n @returns Returns None\n \"\"\"\n if os.path.exists(out_file):\n print(\"{} exists.\".format(out_file))\n return\n elif not os.path.exists(os.path.dirname(out_file)):\n os.makedirs(os.path.dirname(out_file))\n time.sleep(1)\n r = requests.get(url, stream=True)\n format_custom_text = progressbar.FormatCustomText(\n '%(f)s',\n dict(\n f='',\n ),\n )\n widgets = [\n format_custom_text,\n ' [', progressbar.DataSize(prefixes=('K', 'M', 'G')), '] ',\n progressbar.Bar(left='[', right=']'),\n ' [', progressbar.ETA(), '] ',\n ]\n with open(out_file, 'wb') as f:\n total_length = int(r.headers.get('content-length'))\n if total_length >= 1024:\n chunk_size = 1024\n num_bars = total_length / chunk_size\n else:\n chunk_size = 1\n num_bars = total_length / chunk_size\n bar = progressbar.ProgressBar(max_value=num_bars, widgets=widgets).start()\n i = 0\n for chunk in r.iter_content(chunk_size=chunk_size):\n format_custom_text.update_mapping(f=out_file)\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n bar.update(i)\n i += 1\n bar.finish()\n\n\ndef load_version(v='v2.3', protlib='nrpdb', i_score='CxP', approved_only=False, 
compute_distance=False,\n dist_metric='cosine', protein_set='', ncpus=1):\n \"\"\"!\n Directly load a pre-compiled version of CANDO.\n @param v str: version to use (supports v2.2 - v2.5)\n @param protlib str: protein library to use ('nrpdb' or 'homo_sapien')\n @param i_score str: the scoring protocol to use ('P', 'C', 'dC', 'CxP', dCxP')\n @param approved_only bool: use only approved drugs to create the matrix\n @param compute_distance bool: compute distance between compounds for specified matrix\n @param dist_metric str: the distance metric to use if compute_distance=True ('cosine', 'rmsd', etc)\n @param protein_set str: path to a file containing a subset of proteins of interest\n @param ncpus int: number of cores to run on\n @return Returns CANDO object\n \"\"\"\n\n # download data for version\n get_data(v=v, org=protlib)\n\n # separate matrix file download (for now)\n app = 'approved' if approved_only else 'all'\n mat_name = 'rd_ecfp4-{}-{}-{}-int_vect-dice-{}.tsv'.format(protlib, v, app, i_score)\n url = 'http://protinfo.compbio.buffalo.edu/cando/data/v2.2+/matrices/{}'.format(mat_name)\n dl_file(url, './data/v2.2+/matrices/{}'.format(mat_name))\n\n # create CANDO object\n if approved_only:\n cmpd_map_path = 'data/v2.2+/mappings/drugbank-{}-approved.tsv'.format(v)\n matrix_path = 'data/v2.2+/matrices/rd_ecfp4-{}-{}-approved-int_vect-dice-{}.tsv'.format(protlib, v, i_score)\n else:\n cmpd_map_path = 'data/v2.2+/mappings/drugbank-{}.tsv'.format(v)\n matrix_path = 'data/v2.2+/matrices/rd_ecfp4-{}-{}-all-int_vect-dice-{}.tsv'.format(protlib, v, i_score)\n\n ind_map_path = 'data/v2.2+/mappings/drugbank2ctd-{}.tsv'.format(v)\n\n cando = CANDO(cmpd_map_path, ind_map_path, matrix=matrix_path, compound_set=app,\n compute_distance=compute_distance, dist_metric=dist_metric, protein_set=protein_set, ncpus=ncpus)\n\n return cando" ]
[ [ "matplotlib.pyplot.legend", "sklearn.metrics.roc_auc_score", "sklearn.cluster.KMeans", "pandas.DataFrame", "matplotlib.pyplot.plot", "scipy.spatial.distance.squareform", "sklearn.metrics.pairwise_distances_chunked", "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "sklearn.model_selection.train_test_split", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "scipy.stats.percentileofscore", "scipy.spatial.distance.cdist", "sklearn.svm.SVC", "pandas.DataFrame.from_dict", "matplotlib.pyplot.show", "sklearn.decomposition.PCA", "matplotlib.pyplot.ylabel", "sklearn.metrics.pairwise_distances", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.xlim", "sklearn.svm.OneClassSVM", "sklearn.metrics.average_precision_score", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
jterrellSchool21/maya_tg_www
[ "b31047344afe2976969450a2160fd7c90dfc8fdf" ]
[ "train.py" ]
[ "#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./train --dataset <file|directory|glob>\n\nimport argparse\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport tqdm\nfrom tensorflow.core.protobuf import rewriter_config_pb2\n\nimport model, sample, encoder\nfrom load_dataset import load_dataset, Sampler\nfrom accumulate import AccumulatingOptimizer\nimport memory_saving_gradients\n\nCHECKPOINT_DIR = 'checkpoint'\nSAMPLE_DIR = 'samples'\n\n\nparser = argparse.ArgumentParser(\n description='Fine-tune GPT-2 on your custom dataset.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--dataset', metavar='PATH', type=str, required=True, help='Input file, directory, or glob pattern (utf-8 text, or preencoded .npz files).')\nparser.add_argument('--model_name', metavar='MODEL', type=str, default='117M', help='Pretrained model name')\nparser.add_argument('--combine', metavar='CHARS', type=int, default=50000, help='Concatenate input files with <|endoftext|> separator into chunks of this minimum size')\nparser.add_argument('--encoding', type=str, default='utf-8', help='Set the encoding for reading and writing files.')\n\nparser.add_argument('--batch_size', metavar='SIZE', type=int, default=1, help='Batch size')\nparser.add_argument('--learning_rate', metavar='LR', type=float, default=0.00002, help='Learning rate for Adam')\nparser.add_argument('--accumulate_gradients', metavar='N', type=int, default=1, help='Accumulate gradients across N minibatches.')\nparser.add_argument('--memory_saving_gradients', default=False, action='store_true', help='Use gradient checkpointing to reduce vram usage.')\nparser.add_argument('--only_train_transformer_layers', default=False, action='store_true', help='Restrict training to the transformer blocks.')\nparser.add_argument('--optimizer', type=str, default='adam', help='Optimizer. <adam|sgd>.')\nparser.add_argument('--noise', type=float, default=0.0, help='Add noise to input training data to regularize against typos.')\n\nparser.add_argument('--top_k', type=int, default=40, help='K for top-k sampling.')\nparser.add_argument('--top_p', type=float, default=0.0, help='P for top-p sampling. Overrides top_k if set > 0.')\n\nparser.add_argument('--restore_from', type=str, default='latest', help='Either \"latest\", \"fresh\", or a path to a checkpoint file')\nparser.add_argument('--run_name', type=str, default='run1', help='Run id. 
Name of subdirectory in checkpoint/ and samples/')\nparser.add_argument('--sample_every', metavar='N', type=int, default=100, help='Generate samples every N steps')\nparser.add_argument('--sample_length', metavar='TOKENS', type=int, default=1023, help='Sample this many tokens')\nparser.add_argument('--sample_num', metavar='N', type=int, default=1, help='Generate this many samples')\nparser.add_argument('--save_every', metavar='N', type=int, default=1000, help='Write a checkpoint every N steps')\n\nparser.add_argument('--val_dataset', metavar='PATH', type=str, default=None, help='Dataset for validation loss, defaults to --dataset.')\nparser.add_argument('--val_batch_size', metavar='SIZE', type=int, default=2, help='Batch size for validation.')\nparser.add_argument('--val_batch_count', metavar='N', type=int, default=40, help='Number of batches for validation.')\nparser.add_argument('--val_every', metavar='STEPS', type=int, default=0, help='Calculate validation loss every STEPS steps.')\n\n\ndef maketree(path):\n try:\n os.makedirs(path)\n except:\n pass\n\n\ndef randomize(context, hparams, p):\n if p > 0:\n mask = tf.random.uniform(shape=tf.shape(context)) < p\n noise = tf.random.uniform(shape=tf.shape(context), minval=0, maxval=hparams.n_vocab, dtype=tf.int32)\n return tf.where(mask, noise, context)\n else:\n return context\n\n\ndef main():\n args = parser.parse_args()\n enc = encoder.get_encoder(args.model_name, \"models\")\n hparams = model.default_hparams()\n with open(os.path.join('models', args.model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if args.sample_length > hparams.n_ctx:\n raise ValueError(\n \"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n if args.model_name == '345M':\n args.memory_saving_gradients = True\n if args.optimizer == 'adam':\n args.only_train_transformer_layers = True\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.graph_options.rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.OFF\n with tf.Session(config=config) as sess:\n context = tf.placeholder(tf.int32, [args.batch_size, None])\n context_in = randomize(context, hparams, args.noise)\n output = model.model(hparams=hparams, X=context_in)\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=context[:, 1:], logits=output['logits'][:, :-1]))\n\n if args.val_every > 0:\n val_context = tf.placeholder(tf.int32, [args.val_batch_size, None])\n val_output = model.model(hparams=hparams, X=val_context)\n val_loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=val_context[:, 1:], logits=val_output['logits'][:, :-1]))\n val_loss_summary = tf.summary.scalar('val_loss', val_loss)\n\n\n tf_sample = sample.sample_sequence(\n hparams=hparams,\n length=args.sample_length,\n context=context,\n batch_size=args.batch_size,\n temperature=1.0,\n top_k=args.top_k,\n top_p=args.top_p)\n\n all_vars = [v for v in tf.trainable_variables() if 'model' in v.name]\n train_vars = [v for v in all_vars if '/h' in v.name] if args.only_train_transformer_layers else all_vars\n\n if args.optimizer == 'adam':\n opt = tf.train.AdamOptimizer(learning_rate=args.learning_rate)\n elif args.optimizer == 'sgd':\n opt = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)\n else:\n exit('Bad optimizer:', args.optimizer)\n\n if args.accumulate_gradients > 1:\n if args.memory_saving_gradients:\n exit(\"Memory saving gradients are not implemented for gradient accumulation yet.\")\n 
opt = AccumulatingOptimizer(\n opt=opt,\n var_list=train_vars)\n opt_reset = opt.reset()\n opt_compute = opt.compute_gradients(loss)\n opt_apply = opt.apply_gradients()\n summary_loss = tf.summary.scalar('loss', opt_apply)\n else:\n if args.memory_saving_gradients:\n opt_grads = memory_saving_gradients.gradients(loss, train_vars)\n else:\n opt_grads = tf.gradients(loss, train_vars)\n opt_grads = list(zip(opt_grads, train_vars))\n opt_apply = opt.apply_gradients(opt_grads)\n summary_loss = tf.summary.scalar('loss', loss)\n\n summary_lr = tf.summary.scalar('learning_rate', args.learning_rate)\n summaries = tf.summary.merge([summary_lr, summary_loss])\n\n summary_log = tf.summary.FileWriter(\n os.path.join(CHECKPOINT_DIR, args.run_name))\n\n saver = tf.train.Saver(\n var_list=all_vars,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=2)\n sess.run(tf.global_variables_initializer())\n\n if args.restore_from == 'latest':\n ckpt = tf.train.latest_checkpoint(\n os.path.join(CHECKPOINT_DIR, args.run_name))\n if ckpt is None:\n # Get fresh GPT weights if new run.\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', args.model_name))\n elif args.restore_from == 'fresh':\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', args.model_name))\n else:\n ckpt = tf.train.latest_checkpoint(args.restore_from)\n print('Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n print('Loading dataset...')\n chunks = load_dataset(enc, args.dataset, args.combine, encoding=args.encoding)\n print(f\"Count of chunks -> {len(chunks)}\")\n data_sampler = Sampler(chunks)\n if args.val_every > 0:\n if args.val_dataset:\n val_chunks = load_dataset(enc, args.val_dataset, args.combine, encoding=args.encoding)\n else:\n val_chunks = chunks\n print('dataset has', data_sampler.total_size, 'tokens')\n print('Training...')\n\n if args.val_every > 0:\n # Sample from validation set once with fixed seed to make\n # it deterministic during training as well as across runs.\n val_data_sampler = Sampler(val_chunks, seed=1)\n val_batches = [[val_data_sampler.sample(1024) for _ in range(args.val_batch_size)]\n for _ in range(args.val_batch_count)]\n\n counter = 1\n counter_path = os.path.join(CHECKPOINT_DIR, args.run_name, 'counter')\n if os.path.exists(counter_path):\n # Load the step number if we're resuming a run\n # Add 1 so we don't immediately try to save again\n with open(counter_path, 'r') as fp:\n counter = int(fp.read()) + 1\n\n def save():\n maketree(os.path.join(CHECKPOINT_DIR, args.run_name))\n print(\n 'Saving',\n os.path.join(CHECKPOINT_DIR, args.run_name,\n 'model-{}').format(counter))\n saver.save(\n sess,\n os.path.join(CHECKPOINT_DIR, args.run_name, 'model'),\n global_step=counter)\n with open(counter_path, 'w') as fp:\n fp.write(str(counter) + '\\n')\n\n def generate_samples():\n print('Generating samples...')\n context_tokens = data_sampler.sample(1)\n all_text = []\n index = 0\n while index < args.sample_num:\n out = sess.run(\n tf_sample,\n feed_dict={context: args.batch_size * [context_tokens]})\n for i in range(min(args.sample_num - index, args.batch_size)):\n text = enc.decode(out[i])\n text = '======== SAMPLE {} ========\\n{}\\n'.format(\n index + 1, text)\n all_text.append(text)\n index += 1\n print(text)\n maketree(os.path.join(SAMPLE_DIR, args.run_name))\n with open(\n os.path.join(SAMPLE_DIR, args.run_name,\n 'samples-{}').format(counter), 'w', encoding=args.encoding) as fp:\n fp.write('\\n'.join(all_text))\n\n def validation():\n print('Calculating validation loss...')\n losses = []\n 
for batch in tqdm.tqdm(val_batches):\n losses.append(sess.run(val_loss, feed_dict={val_context: batch}))\n v_val_loss = np.mean(losses)\n v_summary = sess.run(val_loss_summary, feed_dict={val_loss: v_val_loss})\n summary_log.add_summary(v_summary, counter)\n summary_log.flush()\n print(\n '[{counter} | {time:2.2f}] validation loss = {loss:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=v_val_loss))\n\n def sample_batch():\n return [data_sampler.sample(1024) for _ in range(args.batch_size)]\n\n\n avg_loss = (0.0, 0.0)\n start_time = time.time()\n\n try:\n while True:\n if counter % args.save_every == 0:\n save()\n if counter % args.sample_every == 0:\n generate_samples()\n if args.val_every > 0 and (counter % args.val_every == 0 or counter == 1):\n validation()\n\n if args.accumulate_gradients > 1:\n sess.run(opt_reset)\n for _ in range(args.accumulate_gradients):\n sess.run(\n opt_compute, feed_dict={context: sample_batch()})\n (v_loss, v_summary) = sess.run((opt_apply, summaries))\n else:\n (_, v_loss, v_summary) = sess.run(\n (opt_apply, loss, summaries),\n feed_dict={context: sample_batch()})\n\n summary_log.add_summary(v_summary, counter)\n\n avg_loss = (avg_loss[0] * 0.99 + v_loss,\n avg_loss[1] * 0.99 + 1.0)\n\n print(\n '[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=v_loss,\n avg=avg_loss[0] / avg_loss[1]))\n\n counter += 1\n except KeyboardInterrupt:\n print('interrupted')\n save()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.train.latest_checkpoint", "tensorflow.shape", "tensorflow.gradients", "tensorflow.placeholder", "tensorflow.trainable_variables", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.mean", "tensorflow.Session", "tensorflow.where", "tensorflow.train.Saver", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.summary.merge" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
lucifer2288/agents
[ "63a8ea8ea9095cb9ab9f7c9fcf3aa2f9ac5fa280" ]
[ "tf_agents/distributions/utils.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities related to distributions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nimport inspect\nfrom typing import Any, Mapping, Type, Text\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tf_agents.distributions import tanh_bijector_stable\nfrom tf_agents.typing import types\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\n\n\ndef scale_distribution_to_spec(distribution, spec):\n \"\"\"Scales the given distribution to the bounds of the given spec.\"\"\"\n return SquashToSpecNormal(distribution, spec)\n\n\nclass SquashToSpecNormal(tfp.distributions.Distribution):\n \"\"\"Scales an input normalized action distribution to match spec bounds.\n\n Unlike the normal distribution computed when NormalProjectionNetwork\n is called with scale_distribution=False, which merely squashes the mean\n of the distribution to within the action spec, this distribution scales the\n output distribution to ensure that the output action fits within the spec.\n\n This distribution also maintains the input normal distribution, and uses this\n distribution to compute the KL-divergence between two SquashToSpecNormal\n distributions provided that they were scaled by the same action spec.\n This is possible as KL divergence is invariant when both distributions are\n transformed using the same invertible function.\n\n Formally, let a be the action magnitude and b be the action mean. The\n squashing operation performs the following change of variables to the\n input distribution X:\n\n Y = a * tanh(X) + b\n\n Note that this is a change of variables as the function is invertible, with:\n\n X = tan((Y - b) / a), where Y in (b - a, b + a)\n \"\"\"\n\n def __init__(self,\n distribution,\n spec,\n validate_args=False,\n name=\"SquashToSpecNormal\"):\n \"\"\"Constructs a SquashToSpecNormal distribution.\n\n Args:\n distribution: input normal distribution with normalized mean and std dev\n spec: bounded action spec from which to compute action ranges\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. 
When `False` invalid inputs may silently render incorrect\n outputs.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n\n if not isinstance(\n distribution,\n (tfp.distributions.Normal, tfp.distributions.MultivariateNormalDiag)):\n raise ValueError(\"Input distribution must be a normal distribution, \"\n \"got {} instead\".format(distribution))\n self.action_means, self.action_magnitudes = common.spec_means_and_magnitudes(\n spec)\n # Parameters here describe the actor network's output, which is a normalized\n # distribution prior to squashing to the action spec.\n # This is necessary (and sufficient) in order for policy info to compare an\n # old policy to a new policy.\n parameters = {\"loc\": distribution.loc, \"scale\": distribution.scale}\n # The raw action distribution\n self.input_distribution = distribution\n\n bijectors = [\n tfp.bijectors.Shift(self.action_means)(\n tfp.bijectors.Scale(self.action_magnitudes)),\n tanh_bijector_stable.Tanh()\n ]\n bijector_chain = tfp.bijectors.Chain(bijectors)\n self._squashed_distribution = tfp.distributions.TransformedDistribution(\n distribution=distribution, bijector=bijector_chain)\n super(SquashToSpecNormal, self).__init__(\n dtype=distribution.dtype,\n reparameterization_type=distribution.reparameterization_type,\n validate_args=validate_args,\n allow_nan_stats=distribution.allow_nan_stats,\n parameters=parameters,\n # We let TransformedDistribution access _graph_parents since this class\n # is more like a baseclass than derived.\n graph_parents=(\n distribution._graph_parents + # pylint: disable=protected-access\n bijector_chain.graph_parents),\n name=name)\n\n def kl_divergence(self, other, name=\"kl_divergence\"):\n \"\"\"Computes the KL Divergence between two SquashToSpecNormal distributions.\"\"\"\n if not isinstance(other, SquashToSpecNormal):\n raise ValueError(\"other distribution should be of type \"\n \"SquashToSpecNormal, got {}\".format(other))\n if (tf.reduce_any(tf.not_equal(self.action_means, other.action_means)) or\n tf.reduce_any(\n tf.not_equal(self.action_magnitudes, other.action_magnitudes))):\n raise ValueError(\"Other distribution does not have same action mean \"\n \"and magnitude. 
This mean {}, this magnitude {}, \"\n \"other mean {}, other magnitude {}.\".format(\n self.action_means, self.action_magnitudes,\n other.action_means, other.action_magnitudes))\n return self.input_distribution.kl_divergence(other.input_distribution, name)\n\n def sample(self, sample_shape=(), seed=None, name=\"sample\"):\n \"\"\"Generates samples from the wrapped TransformedDistribution.\"\"\"\n return self._squashed_distribution.sample(sample_shape, seed, name)\n\n def log_prob(self, value, name=\"log_prob\"):\n \"\"\"Computes log probability from the wrapped TransformedDistribution.\"\"\"\n return self._squashed_distribution.log_prob(value, name)\n\n def prob(self, value, name=\"prob\"):\n \"\"\"Computes probability from the wrapped TransformedDistribution.\"\"\"\n return self._squashed_distribution.prob(value, name)\n\n def stddev(self, name=\"stddev\"):\n \"\"\"Compute stddev of the SquashToSpecNormal distribution.\"\"\"\n stddev = self.action_magnitudes * tf.tanh(self.input_distribution.stddev())\n return stddev\n\n def mode(self, name=\"mode\"):\n \"\"\"Compute mean of the SquashToSpecNormal distribution.\"\"\"\n mean = self.action_magnitudes * tf.tanh(self.input_distribution.mode()) + \\\n self.action_means\n return mean\n\n def mean(self, name=\"mean\", **kwargs):\n \"\"\"Compute mean of the SquashToSpecNormal distribution.\"\"\"\n return self.mode(name)\n\n def event_shape_tensor(self, name=\"event_shape_tensor\"):\n \"\"\"Compute event shape tensor of the SquashToSpecNormal distribution.\"\"\"\n return self._squashed_distribution.event_shape_tensor(name)\n\n def batch_shape_tensor(self, name=\"batch_shape_tensor\"):\n \"\"\"Compute event shape tensor of the SquashToSpecNormal distribution.\"\"\"\n return self._squashed_distribution.batch_shape_tensor(name)\n\n\nclass Params(object):\n \"\"\"The (recursive) parameters of objects exposing the `parameters` property.\n\n This includes TFP `Distribution`, `Bijector`, and TF `LinearOperator`.\n\n `Params` objects are created with\n `tf_agents.distributions.utils.get_parameters`;\n `Params` can be converted back to original objects via\n `tf_agents.distributions.utils.make_from_parameters`.\n\n In-place edits of fields are allowed, and will not modify the original\n objects (with the exception of, e.g., reference objects like `tf.Variable`\n being modified in-place).\n\n The components of a `Params` object are: `type_` and `params`.\n\n - `type_` is the type of object.\n - `params` is a `dict` of the (non-default) non-tensor arguments passed to the\n object's `__init__`; and includes nests of Python objects, as well as other\n `Params` values representing \"Param-representable\" objects passed to init.\n\n A non-trivial example:\n\n ```python\n scale_matrix = tf.Variable([[1.0, 2.0], [-1.0, 0.0]])\n d = tfp.distributions.MultivariateNormalDiag(\n loc=[1.0, 1.0], scale_diag=[2.0, 3.0], validate_args=True)\n b = tfp.bijectors.ScaleMatvecLinearOperator(\n scale=tf.linalg.LinearOperatorFullMatrix(matrix=scale_matrix),\n adjoint=True)\n b_d = b(d)\n p = utils.get_parameters(b_d)\n ```\n\n Then `p` is:\n\n ```python\n Params(\n tfp.distributions.TransformedDistribution,\n params={\n \"bijector\": Params(\n tfp.bijectors.ScaleMatvecLinearOperator,\n params={\"adjoint\": True,\n \"scale\": Params(\n tf.linalg.LinearOperatorFullMatrix,\n params={\"matrix\": scale_matrix})}),\n \"distribution\": Params(\n tfp.distributions.MultivariateNormalDiag,\n params={\"validate_args\": True,\n \"scale_diag\": [2.0, 3.0],\n \"loc\": [1.0, 1.0]})})\n ```\n\n 
This structure can be manipulated and/or converted back to a `Distribution`\n instance via `make_from_parameters`:\n\n ```python\n p.params[\"distribution\"].params[\"loc\"] = [0.0, 0.0]\n\n # The distribution `new_b_d` will be a MVN centered on `(0, 0)` passed through\n # the `ScaleMatvecLinearOperator` bijector.\n new_b_d = utils.make_from_parameters(p)\n ```\n \"\"\"\n type_: Type[Any] # Any class that has a .parameters.\n params: Mapping[Text, Any]\n\n def __str__(self):\n return \"<Params: type={}, params={}>\".format(self.type_, self.params)\n\n def __repr__(self):\n return str(self)\n\n def __eq__(self, other):\n return (isinstance(self, type(other))\n and self.type_ == other.type_\n and self.params == other.params)\n\n def __init__(self, type_, params):\n self.type_ = type_\n self.params = params\n\n\ndef get_parameters(value: Any) -> Params:\n \"\"\"Creates a recursive `Params` object from `value`.\n\n The `Params` object can be converted back to an object of type `type(value)`\n with `make_from_parameters`. For more details, see the docstring of\n `Params`.\n\n Args:\n value: Typically a user provides `tfp.Distribution`, `tfp.Bijector`, or\n `tf.linalg.LinearOperator`, but this can be any Python object.\n\n Returns:\n An instance of `Params`.\n\n Raises:\n TypeError: If `value.parameters` exists, is not `None`, but but is also not\n a `Mapping` (e.g. a `dict`).\n \"\"\"\n parameters = getattr(value, \"parameters\", None)\n if not isinstance(parameters, Mapping):\n raise TypeError(\n \"value.parameters is not available or is not a dict; \"\n \"value: {}; parameters: {}\".format(value, parameters))\n type_ = type(value)\n params = {}\n\n def process_parameter(p):\n if getattr(p, \"parameters\", None) is not None:\n return get_parameters(p)\n else:\n return p\n\n if getattr(value, \"parameters\"):\n default_values = inspect.signature(type_).parameters.items()\n default_values = {\n k: v.default\n for (k, v) in default_values\n if v.default is not inspect.Parameter.empty\n }\n params = {\n k: tf.nest.map_structure(process_parameter, v)\n for k, v in value.parameters.items()\n if v is not default_values.get(k, None)\n }\n return Params(type(value), params)\n\n return value\n\n\ndef make_from_parameters(value: Params) -> Any:\n \"\"\"Creates an instance of type `value.type_` with the parameters in `value`.\n\n For more details, see the docstrings for `get_parameters` and `Params`.\n\n This function may raise strange errors if `value` is a `Params` created from\n a badly constructed object (one which does not set `self._parameters`\n properly). For example:\n\n ```python\n class MyBadlyConstructedDistribution(tfp.distributions.Categorical):\n def __init__(self, extra_arg, **kwargs):\n super().__init__(**kwargs)\n self._extra_arg = extra_arg\n\n ...\n ```\n\n To fix this, make sure `self._parameters` are properly set:\n\n ```python\n class MyProperlyConstructedDistribution(tfp.distributions.Categorical):\n def __init__(self, extra_arg, **kwargs):\n super().__init__(**kwargs)\n # Ensure all arguments to `__init__` are in `self._parameters`.\n self._parameters = dict(extra_arg=extra_arg, **kwargs)\n self._extra_arg = extra_arg\n\n ...\n ```\n\n Args:\n value: A `Params` object; the output of `get_parameters` (or a\n modified version thereof).\n\n Returns:\n An instance of `value.type_`.\n\n Raises:\n Exception: If `value` is a `Params` object and the initializer of\n `value.type_` does not recognize accept the args structure given in\n `value.params`. 
This can happen if, e.g., `value.type_.__init__` does not\n properly set `self._parameters` or `self.parameters` to match the\n arguments it expects.\n \"\"\"\n def make_from_params_or_identity(v_):\n return make_from_parameters(v_) if isinstance(v_, Params) else v_\n\n params = {\n k: tf.nest.map_structure(make_from_params_or_identity, v)\n for k, v in value.params.items()\n }\n return value.type_(**params)\n\n\ndef parameters_to_dict(value: Params) -> Mapping[Text, Any]:\n \"\"\"Converts `value` to a nested `dict` (excluding all `type_` info).\n\n Sub-dicts represent `Params` objects; keys represent flattened nest structures\n in `value.params`.\n\n Example:\n\n ```python\n scale_matrix = tf.Variable([[1.0, 2.0], [-1.0, 0.0]])\n d = tfp.distributions.MultivariateNormalDiag(\n loc=[1.0, 1.0], scale_diag=[2.0, 3.0], validate_args=True)\n b = tfp.bijectors.ScaleMatvecLinearOperator(\n scale=tf.linalg.LinearOperatorFullMatrix(matrix=scale_matrix),\n adjoint=True)\n b_d = b(d)\n p = utils.get_parameters(b_d)\n params_dict = utils.parameters_to_dict(p)\n ```\n\n results in the nested dictionary:\n\n ```python\n {\"bijector\": {\"adjoint\": True,\n \"scale\": {\"matrix\": scale_matrix}},\n \"distribution\": {\"validate_args\": True,\n # These are deeply nested because we passed lists\n # intead of numpy arrays for `loc` and `scale_diag`.\n \"scale_diag:0\": 2.0,\n \"scale_diag:1\": 3.0,\n \"loc:0\": 1.0,\n \"loc:1\": 1.0}\n }\n ```\n\n The dictionary may then be modified or updated (e.g., in place), and converted\n back to a `Params` object using `merge_to_parameters_from_dict`.\n\n Args:\n value: The (possibly recursively defined) `Params`.\n\n Returns:\n A `dict` mapping `value.params` to flattened key/value pairs. Any\n sub-`Params` objects become nested dicts.\n \"\"\"\n convert = lambda p: parameters_to_dict(p) if isinstance(p, Params) else p\n\n output_entries = {}\n for k, v in value.params.items():\n if tf.nest.is_nested(v):\n flattened_params = nest_utils.flatten_with_joined_paths(v)\n for (param_k, param_v) in flattened_params:\n key = \"{}:{}\".format(k, param_k)\n output_entries[key] = convert(param_v)\n else:\n output_entries[k] = convert(v)\n return output_entries\n\n\ndef merge_to_parameters_from_dict(\n value: Params, params_dict: Mapping[Text, Any]) -> Params:\n \"\"\"Merges dict matching data of `parameters_to_dict(value)` to a new `Params`.\n\n For more details, see the example below and the documentation of\n `parameters_to_dict`.\n\n Example:\n\n ```python\n scale_matrix = tf.Variable([[1.0, 2.0], [-1.0, 0.0]])\n d = tfp.distributions.MultivariateNormalDiag(\n loc=[1.0, 1.0], scale_diag=[2.0, 3.0], validate_args=True)\n b = tfp.bijectors.ScaleMatvecLinearOperator(\n scale=tf.linalg.LinearOperatorFullMatrix(matrix=scale_matrix),\n adjoint=True)\n b_d = b(d)\n p = utils.get_parameters(b_d)\n\n params_dict = utils.parameters_to_dict(p)\n params_dict[\"bijector\"][\"scale\"][\"matrix\"] = new_scale_matrix\n\n new_params = utils.merge_to_parameters_from_dict(\n p, params_dict)\n\n # new_d is a `ScaleMatvecLinearOperator()(MultivariateNormalDiag)` with\n # a new scale matrix.\n new_d = utils.make_from_parameters(new_params)\n ```\n\n Args:\n value: A `Params` from which `params_dict` was derived.\n params_dict: A nested `dict` created by e.g. 
calling\n `parameters_to_dict(value)` and modifying it to modify parameters.\n **NOTE** If any keys in the dict are missing, the \"default\" value in\n `value` is used instead.\n\n Returns:\n A new `Params` object which can then be turned into e.g. a\n `tfp.Distribution` via `make_from_parameters`.\n\n Raises:\n ValueError: If `params_dict` has keys missing from `value.params`.\n KeyError: If a subdict entry is missing for a nested value in\n `value.params`.\n \"\"\"\n\n new_params = {}\n\n processed_params = set()\n for k, v in value.params.items():\n # pylint: disable=cell-var-from-loop\n def convert(params_k, p):\n if params_k is not None:\n params_key = \"{}:{}\".format(k, params_k)\n params_dict_value = params_dict.get(params_key, None)\n if params_dict_value is None:\n raise KeyError(\"Missing a required nested element from \"\n \"params_dict.keys: '{}'. params_dict.keys: {}\"\n .format(params_key, sorted(params_dict.keys())))\n else:\n params_key = k\n params_dict_value = params_dict.get(k, None)\n processed_params.add(params_key)\n if isinstance(p, Params):\n return merge_to_parameters_from_dict(p, params_dict_value)\n else:\n return params_dict_value if params_dict_value is not None else p\n # pylint: enable=cell-var-from-loop\n\n if tf.nest.is_nested(v):\n new_params[k] = nest_utils.map_structure_with_paths(convert, v)\n else:\n new_params[k] = convert(None, v)\n\n unvisited_params_keys = set(params_dict) - processed_params\n if unvisited_params_keys:\n raise ValueError(\n \"params_dict had keys that were not part of value.params. \"\n \"params_dict keys: {}, value.params processed keys: {}\".format(\n sorted(params_dict.keys()), sorted(processed_params)))\n\n return Params(type_=value.type_, params=new_params)\n\n\ndef _check_no_tensors(parameters: Params):\n flat_params = tf.nest.flatten(parameters.params)\n for p in flat_params:\n if isinstance(p, Params):\n _check_no_tensors(p)\n if tf.is_tensor(p):\n raise TypeError(\n \"Saw a `Tensor` value in parameters:\\n {}\".format(parameters))\n\n\nclass DistributionSpecV2(object):\n \"\"\"Describes a tfp.distribution.Distribution using nested parameters.\"\"\"\n\n def __init__(self,\n event_shape: tf.TensorShape,\n dtype: tf.DType,\n parameters: Params):\n \"\"\"Construct a `DistributionSpecV2` from a Distribution's properties.\n\n Note that the `parameters` used to create the spec should contain\n `tf.TypeSpec` objects instead of tensors. We check for this.\n\n Args:\n event_shape: The distribution's `event_shape`. This is the shape that\n `distribution.sample()` returns. 
`distribution.sample(sample_shape)`\n returns tensors of shape `sample_shape + event_shape`.\n dtype: The distribution's `dtype`.\n parameters: The recursive parameters of the distribution, with\n tensors having directly been converted to `tf.TypeSpec` objects.\n\n Raises:\n TypeError: If for any entry `x` in `parameters`: `tf.is_tensor(x)`.\n \"\"\"\n _check_no_tensors(parameters)\n self._event_shape = event_shape\n self._dtype = dtype\n self._parameters = parameters\n self._event_spec = tf.TensorSpec(shape=event_shape, dtype=dtype)\n\n @property\n def event_shape(self) -> tf.TensorShape:\n return self._event_shape\n\n @property\n def dtype(self) -> tf.DType:\n return self._dtype\n\n @property\n def event_spec(self) -> tf.TensorSpec:\n return self._event_spec\n\n @property\n def parameters(self) -> Params:\n return self._parameters\n\n def __eq__(self, other):\n return (isinstance(self, type(other))\n and self._event_shape == other._event_shape\n and self._dtype == other._dtype\n and self._parameters == other._parameters)\n\n def __str__(self):\n return (\"<DistributionSpecV2: event_shape={}, dtype={}, parameters={}>\"\n .format(self.event_shape, self.dtype, self.parameters))\n\n def __repr__(self):\n return str(self)\n\n\ndef assert_specs_are_compatible(\n network_output_spec: types.NestedTensorSpec,\n spec: types.NestedTensorSpec,\n message_prefix: str):\n \"\"\"Checks that the output of `network.create_variables` matches a spec.\n\n Args:\n network_output_spec: The output of `network.create_variables`.\n spec: The spec we are matching to.\n message_prefix: The message prefix for error messages, used when the specs\n don't match.\n\n Raises:\n ValueError: If the specs don't match.\n \"\"\"\n def to_event(s):\n return s.event_spec if isinstance(s, DistributionSpecV2) else s\n\n event_spec = tf.nest.map_structure(to_event, network_output_spec)\n\n nest_utils.assert_same_structure(\n event_spec,\n spec,\n message=(\"{}:\\n{}\\nvs.\\n{}\".format(message_prefix, event_spec, spec)))\n\n def compare_output_to_spec(s1, s2):\n if not s1.is_compatible_with(s2):\n raise ValueError(\"{}:\\n{}\\nvs.\\n{}\".format(message_prefix, event_spec,\n spec))\n\n tf.nest.map_structure(compare_output_to_spec, event_spec, spec)\n" ]
[ [ "tensorflow.not_equal", "tensorflow.is_tensor", "tensorflow.nest.flatten", "tensorflow.TensorSpec", "tensorflow.nest.map_structure", "tensorflow.nest.is_nested" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]