repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
zoeyuchao/onpolicy-release | [
"c2cb64e59c5b1f21cce022db76c378b396fd480e"
] | [
"onpolicy/envs/mpe/scenarios/simple_push.py"
] | [
"import numpy as np\nfrom onpolicy.envs.mpe.core import World, Agent, Landmark\nfrom onpolicy.envs.mpe.scenario import BaseScenario\nimport random\n\n#\n# # the non-ensemble version of <ensemble_push>\n#\n#\n\nclass Scenario(BaseScenario):\n def make_world(self, args):\n world = World()\n world.world_length = args.episode_length\n # set any world properties first\n world.dim_c = 2\n num_agents = args.num_agents#2\n num_adversaries = 1\n num_landmarks = args.num_landmarks#2\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n if i < num_adversaries:\n agent.adversary = True\n else:\n agent.adversary = False\n # agent.u_noise = 1e-1\n # agent.c_noise = 1e-1\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.1, 0.1, 0.1])\n landmark.color[i + 1] += 0.8\n landmark.index = i\n # set goal landmark\n goal = np.random.choice(world.landmarks)\n for i, agent in enumerate(world.agents):\n agent.goal_a = goal\n agent.color = np.array([0.25, 0.25, 0.25])\n if agent.adversary:\n agent.color = np.array([0.75, 0.25, 0.25])\n else:\n j = goal.index\n agent.color[j + 1] += 0.5\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = 0.8 * np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)\n\n def agent_reward(self, agent, world):\n # the distance to the goal\n return -np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))\n\n def adversary_reward(self, agent, world):\n # keep the nearest good agents away from the goal\n agent_dist = [np.sqrt(np.sum(np.square(a.state.p_pos - a.goal_a.state.p_pos))) for a in world.agents if not a.adversary]\n pos_rew = min(agent_dist)\n #nearest_agent = world.good_agents[np.argmin(agent_dist)]\n #neg_rew = np.sqrt(np.sum(np.square(nearest_agent.state.p_pos - agent.state.p_pos)))\n neg_rew = np.sqrt(np.sum(np.square(agent.goal_a.state.p_pos - agent.state.p_pos)))\n #neg_rew = sum([np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos))) for a in world.good_agents])\n return pos_rew - neg_rew\n \n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks: # world.entities:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not agent.adversary:\n 
return np.concatenate([agent.state.p_vel] + [agent.goal_a.state.p_pos - agent.state.p_pos] + [agent.color] + entity_pos + entity_color + other_pos)\n else:\n #other_pos = list(reversed(other_pos)) if random.uniform(0,1) > 0.5 else other_pos # randomize position of other agents in adversary network\n return np.concatenate([agent.state.p_vel] + entity_pos + other_pos)\n"
] | [
[
"numpy.square",
"numpy.random.choice",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
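The `apis` entry above lists the NumPy calls detected in `simple_push.py`. As a hedged standalone sketch (the positions below are stand-ins for the scenario's `Agent`/`Landmark` state, not the onpolicy API), this is how those calls combine in the scenario's reward and observation:

```python
import numpy as np

# Stand-in 2-D world state, initialised the same way as reset_world():
agent_pos = np.random.uniform(-1, +1, 2)          # agent.state.p_pos
goal_pos = 0.8 * np.random.uniform(-1, +1, 2)     # goal landmark position

# agent_reward(): negative Euclidean distance to the goal landmark.
reward = -np.sqrt(np.sum(np.square(agent_pos - goal_pos)))

# observation(): relative positions concatenated into one flat vector.
velocity = np.zeros(2)                            # agent.state.p_vel
obs = np.concatenate([velocity, goal_pos - agent_pos])
print(reward, obs.shape)
```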
ael-noblegas/pychron | [
"1a81e05d9fba43b797f335ceff6837c016633bcf",
"1a81e05d9fba43b797f335ceff6837c016633bcf"
] | [
"pychron/core/ui/qt/color_map_bar_editor.py",
"pychron/mv/focus/autofocus_manager.py"
] | [
"# ===============================================================================\n# Copyright 2012 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nfrom __future__ import absolute_import\nfrom chaco.data_range_1d import DataRange1D\nfrom chaco.default_colormaps import color_map_dict, color_map_name_dict\nfrom pyface.qt.QtGui import QPainter, QColor, QFrame\nfrom traits.api import Float, Int, Str\nfrom traitsui.basic_editor_factory import BasicEditorFactory\nfrom traitsui.qt4.editor import Editor\nfrom numpy import array\n\n# ============= local library imports ==========================\n# from matplotlib.cm import get_cmap\n\n\nclass Bar(QFrame):\n value = None\n low = 0\n high = 1\n color_scalar = 1\n colormap = 'jet'\n bar_width = 100\n scale = 'power'\n\n # def __init__(self, parent, ident=-1):\n # super(Bar, self).__init__()\n # self._cmap = get_cmap(self.colormap)\n\n def paintEvent(self, e):\n qp = QPainter()\n qp.begin(self)\n qp.setBrush(QColor(*self.value))\n qp.drawRect(0, 0, self.bar_width, 20)\n qp.end()\n\n def set_value(self, v):\n \"\"\"\n map v to users color scale\n use power law v=A*x**(1/cs)\n increase cs increases the rate of change at low values\n increase cs will make it easier to see small pertubations (more color change) at\n the low end.\n\n \"\"\"\n if self.scale == 'power':\n N = 1 / float(self.color_scalar)\n A = 1 / self.high ** N\n nv = A * v ** N\n else:\n nv = min(1, max(0, (v - self.low) / (self.high - self.low)))\n\n vs = self.cmap.map_screen(array([nv,]))[0][:3]\n self.value = [x * 255 for x in vs]\n self.update()\n\n\nclass _BarGaugeEditor(Editor):\n def init(self, parent):\n self.control = Bar()\n self.control.low = low = self.factory.low\n self.control.high = high = self.factory.high\n self.control.color_scalar = self.factory.color_scalar\n self.control.bar_width = self.factory.width\n self.control.scale = self.factory.scale\n\n # if self.factory.scale == 'power':\n # high = N = 1 / float(self.color_scalar)\n # A = 1 / self.high ** N\n self.control.cmap = color_map_name_dict[self.factory.colormap](DataRange1D(low_setting=0, high_setting=1))\n\n def update_editor(self):\n if self.control:\n self.control.set_value(self.value)\n\n\nclass BarGaugeEditor(BasicEditorFactory):\n klass = _BarGaugeEditor\n low = Float\n high = Float\n color_scalar = Int(1)\n scale = Str('power')\n colormap = Str('jet')\n width = Int(100)\n\n# ============= EOF =============================================\n",
"# ===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# =============enthought library imports=======================\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport six.moves.cPickle as pickle\n\nfrom traits.api import Bool, Any, Instance, Button, Property, Event, on_trait_change\nfrom traitsui.api import View, Item, Handler, HGroup\n\n# ============= standard library imports ========================\n# from threading import Thread\nfrom threading import Event as TEvent\nfrom numpy import linspace, argmin, argmax, random, asarray\nimport time\nimport os\n# ============= local library imports ==========================\nfrom pychron.core.time_series.time_series import smooth\nfrom pychron.image.cv_wrapper import grayspace, crop, get_focus_measure\n# from pychron.image.cvwrapper import grayspace, get_focus_measure, crop, resize\nfrom scipy.ndimage.measurements import variance\nfrom scipy.ndimage.filters import generic_gradient_magnitude, sobel\nfrom scipy.ndimage import sum as ndsum\nfrom pychron.paths import paths\nfrom pychron.managers.manager import Manager\nfrom pychron.image.image import Image\n# from pychron.machine_vision.focus_parameters import FocusParameters\n# from pychron.image.image_editor import ImageEditor\nfrom pychron.graph.graph import Graph\nfrom pychron.mv.focus.focus_parameters import FocusParameters\nfrom pychron.core.ui.image_editor import ImageEditor\nfrom pychron.core.ui.gui import invoke_in_main_thread\nfrom pychron.core.ui.thread import Thread\n\n\nclass ConfigureHandler(Handler):\n def closed(self, info, isok):\n if isok:\n info.object.dump_parameters()\n\n\nclass AutoFocusManager(Manager):\n \"\"\"\n currently uses passive focus techniques\n see\n\n http://en.wikipedia.org/wiki/Autofocus\n\n \"\"\"\n\n video = Any\n laser_manager = Any\n stage_controller = Any\n canvas = Any\n parameters = Instance(FocusParameters)\n configure_button = Button('configure')\n\n autofocus_button = Event\n autofocus_label = Property(depends_on='autofocusing')\n autofocusing = Bool\n\n # threading event for cancel signal\n _evt_autofocusing = None\n\n image = Instance(Image, ())\n\n graph = None\n\n def dump_parameters(self):\n p = os.path.join(paths.hidden_dir, 'autofocus_configure')\n self.info('dumping parameters to {}'.format(p))\n with open(p, 'wb') as f:\n pickle.dump(self.parameters, f)\n\n def load_parameter(self):\n p = os.path.join(paths.hidden_dir, 'autofocus_configure')\n if os.path.isfile(p):\n with open(p, 'rb') as f:\n try:\n params = pickle.load(f)\n self.info('loading parameters from {}'.format(p))\n\n if not isinstance(params, FocusParameters):\n self.info('out of date parameters file. 
using default')\n params = FocusParameters()\n return params\n\n except Exception as e:\n print('autofocus load parameter', e)\n return FocusParameters()\n else:\n return FocusParameters()\n\n def passive_focus(self, block=False, **kw):\n\n self._evt_autofocusing = TEvent()\n self._evt_autofocusing.clear()\n# manager = self.laser_manager\n oper = self.parameters.operator\n self.info('passive focus. operator = {}'.format(oper))\n\n g = self.graph\n if not g:\n g = Graph(plotcontainer_dict=dict(padding=10),\n window_x=0.70,\n window_y=20,\n window_width=325,\n window_height=325,\n window_title='Autofocus'\n )\n self.graph = g\n\n g.clear()\n\n g.new_plot(padding=[40, 10, 10, 40],\n xtitle='Z (mm)',\n ytitle='Focus Measure ({})'.format(oper)\n )\n g.new_series()\n g.new_series()\n\n invoke_in_main_thread(self._open_graph)\n\n target = self._passive_focus\n self._passive_focus_thread = Thread(name='autofocus', target=target,\n args=(self._evt_autofocusing,\n\n ),\n kwargs=kw\n )\n self._passive_focus_thread.start()\n if block:\n# while 1:\n# if not self._passive_focus_thread.isRunning():\n# break\n# time.sleep(0.25)\n self._passive_focus_thread.join()\n\n def _open_graph(self):\n ui = self.graph.edit_traits()\n self.add_window(ui)\n\n def stop_focus(self):\n\n if self.stage_controller:\n self.stage_controller.stop()\n\n self.info('autofocusing stopped by user')\n\n def _passive_focus(self, stop_signal, set_zoom=True):\n '''\n sweep z looking for max focus measure\n FMgrad= roberts or sobel (sobel removes noise)\n FMvar = intensity variance\n '''\n\n self.autofocusing = True\n\n manager = self.laser_manager\n fstart = self.parameters.fstart\n fend = self.parameters.fend\n step_scalar = self.parameters.step_scalar\n zoom = self.parameters.zoom\n operator = self.parameters.operator\n\n steps = step_scalar * (max(fend, fstart) - min(fend, fstart)) + 1\n\n prev_zoom = None\n if set_zoom and \\\n manager is not None and \\\n zoom:\n motor = manager.get_motor('zoom')\n if motor:\n prev_zoom = motor.data_position\n self.info('setting zoom: {}'.format(zoom))\n manager.set_motor('zoom', zoom, block=True)\n time.sleep(1.5)\n\n args = self._do_focusing(fstart, fend, steps, operator)\n\n if manager is not None:\n if prev_zoom is not None:\n self.info('returning to previous zoom: {}'.format(prev_zoom))\n manager.set_motor('zoom', prev_zoom, block=True)\n\n if args:\n mi, fmi, ma, fma = args\n\n self.info('''passive focus results:Operator={}\nImageGradmin={} (z={})\nImageGradmax={}, (z={})'''.format(operator, mi, fmi, ma, fma))\n\n focus_pos = fma\n self.graph.add_vertical_rule(focus_pos)\n self.graph.redraw()\n# self.graph.add_vertical_rule(fma)\n\n self.info('calculated focus z= {}'.format(focus_pos))\n\n# if set_z:\n controller = self.stage_controller\n if controller is not None:\n if not stop_signal.isSet():\n controller.single_axis_move('z', focus_pos, block=True)\n controller._z_position = focus_pos\n controller.z_progress = focus_pos\n\n self.autofocusing = False\n\n def _cancel_sweep(self, vo):\n if self._evt_autofocusing.isSet():\n # return to original velocity\n self.autofocusing = False\n self._reset_velocity(vo)\n return True\n\n def _reset_velocity(self, vo):\n if self.stage_controller:\n pdict = dict(velocity=vo, key='z')\n self.stage_controller.set_single_axis_motion_parameters(pdict=pdict)\n\n def _do_focusing(self, start, end, steps, operator):\n screen_roi = self._get_roi()\n self._add_focus_area_rect(*screen_roi)\n\n src = self._load_source()\n src = asarray(src)\n h, w, _d = src.shape\n\n 
cx = w / 2.\n cy = h / 2.\n\n cw = self.parameters.crop_width\n ch = self.parameters.crop_height\n\n roi = cx, cy, cw, ch\n\n '''\n start the z in motion and take pictures as you go\n query stage_controller to get current z\n '''\n\n self.info('focus sweep start={} end={}'.format(start, end))\n # move to start position\n controller = self.stage_controller\n if controller:\n vo = controller.axes['z'].velocity\n if self._cancel_sweep(vo):\n return\n self.graph.set_x_limits(min(start, end), max(start, end), pad=2)\n # sweep 1 and velocity 1\n self._do_sweep(start, end, velocity=self.parameters.velocity_scalar1)\n fms, focussteps = self._collect_focus_measures(operator, roi)\n if not (fms and focussteps):\n return\n\n # reached end of sweep\n # calculate a nominal focal point\n args = self._calculate_nominal_focal_point(fms, focussteps)\n if not args:\n return\n nfocal = args[3]\n\n nwin = self.parameters.negative_window\n pwin = self.parameters.positive_window\n\n if self._cancel_sweep(vo):\n return\n nstart, nend = max(0, nfocal - nwin), nfocal + pwin\n# mi = min(min(nstart, nend), min(start, end))\n# ma = max(max(nstart, nend), max(start, end))\n# self.graph.set_x_limits(mi, ma, pad=2)\n time.sleep(1)\n # do a slow tight sweep around the nominal focal point\n self._do_sweep(nstart, nend, velocity=self.parameters.velocity_scalar2)\n fms, focussteps = self._collect_focus_measures(operator, roi, series=1)\n\n self._reset_velocity(vo)\n\n else:\n focussteps = linspace(0, 10, 11)\n fms = -(focussteps - 5) ** 2 + 10 + random.random(11)\n\n self.info('frames analyzed {}'.format(len(fms)))\n\n# self.canvas.markupcontainer.pop('croprect')\n return self._calculate_nominal_focal_point(fms, focussteps)\n\n def _do_sweep(self, start, end, velocity=None):\n controller = self.stage_controller\n controller.single_axis_move('z', start, block=True)\n# time.sleep(0.1)\n # explicitly check for motion\n# controller.block(axis='z')\n\n if velocity:\n vo = controller.axes['z'].velocity\n\n controller.set_single_axis_motion_parameters(pdict=dict(velocity=vo * velocity,\n key='z'))\n\n self.info('starting sweep from {}'.format(controller.z_progress))\n # pause before moving to end\n time.sleep(0.25)\n controller.single_axis_move('z', end, update=100, immediate=True)\n\n def _collect_focus_measures(self, operator, roi, series=0):\n controller = self.stage_controller\n focussteps = []\n fms = []\n if controller.timer:\n p = controller.timer.get_interval()\n self.debug('controller timer period {}'.format(p))\n pz = controller.z_progress\n\n while 1:\n src = self._load_source()\n x = controller.z_progress\n if x != pz:\n y = self._calculate_focus_measure(src, operator, roi)\n self.graph.add_datum((x, y), series=series)\n\n focussteps.append(x)\n fms.append(y)\n\n pz = x\n\n if not (controller.timer.isActive() and \\\n not self._evt_autofocusing.isSet()):\n break\n time.sleep(p)\n\n self.debug('sweep finished')\n\n\n return fms, focussteps\n\n def _calculate_nominal_focal_point(self, fms, focussteps):\n if fms:\n sfms = smooth(fms)\n if sfms is not None:\n\n self.graph.new_series(focussteps, sfms)\n self.graph.redraw()\n\n fmi = focussteps[argmin(sfms)]\n fma = focussteps[argmax(sfms)]\n\n mi = min(sfms)\n ma = max(sfms)\n\n return mi, fmi, ma, fma\n\n def _calculate_focus_measure(self, src, operator, roi):\n '''\n see\n IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM\n FOR DIGITAL STILL CAMERA\n DOI 10.1109/30.468047\n and\n http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus\n '''\n\n # need to 
resize to 640,480. this is the space the roi is in\n# s = resize(grayspace(pychron), 640, 480)\n src = grayspace(src)\n v = crop(src, *roi)\n\n di = dict(var=lambda x:variance(x),\n laplace=lambda x: get_focus_measure(x, 'laplace'),\n sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest'))\n )\n\n func = di[operator]\n return func(v)\n\n def image_view(self):\n v = View(Item('image', show_label=False, editor=ImageEditor(),\n width=640,\n height=480,\n style='custom'))\n return v\n\n def traits_view(self):\n v = View(\n HGroup(self._button_factory('autofocus_button', 'autofocus_label'),\n Item('configure_button', show_label=False),\n show_border=True,\n label='Autofocus'\n )\n )\n return v\n\n def configure_view(self):\n v = View(Item('parameters', style='custom', show_label=False),\n handler=ConfigureHandler,\n buttons=['OK', 'Cancel'],\n kind='livemodal',\n title='Configure Autofocus',\n x=0.80,\n y=0.05\n )\n return v\n\n def _load_source(self):\n src = self.video.get_frame()\n return src\n# if pychron:\n# return Image.new_frame(pychron)\n# self.image.load(pychron)\n\n# return self.image.source_frame\n\n def _get_roi(self):\n w = self.parameters.crop_width\n h = self.parameters.crop_height\n\n cx, cy = self.canvas.get_center_rect_position(w, h)\n\n\n# cw, ch = self.canvas.outer_bounds\n# print w, h, cw, ch\n# cx = cw / 2. - w / 2.\n# cy = ch / 2. - h / 2.\n# cx = (cw - w) / 2.\n# cy = (ch - h) / 2.\n# cx = (640 * self.canvas.scaling - w) / 2\n# cy = (480 * self.canvas.scaling - h) / 2\n roi = cx, cy, w, h\n\n return roi\n\n def _add_focus_area_rect(self, cx, cy, w, h):\n# pl = self.canvas.padding_left\n# pb = self.canvas.padding_bottom\n\n self.canvas.remove_item('croprect')\n self.canvas.add_markup_rect(cx, cy, w, h, identifier='croprect')\n\n def _autofocus_button_fired(self):\n if not self.autofocusing:\n self.autofocusing = True\n\n self.passive_focus()\n else:\n self.autofocusing = False\n self._evt_autofocusing.set()\n self.stop_focus()\n\n def _configure_button_fired(self):\n self._crop_rect_update()\n self.edit_traits(view='configure_view', kind='livemodal')\n\n self.canvas.remove_item('croprect')\n# try:\n# self.canvas.markupcontainer.pop('croprect')\n# except KeyError:\n# pass\n\n @on_trait_change('parameters:[_crop_width,_crop_height]')\n def _crop_rect_update(self):\n roi = self._get_roi()\n self._add_focus_area_rect(*roi)\n\n def _get_autofocus_label(self):\n return 'Autofocus' if not self.autofocusing else 'Stop'\n\n\n def _parameters_default(self):\n return self.load_parameter()\n\n def _autofocusing_changed(self, new):\n if not new:\n self.canvas.remove_item('croprect')\n# ===============================================================================\n# Deprecated\n# ===============================================================================\n# ============= EOF =====================================\n\n"
] | [
[
"numpy.array"
],
[
"numpy.random.random",
"scipy.ndimage.measurements.variance",
"numpy.linspace",
"numpy.asarray",
"scipy.ndimage.filters.generic_gradient_magnitude",
"numpy.argmax",
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.10",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
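The second file above (`autofocus_manager.py`) is what drives the wide SciPy range in `possible_versions`: its focus measures come from `scipy.ndimage`. A minimal sketch of those measures on synthetic data follows (the random frame and parabolic sweep are stand-ins; the original imports from the older `scipy.ndimage.measurements`/`scipy.ndimage.filters` paths, which is why so many SciPy versions remain possible):

```python
import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel, variance

# Synthetic focus sweep, mirroring the fallback branch of _do_focusing():
focussteps = np.linspace(0, 10, 11)
fms = -(focussteps - 5) ** 2 + 10 + np.random.random(11)
best_z = focussteps[np.argmax(fms)]     # nominal focal point
worst_z = focussteps[np.argmin(fms)]

# The two focus measures offered by _calculate_focus_measure():
frame = np.asarray(np.random.random((48, 64)))    # stand-in grayscale crop
fm_var = variance(frame)                          # 'var' operator
fm_sobel = generic_gradient_magnitude(frame, sobel, mode='nearest').sum()
print(best_z, worst_z, fm_var, fm_sobel)
```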
awinawin1/prediksi | [
"b3d552555f775d7b6a1b22077146443fe09bbf5d"
] | [
"public/code/simpleCropPredictSpektogram.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 15 00:21:05 2021\n\n@author: marina\n\"\"\"\nimport os\nimport shutil\nimport pyedflib\nimport numpy as np\nimport pandas as pd\nimport sys\nimport mne \nfrom pywt import wavedec\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom keras.models import Sequential\n #importing layers\nfrom keras.layers import Conv2D,Flatten,Dense,MaxPooling2D \nfrom tensorflow.keras.optimizers import SGD\n# pathDataSet = \"D:\\\\Kuliah\\Tugas Akhir\\chb-mit-scalp-eeg-database-1.0.0\\\\chb07\\\\\"\npathDataSet = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/uploadedSpektogram/\"\npathSaveData = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/uploadedSpektogram/spektogram/\"\n\n\ndef data_load(FILE, selected_channels=[]): \n fullNm = pathDataSet + FILE\n # fullNm = FILE\n f = pyedflib.EdfReader(fullNm )\n n = f.signals_in_file\n signal_labels = f.getSignalLabels()\n channel_freq = f.getSampleFrequencies()\n\n sigbufs = np.zeros((n, f.getNSamples()[0]))\n for i in np.arange(n):\n sigbufs[i, :] = f.readSignal(i)\n f.close()\n \n # and load the data into a DataFrame\n df_signals = pd.DataFrame(sigbufs)\n df_signals = df_signals.transpose()\n df_signals.columns = signal_labels\n df_signals = df_signals.loc[:,~df_signals.columns.duplicated()]\n df_signals = df_signals[selected_channels].astype('float32') \n return df_signals,channel_freq[0]\n\ndef mne_object(data, freq, events = None):\n info = mne.create_info(ch_names=list(data.columns), \n sfreq=freq, \n ch_types=['eeg']*data.shape[-1])\n data_T = data.transpose()\n raw = mne.io.RawArray(data_T, info,verbose=False)\n if events:\n start_times = np.array(events[::2])\n end_times = np.array(events[1::2])\n anno_length = end_times-start_times\n event_name = np.array(['Ictal']*len(anno_length))\n raw.set_annotations(mne.Annotations(start_times,\n anno_length,\n event_name))\n return raw\n\ndef loadAndFiltering(FILE,channel_keeps):\n raw_data, freq = data_load(FILE, channel_keeps)\n if len(raw_data) ==0:\n print(\"no data \")\n return raw_data\n mne_data = mne_object(raw_data, freq)\n raw=mne_data.copy()\n return raw\n\ndef extract_windows(array, start, max_time, sub_window_size,\n stride_size): \n sub_windows = (\n start + \n np.expand_dims(np.arange(sub_window_size), 0) +\n np.expand_dims(np.arange(max_time + 1- sub_window_size-start, step=stride_size), 0).T\n ) \n return array[:,sub_windows]\n\n\ndef Crop(raw): \n cropS = 3\n strides = 1\n \n tMin=0\n tMax=raw.get_data().shape[1]#18*256*cropS \n\n\n sub_window_size,stride_size = 256*cropS,256*strides\n cropData = extract_windows(raw.get_data(), tMin, tMax , sub_window_size,stride_size)\n cropData = cropData.reshape(cropData.shape[1],cropData.shape[0],cropData.shape[2])\n \n return cropData\n\n# def create_modelCNN(input_shape, num_class,flatten=False):\n# from tensorflow.keras.models import Sequential\n# from tensorflow.keras.layers import Dense\n# from tensorflow.keras.backend import clear_session\n# from tensorflow.keras.optimizers import Adam\n \n# from tensorflow.keras.layers import Conv1D#, Input\n# from tensorflow.keras.layers import MaxPooling1D\n# from tensorflow.keras.layers import GlobalAveragePooling1D#, GlobalMaxPooling1D\n# from keras.layers import Activation,Flatten, Dropout\n \n# clear_session()\n# model = Sequential()\n# def add_conv_block(model, num_filters, input_shape=None):\n# if input_shape:\n# model.add(Conv1D(num_filters, kernel_size=3, 
activation='relu', padding='same', input_shape=input_shape))\n# else:\n# model.add(Conv1D(num_filters, kernel_size=3, activation='relu', padding='same'))\n# return model\n# model = add_conv_block(model, 128, input_shape=input_shape[1:])\n# model = add_conv_block(model, 128)\n# model.add(Dropout(0.3)) \n# model.add(MaxPooling1D(pool_size=3, # size of the window\n# strides=2, # factor to downsample\n# padding='same'))\n# model.add(Dropout(0.1))\n# for i in range(2):\n# model.add(Conv1D(filters=256,kernel_size=3,padding=\"same\",activation='relu'))\n# model.add(Dropout(0.1))\n# if flatten:\n# model.add(Flatten())\n# else:\n# model.add(GlobalAveragePooling1D())\n# model.add(Dense(units=128,activation='relu'))\n# model.add(Dropout(0.1))\n# model.add(Dense(num_class))\n# model.add(Activation('softmax'))\n# model.compile(optimizer=Adam(0.0001), \n# loss='categorical_crossentropy', \n# metrics=['accuracy'])\n# return model\n\ndef modelCNN2(input_shape,nb_classes):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=input_shape))\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dense(nb_classes, activation='softmax'))\n\t# compile model\n opt = SGD(lr=0.001, momentum=0.9)\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\ndef plotSpektogram(x,fs,nmFile=''):\n f, t, Sxx = signal.spectrogram(x, fs)\n cut=10\n imgAll=[]\n for i,sinyal in enumerate(Sxx):\n img = plt.pcolormesh(t, f[:cut], sinyal[:cut], shading='gouraud')\n imgAll.append([(r, g, b) for r, g, b, a in img.to_rgba(img.get_array())])\n # print(nmFile)\n # if nmFile !='':\n #(18, 30, 3)\n # print(\"masuk sini\")\n # plt.savefig(nmFile)\n # plt.show()\n # plt.imsave(nmFile, imgAll)\n \n # imgAll = np.array(imgAll)# .reshape(-1,3)\n imgAll = np.array(imgAll).ravel()\n #(18, 30, 3)\n return imgAll \n \nif __name__ == '__main__':\n FILE=sys.argv[1]\n # FILE = 'D:\\\\Kuliah\\Tugas Akhir\\chb-mit-scalp-eeg-database-1.0.0\\\\chb24\\\\chb24_22.edf'\n # FILE = 'chb07_12.edf'\n FILE = FILE.replace(\"'\",\"\")\n dir_path = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/\"\n # if(os.path.isdir(dir_path+FILE)):\n # shutil.rmtree(dir_path+FILE)\n # os.mkdir(\"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/\"+FILE,0o777)\n loaded = np.load(\"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/spektogram/channel_keeps.npz\")\n selected_channels =loaded['channel_keeps'] \n segmen=[]\n raw = loadAndFiltering(FILE,selected_channels)\n \n cropData = Crop(raw) \n numCH = cropData[0].shape[0]\n oneData = cropData[0]\n oneData = plotSpektogram(oneData,256)\n \n oneData = oneData.reshape(1,numCH,-1, 3)\n KELAS = 3\n bntk_input = (18, 30, 3)\n model = modelCNN2(bntk_input,KELAS)\n # model = modelCNN2(oneData.shape,KELAS)#,False) \n nmModel = '/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/modelCNNSpektrogram_3.h5'\n\n model.load_weights(nmModel) \n cnt=0 \n \n for idx in 
range(cropData.shape[0]): \n numCH = cropData[idx].shape[0]\n oneData = cropData[idx]\n nmFile = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/%s/%s_%d.png\"%(FILE,FILE,idx)\n # nmFile = dir+\"%s_%s.png\"%(FILE,idx)\n oneData = plotSpektogram(oneData,256,nmFile)\n oneData = oneData.reshape(1,numCH,-1, 3)\n yPred = model.predict(oneData)\n yPred = np.argmax(yPred,axis=1)\n if yPred[0] == 0:\n hasil = \"Normal\"\n elif yPred[0] == 1:\n hasil = \"Inter\" \n else:\n hasil = \"Ictal\"\n # break\n segmen.append(hasil) \n # print(\"segment=%d prediksi=%s <br>\"%(idx,hasil))\n cnt+=1\n if cnt>1000:\n break\n saveHistory = open(pathSaveData+FILE+\".txt\",\"w\")\n saveHistory.write(str(segmen))\n saveHistory.close()\n print(segmen)\n \n \n \n \n\n"
] | [
[
"matplotlib.pyplot.pcolormesh",
"numpy.arange",
"scipy.signal.spectrogram",
"pandas.DataFrame",
"numpy.argmax",
"numpy.load",
"numpy.array",
"tensorflow.keras.optimizers.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
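The `scipy.signal.spectrogram` and `matplotlib.pyplot.pcolormesh` hits come from `plotSpektogram()`. Below is a hedged sketch of that call chain on a synthetic single-channel signal (a 10 Hz sine plus noise standing in for one EEG channel; the RGB extraction mirrors the script but flattens the mesh array explicitly to stay robust across Matplotlib versions):

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 256                                    # sampling rate used by the script
t = np.arange(0, 3, 1 / fs)                 # one 3-second crop (cropS = 3)
x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)

# Spectrogram, then keep the 10 lowest frequency bins and render them.
f, tt, Sxx = signal.spectrogram(x, fs)
cut = 10
img = plt.pcolormesh(tt, f[:cut], Sxx[:cut], shading='gouraud')

# The script then converts the rendered mesh back into (r, g, b) triples.
rgba = img.to_rgba(np.asarray(img.get_array()).ravel())
rgb = np.array([(r, g, b) for r, g, b, a in rgba])
print(rgb.shape)
```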
Aympab/BigDataHadoopSparkDaskCourse | [
"42f9e0475cbd7c5db240ccc6dc00c19b9006012a"
] | [
"TPs/TP4/test_flower.py"
] | [
"import pyspark\nfrom pyspark import SparkContext\nfrom pyspark.sql import Row\nfrom pyspark.sql import SQLContext\nfrom pyspark import SparkFiles\nimport os\nimport pandas as pd\n\nsc =SparkContext()\nsqlContext = SQLContext(sc)\n\n\ndata_dir=\"/work/irlin355_1/gratienj/ParallelProgrammingCourse/BigDataHadoopSpark/data\"\nfile = os.path.join(data_dir,\"iris.csv\")\npanda_df = pd.read_csv(file)\n\nsqlContext = SQLContext(sc)\n#df = sqlContext.read.csv(SparkFiles.get(\"iris.csv\"), header=True, inferSchema= True)\t\ndf=sqlContext.createDataFrame(panda_df)\ndf.printSchema()\ndf.show(5, truncate = False)\ndf.select('petal_width','variety').show(5)\n\ndf.groupBy(\"variety\").count().sort(\"count\",ascending=True).show()\n\ndf.describe().show()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
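Only `pandas.read_csv` is tracked for this file, since the rest is PySpark. For reference, a hedged pandas-only sketch of the same inspections (the `iris.csv` path is illustrative; `petal_width` and `variety` are the columns the script selects):

```python
import pandas as pd

panda_df = pd.read_csv("iris.csv")                         # as in test_flower.py

print(panda_df[["petal_width", "variety"]].head(5))        # df.select(...).show(5)
print(panda_df["variety"].value_counts(ascending=True))    # groupBy/count/sort
print(panda_df.describe())                                 # df.describe().show()
```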
owenshen24/acme | [
"71434dffd3449236f9b8aaf7a53ceab515e75a2a",
"71434dffd3449236f9b8aaf7a53ceab515e75a2a"
] | [
"acme/agents/actors_tf2_test.py",
"acme/agents/mpo/agent_test.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for actors_tf2.\"\"\"\n\nfrom absl.testing import absltest\n\nfrom acme import environment_loop\nfrom acme import specs\nfrom acme.agents import actors_tf2\nfrom acme.testing import fakes\n\nimport dm_env\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\n\ndef _make_fake_env() -> dm_env.Environment:\n env_spec = specs.EnvironmentSpec(\n observations=specs.Array(shape=(10, 5), dtype=np.float32),\n actions=specs.DiscreteArray(num_values=3),\n rewards=specs.Array(shape=(), dtype=np.float32),\n discounts=specs.BoundedArray(\n shape=(), dtype=np.float32, minimum=0., maximum=1.),\n )\n return fakes.Environment(env_spec, episode_length=10)\n\n\nclass ActorTest(absltest.TestCase):\n\n def test_feedforward(self):\n environment = _make_fake_env()\n env_spec = specs.make_environment_spec(environment)\n\n network = snt.Sequential([\n snt.Flatten(),\n snt.Linear(env_spec.actions.num_values),\n lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),\n ])\n\n actor = actors_tf2.FeedForwardActor(network)\n loop = environment_loop.EnvironmentLoop(environment, actor)\n loop.run(20)\n\n def test_recurrent(self):\n environment = _make_fake_env()\n env_spec = specs.make_environment_spec(environment)\n\n network = snt.DeepRNN([\n snt.Flatten(),\n snt.Linear(env_spec.actions.num_values),\n lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),\n ])\n\n actor = actors_tf2.RecurrentActor(network)\n loop = environment_loop.EnvironmentLoop(environment, actor)\n loop.run(20)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the MPO agent.\"\"\"\n\nfrom absl.testing import absltest\n\nimport acme\nfrom acme import networks\nfrom acme import specs\nfrom acme.agents import mpo\nfrom acme.testing import fakes\n\nimport numpy as np\nimport sonnet as snt\n\n\ndef make_networks(\n action_spec,\n policy_layer_sizes=(10, 10),\n critic_layer_sizes=(10, 10),\n):\n \"\"\"Creates networks used by the agent.\"\"\"\n\n num_dimensions = np.prod(action_spec.shape, dtype=int)\n critic_layer_sizes = list(critic_layer_sizes) + [1]\n\n policy_network = snt.Sequential([\n networks.LayerNormMLP(policy_layer_sizes),\n networks.MultivariateNormalDiagHead(num_dimensions)\n ])\n critic_network = networks.CriticMultiplexer(\n critic_network=networks.LayerNormMLP(critic_layer_sizes))\n\n return {\n 'policy': policy_network,\n 'critic': critic_network,\n }\n\n\nclass MPOTest(absltest.TestCase):\n\n def test_mpo(self):\n # Create a fake environment to test with.\n environment = fakes.ContinuousEnvironment(episode_length=10, bounded=False)\n spec = specs.make_environment_spec(environment)\n\n # Create networks.\n agent_networks = make_networks(spec.actions)\n\n # Construct the agent.\n agent = mpo.MPO(\n spec,\n policy_network=agent_networks['policy'],\n critic_network=agent_networks['critic'],\n batch_size=10,\n samples_per_insert=2,\n min_replay_size=10)\n\n # Try running the environment loop. We have no assertions here because all\n # we care about is that the agent runs without raising any errors.\n loop = acme.EnvironmentLoop(environment, agent)\n loop.run(num_episodes=2)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"tensorflow.argmax"
],
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
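The two API hits here are the `tf.argmax` greedy-action head in the actor test and the `np.prod` used to size the MPO policy head. A minimal standalone sketch of both, assuming TF 2.x eager execution and using a plain Keras layer in place of the Sonnet modules (an assumption, not the acme test code):

```python
import numpy as np
import tensorflow as tf

num_values = 3                                       # DiscreteArray(num_values=3)
observations = np.random.rand(4, 10, 5).astype(np.float32)

# Greedy discrete action: flatten, project to logits, take the argmax,
# mirroring the lambda appended to the test network.
flat = tf.reshape(observations, [observations.shape[0], -1])
logits = tf.keras.layers.Dense(num_values)(flat)
actions = tf.argmax(logits, axis=-1, output_type=tf.int32)

# make_networks() in the MPO test sizes its policy head with np.prod.
num_dimensions = np.prod((6,), dtype=int)
print(actions.numpy(), num_dimensions)
```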
prasadph/ga-learner-dsmp-repo | [
"ac1cc9d96250718f2842592e643c885d54ab2903"
] | [
"NLP/code.py"
] | [
"# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nimport re\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score ,confusion_matrix\n\n\n# Code starts here\n\n# load data\nnews = pd.read_csv(path)\n\n# subset data\nnews = news[[\"TITLE\",\"CATEGORY\"]]\n# distribution of classes\ndist = news.CATEGORY.value_counts()\n\n# display class distribution\nprint(dist)\n\n# display data\nprint(news.head())\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# stopwords \n\nstop = set(stopwords.words('english'))\n# retain only alphabets\nnews.TITLE = news.TITLE.apply(lambda x:re.sub(\"[^a-zA-Z]\", \" \",x))\n\n# convert to lowercase and tokenize\nnews.TITLE = news.TITLE.apply(lambda row:row.lower().split())\n\n# remove stopwords\nnews.TITLE = news.TITLE.apply(lambda row:[i for i in row if i not in stop] )\n\n# join list elements\nnews.TITLE = news.TITLE.apply(lambda x: ' '.join(x))\n\n# split into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(news['TITLE'], news['CATEGORY'], test_size=0.2, random_state=3)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize count vectorizer\ncount_vectorizer = CountVectorizer()\n# initialize tfidf vectorizer\ntfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))\n# fit and transform with count vectorizer\nX_train_count = count_vectorizer.fit_transform(X_train)\nX_test_count = count_vectorizer.transform(X_test)\n\n# fit and transform with tfidf vectorizer\nX_train_tfidf = tfidf_vectorizer.fit_transform(X_train)\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize multinomial naive bayes\nnb_1 = MultinomialNB()\nnb_2 = MultinomialNB() \n# fit on count vectorizer training data\nnb_1.fit(X_train_count, y_train)\n# fit on tfidf vectorizer training data\nnb_2.fit(X_train_tfidf, y_train)\n\n# accuracy with count vectorizer\nacc_count_nb = accuracy_score(nb_1.predict(X_test_count), y_test)\n\n# accuracy with tfidf vectorizer\nacc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), y_test)\n\n# display accuracies\nprint(acc_count_nb)\nprint(acc_tfidf_nb)\n\n# Code ends here\n\n\n# --------------\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# initialize logistic regression\nlogreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))\nlogreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))\n# fit on count vectorizer training data\nlogreg_1.fit(X_train_count, y_train)\n\n# fit on tfidf vectorizer training data\nlogreg_2.fit(X_train_tfidf, y_train)\n\n# accuracy with count vectorizer\nacc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)\n# accuracy with tfidf vectorizer\nacc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)\n\n# display accuracies\nprint(acc_count_logreg)\nprint(acc_tfidf_logreg)\n# Code ends here\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
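The scikit-learn entries for this row map onto a count-vectorizer and a TF-IDF pipeline over news titles. A hedged, self-contained sketch with a tiny in-memory corpus standing in for the CSV loaded via `pd.read_csv(path)`:

```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score

# Tiny stand-in for the TITLE / CATEGORY columns of the news dataset.
titles = ["stocks rally on earnings", "new phone released today",
          "team wins championship game", "central bank raises rates",
          "chip maker unveils processor", "star player signs contract"]
labels = ["b", "t", "s", "b", "t", "s"]

X_train, X_test, y_train, y_test = train_test_split(
    titles, labels, test_size=0.34, random_state=3)

# Same vectorizer/classifier pairings as in the script.
count_vec, tfidf_vec = CountVectorizer(), TfidfVectorizer(ngram_range=(1, 3))
nb_count = MultinomialNB().fit(count_vec.fit_transform(X_train), y_train)
nb_tfidf = MultinomialNB().fit(tfidf_vec.fit_transform(X_train), y_train)

print(accuracy_score(y_test, nb_count.predict(count_vec.transform(X_test))))
print(accuracy_score(y_test, nb_tfidf.predict(tfidf_vec.transform(X_test))))
```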
okdshin/onnx | [
"27b40225ea98f6412ae2879ed67211d49564af2a",
"27b40225ea98f6412ae2879ed67211d49564af2a",
"31ca96ca3331d05884a71c38975d34870eb9c81d"
] | [
"onnx/backend/test/case/node/xor.py",
"onnx/backend/test/case/node/flatten.py",
"onnx/backend/test/case/node/globalaveragepool.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Xor(Base):\n\n @staticmethod\n def export():\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n )\n\n # 2d\n x = (np.random.randn(3, 4) > 0).astype(np.bool)\n y = (np.random.randn(3, 4) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor2d')\n\n # 3d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor3d')\n\n # 4d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor4d')\n\n @staticmethod\n def export_xor_broadcast():\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n )\n\n #3d vs 1d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v1d')\n\n #3d vs 2d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(4, 5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v2d')\n\n #4d vs 2d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v2d')\n\n #4d vs 3d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v3d')\n\n @staticmethod\n def export_xor_axis():\n x = (np.random.randn(5, 5, 5, 5) > 0).astype(np.bool)\n y = (np.random.randn(5) > 0).astype(np.bool)\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=0,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis, np.newaxis, np.newaxis])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis0')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=1,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis, np.newaxis,])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis1')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=2,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis,])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis2')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=3,\n )\n\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis3')",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Flatten(Base):\n\n @staticmethod\n def export():\n shape = (2, 3, 4, 5)\n a = np.random.random_sample(shape).astype(np.float32)\n\n for i in range(len(shape)):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b= np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_axis' + str(i))\n\n @staticmethod\n def export_flatten_with_default_axis():\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'], # Default value for axis: axis=1\n )\n \n shape = (5, 4, 3, 2)\n a = np.random.random_sample(shape).astype(np.float32)\n new_shape = (5, 24)\n b= np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_default_axis')",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport itertools\n\nimport numpy as np\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass GlobalAveragePool(Base):\n\n @staticmethod\n def export():\n node = onnx.helper.make_node(\n 'GlobalAveragePool',\n inputs=['x'],\n outputs=['y'],\n )\n x = np.random.randn(1, 3, 5, 5).astype(np.float32)\n spatial_shape = np.ndim(x) - 2\n y = np.average(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))\n for _ in range(spatial_shape):\n y = np.expand_dims(y, -1)\n expect(node, inputs=[x], outputs=[y], name='test_globalaveragepool')\n\n @staticmethod\n def export_globalaveragepool_precomputed():\n\n node = onnx.helper.make_node(\n 'GlobalAveragePool',\n inputs=['x'],\n outputs=['y'],\n )\n x = np.array([[[\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]]]).astype(np.float32)\n y = np.array([[[[5]]]]).astype(np.float32)\n expect(node, inputs=[x], outputs=[y], name='test_globalaveragepool_precomputed')\n"
] | [
[
"numpy.logical_xor",
"numpy.random.randn"
],
[
"numpy.reshape",
"numpy.random.random_sample",
"numpy.prod"
],
[
"numpy.ndim",
"numpy.array",
"numpy.expand_dims",
"numpy.random.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
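All three ONNX test cases above compute their expected outputs with plain NumPy. Condensed into one hedged sketch (shapes copied from the tests; the `onnx.helper.make_node` construction itself is omitted):

```python
import numpy as np

# Xor: elementwise logical XOR with broadcasting (3-D input vs 1-D input).
x = np.random.randn(3, 4, 5) > 0
y = np.random.randn(5) > 0
z = np.logical_xor(x, y)

# Flatten with axis=i: collapse the first i dims into the row dimension.
shape = (2, 3, 4, 5)
a = np.random.random_sample(shape).astype(np.float32)
i = 2
b = np.reshape(a, (np.prod(shape[:i]).astype(int), -1))

# GlobalAveragePool: average over the spatial axes, then restore them.
u = np.random.randn(1, 3, 5, 5).astype(np.float32)
spatial = np.ndim(u) - 2
p = np.average(u, axis=tuple(range(spatial, spatial + 2)))
for _ in range(spatial):
    p = np.expand_dims(p, -1)

print(z.shape, b.shape, p.shape)
```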
GAA-UAM/scikit-fda | [
"a9953a3104195ce9796397d094b17b1b90fd090f"
] | [
"skfda/_utils/_utils.py"
] | [
"\"\"\"Module with generic methods.\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nimport numbers\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Iterable,\n List,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport scipy.integrate\nfrom numpy import ndarray\nfrom pandas.api.indexers import check_array_indexer\nfrom sklearn.base import clone\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom typing_extensions import Literal, Protocol\n\nfrom ..representation._typing import (\n ArrayLike,\n DomainRange,\n DomainRangeLike,\n GridPoints,\n GridPointsLike,\n)\nfrom ..representation.extrapolation import ExtrapolationLike\n\nRandomStateLike = Optional[Union[int, np.random.RandomState]]\n\nif TYPE_CHECKING:\n from ..exploratory.depth import Depth\n from ..representation import FData, FDataGrid\n from ..representation.basis import Basis\n T = TypeVar(\"T\", bound=FData)\n\n\ndef check_is_univariate(fd: FData) -> None:\n \"\"\"Check if an FData is univariate and raises an error.\n\n Args:\n fd: Functional object to check if is univariate.\n\n Raises:\n ValueError: If it is not univariate, i.e., `fd.dim_domain != 1` or\n `fd.dim_codomain != 1`.\n\n \"\"\"\n if fd.dim_domain != 1 or fd.dim_codomain != 1:\n domain_str = (\n \"\" if fd.dim_domain == 1\n else f\"(currently is {fd.dim_domain}) \"\n )\n\n codomain_str = (\n \"\" if fd.dim_codomain == 1\n else f\"(currently is {fd.dim_codomain})\"\n )\n\n raise ValueError(\n f\"The functional data must be univariate, i.e., \"\n f\"with dim_domain=1 {domain_str}\"\n f\"and dim_codomain=1 {codomain_str}\",\n )\n\n\ndef _check_compatible_fdata(fdata1: FData, fdata2: FData) -> None:\n \"\"\"Check that fdata is compatible.\"\"\"\n if (fdata1.dim_domain != fdata2.dim_domain):\n raise ValueError(\n f\"Functional data has incompatible domain dimensions: \"\n f\"{fdata1.dim_domain} != {fdata2.dim_domain}\",\n )\n\n if (fdata1.dim_codomain != fdata2.dim_codomain):\n raise ValueError(\n f\"Functional data has incompatible codomain dimensions: \"\n f\"{fdata1.dim_codomain} != {fdata2.dim_codomain}\",\n )\n\n\ndef _to_grid(\n X: FData,\n y: FData,\n eval_points: Optional[np.ndarray] = None,\n) -> Tuple[FDataGrid, FDataGrid]:\n \"\"\"Transform a pair of FDatas in grids to perform calculations.\"\"\"\n from .. import FDataGrid\n x_is_grid = isinstance(X, FDataGrid)\n y_is_grid = isinstance(y, FDataGrid)\n\n if eval_points is not None:\n X = X.to_grid(eval_points)\n y = y.to_grid(eval_points)\n elif x_is_grid and not y_is_grid:\n y = y.to_grid(X.grid_points[0])\n elif not x_is_grid and y_is_grid:\n X = X.to_grid(y.grid_points[0])\n elif not x_is_grid and not y_is_grid:\n X = X.to_grid()\n y = y.to_grid()\n\n return X, y\n\n\ndef _to_grid_points(grid_points_like: GridPointsLike) -> GridPoints:\n \"\"\"Convert to grid points.\n\n If the original list is one-dimensional (e.g. [1, 2, 3]), return list to\n array (in this case [array([1, 2, 3])]).\n\n If the original list is two-dimensional (e.g. 
[[1, 2, 3], [4, 5]]), return\n a list containing other one-dimensional arrays (in this case\n [array([1, 2, 3]), array([4, 5])]).\n\n In any other case the behaviour is unespecified.\n\n \"\"\"\n unidimensional = False\n\n if not isinstance(grid_points_like, Iterable):\n grid_points_like = [grid_points_like]\n\n if not isinstance(grid_points_like[0], Iterable):\n unidimensional = True\n\n if unidimensional:\n return (_int_to_real(np.asarray(grid_points_like)),)\n\n return tuple(_int_to_real(np.asarray(i)) for i in grid_points_like)\n\n\ndef _to_domain_range(sequence: DomainRangeLike) -> DomainRange:\n \"\"\"Convert sequence to a proper domain range.\"\"\"\n seq_aux = cast(\n Sequence[Sequence[float]],\n (sequence,) if isinstance(sequence[0], numbers.Real) else sequence,\n )\n\n tuple_aux = tuple(tuple(s) for s in seq_aux)\n\n if not all(len(s) == 2 and s[0] <= s[1] for s in tuple_aux):\n raise ValueError(\n \"Domain intervals should have 2 bounds for \"\n \"dimension: (lower, upper).\",\n )\n\n return cast(DomainRange, tuple_aux)\n\n\ndef _to_array_maybe_ragged(\n array: Iterable[ArrayLike],\n *,\n row_shape: Optional[Sequence[int]] = None,\n) -> np.ndarray:\n \"\"\"\n Convert to an array where each element may or may not be of equal length.\n\n If each element is of equal length the array is multidimensional.\n Otherwise it is a ragged array.\n\n \"\"\"\n def convert_row(row: ArrayLike) -> np.ndarray:\n r = np.array(row)\n\n if row_shape is not None:\n r = r.reshape(row_shape)\n\n return r\n\n array_list = [convert_row(a) for a in array]\n shapes = [a.shape for a in array_list]\n\n if all(s == shapes[0] for s in shapes):\n return np.array(array_list)\n\n res = np.empty(len(array_list), dtype=np.object_)\n\n for i, a in enumerate(array_list):\n res[i] = a\n\n return res\n\n\n@overload\ndef _cartesian_product(\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: Literal[False] = False,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _cartesian_product(\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: Literal[True],\n) -> Tuple[np.ndarray, Tuple[int, ...]]:\n pass\n\n\ndef _cartesian_product( # noqa: WPS234\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, Tuple[int, ...]]]:\n \"\"\"\n Compute the cartesian product of the axes.\n\n Computes the cartesian product of the axes and returns a numpy array of\n 1 dimension with all the possible combinations, for an arbitrary number of\n dimensions.\n\n Args:\n axes: List with axes.\n flatten: Whether to return the flatten array or keep one dimension per\n axis.\n return_shape: If ``True`` return the shape of the array before\n flattening.\n\n Returns:\n Numpy 2-D array with all the possible combinations.\n The entry (i,j) represent the j-th coordinate of the i-th point.\n If ``return_shape`` is ``True`` returns also the shape of the array\n before flattening.\n\n Examples:\n >>> from skfda._utils import _cartesian_product\n >>> axes = [[0,1],[2,3]]\n >>> _cartesian_product(axes)\n array([[0, 2],\n [0, 3],\n [1, 2],\n [1, 3]])\n\n >>> axes = [[0,1],[2,3],[4]]\n >>> _cartesian_product(axes)\n array([[0, 2, 4],\n [0, 3, 4],\n [1, 2, 4],\n [1, 3, 4]])\n\n >>> axes = [[0,1]]\n >>> _cartesian_product(axes)\n array([[0],\n [1]])\n \"\"\"\n cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)\n\n shape = cartesian.shape\n\n if flatten:\n cartesian = cartesian.reshape(-1, len(axes))\n\n if return_shape:\n return cartesian, 
shape\n\n return cartesian\n\n\ndef _same_domain(fd: Union[Basis, FData], fd2: Union[Basis, FData]) -> bool:\n \"\"\"Check if the domain range of two objects is the same.\"\"\"\n return np.array_equal(fd.domain_range, fd2.domain_range)\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: ArrayLike,\n *,\n aligned: Literal[True],\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: Sequence[ArrayLike],\n *,\n aligned: Literal[True],\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: Union[ArrayLike, Sequence[ArrayLike]],\n *,\n aligned: bool,\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\ndef _reshape_eval_points(\n eval_points: Union[ArrayLike, Iterable[ArrayLike]],\n *,\n aligned: bool,\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n \"\"\"Convert and reshape the eval_points to ndarray.\n\n Args:\n eval_points: Evaluation points to be reshaped.\n aligned: Boolean flag. True if all the samples\n will be evaluated at the same evaluation_points.\n n_samples: Number of observations.\n dim_domain: Dimension of the domain.\n\n Returns:\n Numpy array with the eval_points, if\n evaluation_aligned is True with shape `number of evaluation points`\n x `dim_domain`. If the points are not aligned the shape of the\n points will be `n_samples` x `number of evaluation points`\n x `dim_domain`.\n\n \"\"\"\n if aligned:\n eval_points = np.asarray(eval_points)\n else:\n eval_points = cast(Iterable[ArrayLike], eval_points)\n\n eval_points = _to_array_maybe_ragged(\n eval_points,\n row_shape=(-1, dim_domain),\n )\n\n # Case evaluation of a single value, i.e., f(0)\n # Only allowed for aligned evaluation\n if aligned and (\n eval_points.shape == (dim_domain,)\n or (eval_points.ndim == 0 and dim_domain == 1)\n ):\n eval_points = np.array([eval_points])\n\n if aligned: # Samples evaluated at same eval points\n eval_points = eval_points.reshape(\n (eval_points.shape[0], dim_domain),\n )\n\n else: # Different eval_points for each sample\n\n if eval_points.shape[0] != n_samples:\n raise ValueError(\n f\"eval_points should be a list \"\n f\"of length {n_samples} with the \"\n f\"evaluation points for each sample.\",\n )\n\n return eval_points\n\n\ndef _one_grid_to_points(\n axes: GridPointsLike,\n *,\n dim_domain: int,\n) -> Tuple[np.ndarray, Tuple[int, ...]]:\n \"\"\"\n Convert a list of ndarrays, one per domain dimension, in the points.\n\n Returns also the shape containing the information of how each point\n is formed.\n \"\"\"\n axes = _to_grid_points(axes)\n\n if len(axes) != dim_domain:\n raise ValueError(\n f\"Length of axes should be {dim_domain}\",\n )\n\n cartesian, shape = _cartesian_product(axes, return_shape=True)\n\n # Drop domain size dimension, as it is not needed to reshape the output\n shape = shape[:-1]\n\n return cartesian, shape\n\n\nclass EvaluateMethod(Protocol):\n \"\"\"Evaluation method.\"\"\"\n\n def __call__(\n self,\n __eval_points: np.ndarray, # noqa: WPS112\n extrapolation: Optional[ExtrapolationLike],\n aligned: bool,\n ) -> np.ndarray:\n \"\"\"Evaluate a function.\"\"\"\n pass\n\n\n@overload\ndef _evaluate_grid(\n axes: GridPointsLike,\n *,\n evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: Literal[True] = True,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _evaluate_grid(\n axes: Iterable[GridPointsLike],\n *,\n 
evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: Literal[False],\n) -> np.ndarray:\n pass\n\n\ndef _evaluate_grid( # noqa: WPS234\n axes: Union[GridPointsLike, Iterable[GridPointsLike]],\n *,\n evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: bool = True,\n) -> np.ndarray:\n \"\"\"\n Evaluate the functional object in the cartesian grid.\n\n This method is called internally by :meth:`evaluate` when the argument\n `grid` is True.\n\n Evaluates the functional object in the grid generated by the cartesian\n product of the axes. The length of the list of axes should be equal\n than the domain dimension of the object.\n\n If the list of axes has lengths :math:`n_1, n_2, ..., n_m`, where\n :math:`m` is equal than the dimension of the domain, the result of the\n evaluation in the grid will be a matrix with :math:`m+1` dimensions and\n shape :math:`n_{samples} x n_1 x n_2 x ... x n_m`.\n\n If `aligned` is false each sample is evaluated in a\n different grid, and the list of axes should contain a list of axes for\n each sample.\n\n If the domain dimension is 1, the result of the behaviour of the\n evaluation will be the same than :meth:`evaluate` without the grid\n option, but with worst performance.\n\n Args:\n axes: List of axes to generated the grid where the\n object will be evaluated.\n evaluate_method: Function used to evaluate the functional object.\n n_samples: Number of samples.\n dim_domain: Domain dimension.\n dim_codomain: Codomain dimension.\n extrapolation: Controls the\n extrapolation mode for elements outside the domain range. By\n default it is used the mode defined during the instance of the\n object.\n aligned: If False evaluates each sample\n in a different grid.\n evaluate_method: method to use to evaluate the points\n n_samples: number of samples\n dim_domain: dimension of the domain\n dim_codomain: dimensions of the codomain\n\n Returns:\n Numpy array with dim_domain + 1 dimensions with\n the result of the evaluation.\n\n Raises:\n ValueError: If there are a different number of axes than the domain\n dimension.\n\n \"\"\"\n # Compute intersection points and resulting shapes\n if aligned:\n\n axes = cast(GridPointsLike, axes)\n\n eval_points, shape = _one_grid_to_points(axes, dim_domain=dim_domain)\n\n else:\n\n axes_per_sample = cast(Iterable[GridPointsLike], axes)\n\n axes_per_sample = list(axes_per_sample)\n\n eval_points_tuple, shape_tuple = zip(\n *[\n _one_grid_to_points(a, dim_domain=dim_domain)\n for a in axes_per_sample\n ],\n )\n\n if len(eval_points_tuple) != n_samples:\n raise ValueError(\n \"Should be provided a list of axis per sample\",\n )\n\n eval_points = _to_array_maybe_ragged(eval_points_tuple)\n\n # Evaluate the points\n evaluated = evaluate_method(\n eval_points,\n extrapolation=extrapolation,\n aligned=aligned,\n )\n\n # Reshape the result\n if aligned:\n\n res = evaluated.reshape(\n [n_samples] + list(shape) + [dim_codomain],\n )\n\n else:\n\n res = _to_array_maybe_ragged([\n r.reshape(list(s) + [dim_codomain])\n for r, s in zip(evaluated, shape_tuple)\n ])\n\n return res\n\n\ndef nquad_vec(\n func: Callable[[np.ndarray], np.ndarray],\n ranges: Sequence[Tuple[float, float]],\n) -> np.ndarray:\n \"\"\"Perform multiple integration of vector valued functions.\"\"\"\n initial_depth = len(ranges) - 1\n\n def integrate(*args: Any, depth: int) -> 
np.ndarray: # noqa: WPS430\n\n if depth == 0:\n f = functools.partial(func, *args)\n else:\n f = functools.partial(integrate, *args, depth=depth - 1)\n\n return scipy.integrate.quad_vec(f, *ranges[initial_depth - depth])[0]\n\n return integrate(depth=initial_depth)\n\n\ndef _map_in_batches(\n function: Callable[..., np.ndarray],\n arguments: Tuple[Union[FData, np.ndarray], ...],\n indexes: Tuple[np.ndarray, ...],\n memory_per_batch: Optional[int] = None,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"\n Map a function over samples of FData or ndarray tuples efficiently.\n\n This function prevents a large set of indexes to use all available\n memory and hang the PC.\n\n \"\"\"\n if memory_per_batch is None:\n # 256MB is not too big\n memory_per_batch = 256 * 1024 * 1024 # noqa: WPS432\n\n memory_per_element = sum(a.nbytes // len(a) for a in arguments)\n n_elements_per_batch_allowed = memory_per_batch // memory_per_element\n if n_elements_per_batch_allowed < 1:\n raise ValueError(\"Too few memory allowed for the operation\")\n\n n_indexes = len(indexes[0])\n\n assert all(n_indexes == len(i) for i in indexes)\n\n batches: List[np.ndarray] = []\n\n for pos in range(0, n_indexes, n_elements_per_batch_allowed):\n batch_args = tuple(\n a[i[pos:pos + n_elements_per_batch_allowed]]\n for a, i in zip(arguments, indexes)\n )\n\n batches.append(function(*batch_args, **kwargs))\n\n return np.concatenate(batches, axis=0)\n\n\ndef _pairwise_symmetric(\n function: Callable[..., np.ndarray],\n arg1: Union[FData, np.ndarray],\n arg2: Optional[Union[FData, np.ndarray]] = None,\n memory_per_batch: Optional[int] = None,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"Compute pairwise a commutative function.\"\"\"\n dim1 = len(arg1)\n if arg2 is None or arg2 is arg1:\n indices = np.triu_indices(dim1)\n\n matrix = np.empty((dim1, dim1))\n\n triang_vec = _map_in_batches(\n function,\n (arg1, arg1),\n indices,\n memory_per_batch=memory_per_batch,\n **kwargs,\n )\n\n # Set upper matrix\n matrix[indices] = triang_vec\n\n # Set lower matrix\n matrix[(indices[1], indices[0])] = triang_vec\n\n return matrix\n\n dim2 = len(arg2)\n indices = np.indices((dim1, dim2))\n\n vec = _map_in_batches(\n function,\n (arg1, arg2),\n (indices[0].ravel(), indices[1].ravel()),\n memory_per_batch=memory_per_batch,\n **kwargs,\n )\n\n return vec.reshape((dim1, dim2))\n\n\ndef _int_to_real(array: np.ndarray) -> np.ndarray:\n \"\"\"Convert integer arrays to floating point.\"\"\"\n return array + 0.0\n\n\ndef _check_array_key(array: np.ndarray, key: Any) -> Any:\n \"\"\"Check a getitem key.\"\"\"\n key = check_array_indexer(array, key)\n if isinstance(key, tuple):\n non_ellipsis = [i for i in key if i is not Ellipsis]\n if len(non_ellipsis) > 1:\n raise KeyError(key)\n key = non_ellipsis[0]\n\n if isinstance(key, numbers.Integral): # To accept also numpy ints\n key = int(key)\n key = range(len(array))[key]\n\n return slice(key, key + 1)\n\n return key\n\n\ndef _check_estimator(estimator):\n from sklearn.utils.estimator_checks import (\n check_get_params_invariance,\n check_set_params,\n )\n\n name = estimator.__name__\n instance = estimator()\n check_get_params_invariance(name, instance)\n check_set_params(name, instance)\n\n\ndef _classifier_get_classes(y: ndarray) -> Tuple[ndarray, ndarray]:\n\n check_classification_targets(y)\n\n le = LabelEncoder()\n y_ind = le.fit_transform(y)\n\n classes = le.classes_\n\n if classes.size < 2:\n raise ValueError(\n f'The number of classes has to be greater than'\n f'one; got {classes.size} class',\n )\n return 
classes, y_ind\n\n\ndef _classifier_get_depth_methods(\n classes: ndarray,\n X: T,\n y_ind: ndarray,\n depth_methods: Sequence[Depth[T]],\n) -> Sequence[Depth[T]]:\n return [\n clone(depth_method).fit(X[y_ind == cur_class])\n for cur_class in range(classes.size)\n for depth_method in depth_methods\n ]\n\n\ndef _classifier_fit_depth_methods(\n X: T,\n y: ndarray,\n depth_methods: Sequence[Depth[T]],\n) -> Tuple[ndarray, Sequence[Depth[T]]]:\n classes, y_ind = _classifier_get_classes(y)\n\n class_depth_methods_ = _classifier_get_depth_methods(\n classes, X, y_ind, depth_methods,\n )\n\n return classes, class_depth_methods_\n\n\n_DependenceMeasure = Callable[[np.ndarray, np.ndarray], np.ndarray]\n\n\ndef _compute_dependence(\n X: np.ndarray,\n y: np.ndarray,\n *,\n dependence_measure: _DependenceMeasure,\n) -> np.ndarray:\n \"\"\"\n Compute dependence between points and target.\n\n Computes the dependence of each point in each trajectory in X with the\n corresponding class label in Y.\n\n \"\"\"\n from dcor import rowwise\n\n # Move n_samples to the end\n # The shape is now input_shape + n_samples + n_output\n X = np.moveaxis(X, 0, -2)\n\n input_shape = X.shape[:-2]\n\n # Join input in a list for rowwise\n X = X.reshape(-1, X.shape[-2], X.shape[-1])\n\n if y.ndim == 1:\n y = np.atleast_2d(y).T\n Y = np.array([y] * len(X))\n\n dependence_results = rowwise(dependence_measure, X, Y)\n\n return dependence_results.reshape(input_shape)\n"
] | [
[
"numpy.array_equal",
"numpy.meshgrid",
"numpy.asarray",
"numpy.triu_indices",
"sklearn.utils.estimator_checks.check_set_params",
"sklearn.utils.multiclass.check_classification_targets",
"numpy.indices",
"numpy.concatenate",
"numpy.atleast_2d",
"sklearn.base.clone",
"pandas.api.indexers.check_array_indexer",
"sklearn.utils.estimator_checks.check_get_params_invariance",
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"numpy.moveaxis",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
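The `_pairwise_symmetric` helper in the row above evaluates a commutative function only on the upper-triangular index pairs (via `numpy.triu_indices`, which also appears in the row's API list) and mirrors the result into the lower triangle. The sketch below isolates that pattern; the name `pairwise_symmetric` and the absolute-difference example function are illustrative choices, not code taken from the dataset row.

```python
import numpy as np

def pairwise_symmetric(fn, x):
    """Evaluate a commutative pairwise function only on the upper triangle.

    Mirrors the idea in `_pairwise_symmetric` above: compute fn on the
    upper-triangular index pairs and mirror the values, roughly halving
    the number of evaluations compared to the full n x n grid.
    """
    n = len(x)
    rows, cols = np.triu_indices(n)
    values = fn(x[rows], x[cols])      # vectorised over the index pairs
    matrix = np.empty((n, n))
    matrix[rows, cols] = values        # fill the upper triangle
    matrix[cols, rows] = values        # mirror into the lower triangle
    return matrix

if __name__ == "__main__":
    pts = np.linspace(0.0, 1.0, 5)
    dist = pairwise_symmetric(lambda a, b: np.abs(a - b), pts)
    assert np.allclose(dist, dist.T)   # symmetric by construction
```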
tombackstrom/mdct | [
"f59e708f9a7f65ee672dbf44e6f164e79c82d83a"
] | [
"tests/test_windows.py"
] | [
"import pytest\nimport numpy\nimport mdct.windows\n\n\ndef test_kbd():\n M = 100\n w = mdct.windows.kaiser_derived(M, beta=4.)\n\n assert numpy.allclose(w[:M//2] ** 2 + w[-M//2:] ** 2, 1.)\n\n with pytest.raises(ValueError):\n mdct.windows.kaiser_derived(M + 1, beta=4.)\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(2, beta=numpy.pi/2)[:1],\n [numpy.sqrt(2)/2])\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(4, beta=numpy.pi/2)[:2],\n [0.518562710536, 0.855039598640])\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(6, beta=numpy.pi/2)[:3],\n [0.436168993154, 0.707106781187, 0.899864772847])\n"
] | [
[
"numpy.sqrt",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
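The `tests/test_windows.py` row above asserts the Princen-Bradley condition `w[:M//2]**2 + w[-M//2:]**2 == 1` for `mdct.windows.kaiser_derived`, but the library implementation itself is not part of the row. The sketch below shows one standard way to build a Kaiser-Bessel-derived window from `numpy.kaiser` that satisfies the same condition; the helper name `kaiser_derived_sketch` is assumed for illustration and is not necessarily identical to the library's code.

```python
import numpy as np

def kaiser_derived_sketch(M, beta):
    """Standard KBD construction: cumulative sum of a Kaiser half-window,
    normalised, square-rooted and mirrored (illustrative sketch only)."""
    if M % 2:
        raise ValueError("KBD windows are defined for even lengths only")
    w = np.kaiser(M // 2 + 1, beta)            # Kaiser window of length M/2 + 1
    csum = np.cumsum(w)
    half = np.sqrt(csum[:M // 2] / csum[-1])   # first half of the KBD window
    return np.concatenate([half, half[::-1]])  # mirror to get the second half

M = 100
w = kaiser_derived_sketch(M, beta=4.0)
# Princen-Bradley condition, the property asserted in the test above
assert np.allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.0)
```

The condition holds exactly because the Kaiser half-window is symmetric, so the cumulative sums at mirrored positions always add up to the total sum.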
dpetrini/nova | [
"00b7637901420f68c7d805c13ccd4c39d514efb1"
] | [
"trainer.py"
] | [
"from matplotlib.pyplot import show\nimport torch\nfrom torch.autograd import Variable\nfrom torch.cuda.amp import GradScaler, autocast\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\n\nfrom callbacks.cb_handler import CallbackHandler\nfrom callbacks.cb_base import BaseCB\nfrom callbacks.cb_lr_patch_clf import LR_SchedCB_patch\nfrom callbacks.cb_lr_full_clf import LR_SchedCB_full\nfrom callbacks.cb_lr_2views_clf import LR_SchedCB_2views\nfrom callbacks.cb_lr_w_cyc_cos import LR_SchedCB_W_Cyc_Cos\nfrom callbacks.cb_lr_w_cos import LR_SchedCB_W_Cos\nfrom callbacks.cb_auc import AUC_CB\n\n# from parallel import DataParallelModel, DataParallelCriterion\nfrom util.util import show_auc, calc_auc_desv\n\nparallel = False\n\n#APAGAR\nimport cv2\n\n# Accuracy\ndef acc(y_hat, labels):\n \"\"\" Default accuracy \"\"\"\n\n # para parallel\n if len(y_hat) > 1 and parallel:\n y_hat = torch.cat(y_hat)\n\n return (torch.argmax(y_hat, dim=1) == labels).float().sum()\n\n\nclass Trainer():\n \"\"\"\n Many possible configurations for Trainer\n config = {\n 'num_epochs': NUM_EPOCHS,\n 'batch_size': MINI_BATCH,\n 'name': 'example',\n 'title': 'Cats & Dogs Classifier',\n 'save_last': True, # optional: Save last model (default=False)\n 'save_best': True, # optional: Save best models (ACC, {AUC}) (default=True)\n 'stable_metric: N # optional: extend epochs number to wait N epochs with no metric change (ex.AUC)\n 'save_checkpoints': N, # Save checkpoint each N epochs\n 'features': ['auc'], # optional: features like auc stats or some scheduler (if none default:optim)\n 'save_path': folder, # if want to save artifacts in other place (eg.cloud)\n 'show_plots': False, # if want to show plots\n 'make_plots': False, # if want to disable plots\n 'cv_k': (number), # interactio number if using Cross Validation\n }\n \"\"\"\n\n def __init__(self, model, train_dataloader, val_dataloader,\n loss_criterion, optimizer, optimizer_args,\n device, config):\n self.model = model\n self.device = device\n self.loss_criterion = loss_criterion\n\n # parts of config are only retrieved in callbacks\n self.epochs = int(config['num_epochs']) if 'num_epochs' in config else 10\n self.mini_batch = int(config['batch_size']) if 'batch_size' in config else 1\n self.first_epoch = int(config['start_epoch']) if 'start_epoch' in config else 1\n self.stable_metric = int(config['stable_metric']) if 'stable_metric' in config else False\n self.name = config['name'] if 'name' in config else 'default'\n self.title = config['title'] if 'title' in config else 'Classifier'\n self.features = config['features'] if 'features' in config else []\n self.make_plots = config['make_plots'] if 'make_plots' in config else True\n\n if train_dataloader:\n self.train_dataloader = train_dataloader\n else:\n return\n\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n self.optimizer = optimizer\n self.optimizer_args = optimizer_args\n\n print(self.title)\n\n # Load Callbacks for this session\n callbacks = [BaseCB(self.name, self.title, config)]\n for feat in self.features:\n if feat == 'auc':\n callbacks.append(AUC_CB(self.name, config))\n if feat == 'lr_step_full':\n callbacks.append(LR_SchedCB_full())\n if feat == 'lr_step_patch':\n callbacks.append(LR_SchedCB_patch())\n if feat == 'lr_step_2views':\n callbacks.append(LR_SchedCB_2views())\n if feat == 'lr_warmup_cos':\n callbacks.append(LR_SchedCB_W_Cos())\n if feat == 'lr_warmup_cyc_cos':\n callbacks.append(LR_SchedCB_W_Cyc_Cos())\n if feat == 'LR_SchedCB_W_Cos':\n 
callbacks.append(LR_SchedCB_W_Cos())\n self.cb = CallbackHandler(callbacks)\n\n\n def train_and_validate(self, **kwargs):\n \"\"\"\n Main train and validate function that runs main loop (fit).\n Receives all parameters and feed callback system.\n Loop through epochs and executes pytorch forward, loss,\n backpropagation and optimization (grads calc).\n Returns the model trained.\n \"\"\"\n\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n input_dict = kwargs.get('input_dict') if kwargs.get('input_dict') else []\n\n if not self.cb.begin_train_val(self.epochs, self.model, self.train_dataloader,\n self.val_dataloader, self.mini_batch, self.optimizer):\n return\n\n self.cb.update_loss(self.loss_criterion, calc_acc)\n\n device = self.device\n\n for epoch in range(self.first_epoch, self.epochs+1):\n self.model.train()\n train_loss, train_acc = 0.0, 0.0\n val_loss, val_acc = 0.0, 0.0\n\n if not self.cb.begin_epoch(epoch): return # noqa: E701\n\n optim = self.cb.update_LR(epoch, self.model, self.optimizer, self.optimizer_args)\n if optim: self.optimizer = optim\n\n # Train loop\n for _, (inputs, labels) in enumerate(self.train_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n # inserting MIXUP handling\n res = self.cb.begin_batch(inputs, labels)\n if res: inputs, labels, self.loss_criterion, calc_acc = res\n\n self.optimizer.zero_grad() # clean existing gradients\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean() # list in this case\n loss.backward() # backprop the gradients\n self.optimizer.step() # update parameters\n train_loss += loss.item() * labels.size(0) # inputs.size(0) == mini_batch size\n train_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step(labels.size(0), labels, outputs)\n\n # validation - no gradient tracking needed\n with torch.no_grad():\n self.model.eval()\n self.cb.begin_val()\n\n # validation loop\n for _, (inputs, labels) in enumerate(self.val_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n val_loss += loss.item() * labels.size(0) # inputs.size(0) == mini_batch size\n val_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step_val(labels.size(0), labels, outputs)\n\n self.cb.after_epoch(self.model, train_acc, train_loss, val_acc, val_loss)\n\n self.cb.after_train_val()\n\n return self.model\n\n def train_and_validate_amp(self, **kwargs):\n \"\"\"\n Mixed precision (automatic) version for train_and_validate.\n Uses FP16 and FP32 in main loop with pytorch Automatic Mixed Precision.\n In simple tests: use 75% of memory in 66% of time. 
Less memory and faster.\n Sometimes it just don't work and get worse, like for resnest...\n \"\"\"\n\n assert torch.__version__ >= '1.6.0', \"[Mixed precision] Please use PyTorch 1.6.0+\"\n\n print('Using AMP')\n\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n input_dict = kwargs.get('input_dict') if kwargs.get('input_dict') else []\n\n if not self.cb.begin_train_val(self.epochs, self.model, self.train_dataloader,\n self.val_dataloader, self.mini_batch, self.optimizer):\n return\n\n # Creates a GradScaler once at the beginning of training.\n scaler = GradScaler()\n\n device = self.device\n\n # for epoch in range(self.first_epoch, self.epochs+1):\n epoch = self.first_epoch # suport for \"wait N epochs after best metric\"\n last_epoch = self.epochs\n while epoch <= last_epoch:\n self.model.train()\n train_loss, train_acc = 0.0, 0.0\n val_loss, val_acc = 0.0, 0.0\n\n if not self.cb.begin_epoch(epoch): return # noqa: E701\n\n optim = self.cb.update_LR(epoch, self.model, self.optimizer, self.optimizer_args)\n if optim: self.optimizer = optim\n\n # Train loop\n for _, (inputs, labels) in enumerate(self.train_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n self.optimizer.zero_grad() # clean existing gradients\n # Runs the forward pass with autocasting.\n with autocast():\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean() # list in this case\n scaler.scale(loss).backward() # backward() on scaled loss for scaled gradients. \n scaler.step(self.optimizer) # update parameters\n scaler.update() # Updates the scale for next iteration.\n\n train_loss += loss.item() * labels.size(0) # == mini_batch size\n train_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step(labels.size(0), labels, outputs)\n\n # validation - no gradient tracking needed\n with torch.no_grad():\n self.model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(self.val_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n val_loss += loss.item() * labels.size(0) # == mini_batch size\n val_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step_val(labels.size(0), labels, outputs)\n\n self.cb.after_epoch(self.model, train_acc, train_loss, val_acc, val_loss)\n\n epoch += 1\n # print('-', self.cb.best_metric_epoch[self.cb.metric_name[-1]], last_epoch)\n # Is use stable metric - will stop training earlier, after \n # stable_metric epochs without validation metric (to be selected) improve\n # last_epoch = self.epochs if not self.stable_metric else max(self.epochs, self.cb.best_metric_epoch[self.cb.metric_name[-1]] + self.stable_metric)\n # for metric in self.cb.metric_name:\n # print(metric)\n last_epoch = self.epochs if not self.stable_metric else min(self.epochs, self.cb.best_metric_epoch[self.cb.metric_name[-1]] + self.stable_metric)\n\n self.cb.after_train_val()\n\n values = [self.cb.best_metric, self.cb.best_metric_epoch, self.cb.elapsed_mins, \n self.cb.metric_name, self.cb.loss_plot, self.cb.metric_plot, \n self.cb.best_model_file]\n\n return 
values\n\n\n def run_test(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader according to model_type.\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n quiet = kwargs.get('quiet') if kwargs.get('quiet') else False\n\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'bootstrap':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n\n if isinstance(inputs, dict):\n for key in ['CC', 'MLO']:\n inputs[key] = inputs[key].to(device)\n labels = Variable(labels.to(device))\n else:\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n test_loss += loss.item() * labels.size(0)\n test_acc += calc_acc(outputs, labels).item()\n\n batch_val_counter += labels.size(0)\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n if not quiet:\n print(f'Model: {model_type} - Test accuracy : {avg_test_acc:.5f}' +\n f' Test loss : {avg_test_loss:.5f}')\n\n return avg_test_acc \n\n\n def run_test_auc(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader, calculating AUC and ROC curve\n According to model_type:\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n If we are running test iunference only can pass model through kwargs.\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n model = kwargs.get('model') if kwargs.get('model') else None\n show_results = kwargs.get('show_results') if kwargs.get('show_results') else False\n m_positive = kwargs.get('m') if kwargs.get('m') else False\n n_negative = kwargs.get('n') if kwargs.get('n') else False\n\n if model is None:\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'test':\n model = self.model\n elif model_type == 'bootstrap':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n y_hat_auc, label_auc = [], []\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n if isinstance(inputs, dict):\n for key in ['CC', 'MLO']:\n inputs[key] = inputs[key].to(device)\n labels = Variable(labels.to(device))\n else:\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n test_loss += loss.item() * labels.size(0)\n\n # calculate acc\n test_acc += calc_acc(outputs, labels).item()\n batch_val_counter += labels.size(0)\n\n # Store auc for malignant\n label_auc = np.append(label_auc, labels.cpu().detach().numpy())\n y_hat_auc = np.append(y_hat_auc, torch.softmax(outputs, dim=1)[:, 1].cpu().detach().numpy())\n\n # enter show result mode\n if self.mini_batch == 1 and show_results:\n 
print(f'{labels.item()} {torch.softmax(outputs, dim=1)[:, 1].item():.3f}')\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n print(f\"Model: {model_type} - Test accuracy : {avg_test_acc:.3f}\" +\n f\" Test loss : {avg_test_loss:.4f}\", end='')\n\n # calculate AUC TEST\n auc_mal_val = roc_auc_score(label_auc.ravel(), y_hat_auc.ravel())\n # print(f' AUC Malignant: {auc_mal_val:.4f}', end='')\n if m_positive and n_negative:\n auc_final = f'{auc_mal_val:.4f}±{calc_auc_desv(m_positive, n_negative, auc_mal_val):.4f}'\n # print(f'±{calc_auc_desv(m_positive, n_negative, auc_mal_val):.4f}')\n print(f' AUC Malignant: {auc_final}')\n else:\n auc_final = f'{auc_mal_val:.4f}'\n print(f' AUC Malignant: {auc_final}')\n # print()\n\n if self.make_plots:\n show_auc(label_auc, y_hat_auc, self.title, show_plt=False)\n \n # return auc_mal_val\n return auc_final\n\n\n # Not fully tested yet (2021-05)\n # it seems to be working - maybe integrate in single function as above\n # and use kwargs to indicate that it is test-data- aug?\n def run_test_data_aug_auc(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader, calculating AUC and ROC curve\n --> Using test-data augmentation: rotation 0°, 90°, 180°, 270°\n --> All rotated sample will be infered and AUC will consider all.\n According to model_type:\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n If we are running test iunference only can pass model through kwargs.\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n model = kwargs.get('model') if kwargs.get('model') else None\n\n if model is None:\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'test':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n y_hat_auc, label_auc = [], []\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n for rot in range(0,4):\n \n # print(rot, inputs.shape)\n inputs = torch.rot90(inputs, rot, [2, 3])\n # inputs = Variable(inputs.to(device))\n # labels = Variable(labels.to(device))\n # print(counter, rot, inputs.shape)\n\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n\n # img = inputs.cpu().detach().numpy()\n # img = img.transpose(0,2,3,1)\n # print(img[0, :, :, 0:3].shape)\n # cv2.imwrite('thrash/test-aug_'+str(rot)+'.png', img[0, :, :, 0:3]*65535)\n\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n test_loss += loss.item() * labels.size(0)\n\n # calculate acc\n test_acc += calc_acc(outputs, labels).item()\n batch_val_counter += labels.size(0)\n\n # Store auc for malignant\n label_auc = np.append(label_auc, labels.cpu().detach().numpy())\n y_hat_auc = np.append(y_hat_auc, torch.softmax(outputs, dim=1)[:, 1].cpu().detach().numpy())\n\n # enter show result mode\n if self.mini_batch == 1:\n print(f'{labels.item()} {torch.softmax(outputs, dim=1)[:, 1].item():.3f}')\n\n print('batch_val_counter ', batch_val_counter)\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n print(f\"Model: {model_type} - Test accuracy : {avg_test_acc:.3f}\" +\n f\" Test loss : 
{avg_test_loss:.4f}\", end='')\n\n # calculate AUC TEST\n auc_mal_val = roc_auc_score(label_auc.ravel(), y_hat_auc.ravel())\n print(f' AUC Malignant: {auc_mal_val:.4f}')\n\n if self.make_plots:\n show_auc(label_auc, y_hat_auc, self.title, show_plt=False)\n \n return auc_mal_val\n"
] | [
[
"torch.softmax",
"torch.cat",
"torch.rot90",
"torch.cuda.amp.autocast",
"torch.cuda.amp.GradScaler",
"torch.no_grad",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
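The `trainer.py` row above wraps its mixed-precision loop around `torch.cuda.amp.autocast` and `GradScaler`, creating the scaler once before the epoch loop. The fragment below isolates the per-batch pattern from `train_and_validate_amp` as a standalone helper; `amp_train_step` and its arguments are assumed names for illustration, and the model and batch are expected to already live on a CUDA device.

```python
import torch
from torch.cuda.amp import GradScaler, autocast

def amp_train_step(model, optimizer, loss_fn, inputs, labels, scaler):
    """One mixed-precision optimisation step (illustrative sketch)."""
    optimizer.zero_grad()              # clear gradients from the previous step
    with autocast():                   # run the forward pass in mixed precision
        outputs = model(inputs)
        loss = loss_fn(outputs, labels)
    scaler.scale(loss).backward()      # backprop on the scaled loss
    scaler.step(optimizer)             # unscale gradients and update parameters
    scaler.update()                    # adjust the scale factor for the next step
    return loss.item()

# Typical usage: create the scaler once, then call the step inside the batch loop.
# scaler = GradScaler()
# for inputs, labels in train_dataloader:
#     loss = amp_train_step(model, optimizer, loss_fn, inputs, labels, scaler)
```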
QDaria/pennylane | [
"5a28983fc7bd950cde8a4014e54261fef4b54293",
"5a28983fc7bd950cde8a4014e54261fef4b54293",
"5a28983fc7bd950cde8a4014e54261fef4b54293",
"5a28983fc7bd950cde8a4014e54261fef4b54293"
] | [
"tests/templates/test_subroutines/test_qmc.py",
"pennylane/ops/qubit/arithmetic_ops.py",
"tests/transforms/test_adjoint.py",
"tests/interfaces/test_batch_jax.py"
] | [
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pytest\nfrom scipy.stats import norm\n\nimport pennylane as qml\nfrom pennylane.templates.subroutines.qmc import (\n QuantumMonteCarlo,\n _make_V,\n _make_Z,\n func_to_unitary,\n make_Q,\n probs_to_unitary,\n)\nfrom pennylane.wires import Wires\n\n\nclass TestProbsToUnitary:\n \"\"\"Tests for the probs_to_unitary function\"\"\"\n\n def test_invalid_distribution_sum_to_not_one(self):\n \"\"\"Test if a ValueError is raised when a distribution that does not sum to one is input\"\"\"\n p = np.ones(4)\n with pytest.raises(ValueError, match=\"A valid probability distribution of non-negative\"):\n probs_to_unitary(p)\n\n def test_invalid_distribution_negative(self):\n \"\"\"Test if a ValueError is raised when a distribution with a negative value is input\"\"\"\n p = [2, 0, 0, -1]\n with pytest.raises(ValueError, match=\"A valid probability distribution of non-negative\"):\n probs_to_unitary(p)\n\n ps = [\n [0.46085261032920616, 0.5391473896707938],\n [0.2111821738452515, 0.4235979103670337, 0.36521991578771484],\n [0.3167916924190049, 0.2651843704361695, 0.1871934980886578, 0.23083043905616774],\n [0.8123242419241959, 0.07990911578859018, 0.07983919018902215, 0.027927452098191852],\n ]\n\n @pytest.mark.parametrize(\"p\", ps)\n def test_fixed_examples(self, p):\n \"\"\"Test if the correct unitary is returned for fixed input examples. 
A correct unitary has\n its first column equal to the square root of the distribution and satisfies\n U @ U.T = U.T @ U = I.\"\"\"\n unitary = probs_to_unitary(p)\n assert np.allclose(np.sqrt(p), unitary[:, 0])\n assert np.allclose(unitary @ unitary.T, np.eye(len(unitary)))\n assert np.allclose(unitary.T @ unitary, np.eye(len(unitary)))\n\n\nclass TestFuncToUnitary:\n \"\"\"Tests for the func_to_unitary function\"\"\"\n\n def test_not_bounded_func(self):\n \"\"\"Test if a ValueError is raised if a function that evaluates outside of the [0, 1]\n interval is provided\"\"\"\n func = lambda i: np.sin(i)\n\n with pytest.raises(ValueError, match=\"func must be bounded within the interval\"):\n func_to_unitary(func, 8)\n\n def test_example(self):\n \"\"\"Test for a fixed example if the returned unitary maps input states to the\n expected output state as well as if the unitary satisfies U @ U.T = U.T @ U = I.\"\"\"\n M = 8\n func = lambda i: np.sin(i) ** 2\n\n r = func_to_unitary(func, M)\n\n for i in range(M):\n # The control qubit is the last qubit, so we have to look at every other term\n # using [::2].\n output_state = r[::2][i]\n output_0 = output_state[::2]\n output_1 = output_state[1::2]\n assert np.allclose(output_0[i], np.sqrt(1 - func(i)))\n assert np.allclose(output_1[i], np.sqrt(func(i)))\n\n assert np.allclose(r @ r.T, np.eye(2 * M))\n assert np.allclose(r.T @ r, np.eye(2 * M))\n\n def test_example_with_pl(self):\n \"\"\"Test for a fixed example if the returned unitary behaves as expected\n when used within a PennyLane circuit, i.e., so that the probability of the final control\n wire encodes the function.\"\"\"\n wires = 3\n M = 2**wires\n func = lambda i: np.sin(i) ** 2\n\n r = func_to_unitary(func, M)\n\n dev = qml.device(\"default.qubit\", wires=(wires + 1))\n\n @qml.qnode(dev)\n def apply_r(input_state):\n qml.QubitStateVector(input_state, wires=range(wires))\n qml.QubitUnitary(r, wires=range(wires + 1))\n return qml.probs(wires)\n\n for i, state in enumerate(np.eye(M)):\n p = apply_r(state)[1]\n assert np.allclose(p, func(i))\n\n\ndef test_V():\n \"\"\"Test for the _make_V function\"\"\"\n dim = 4\n\n V_expected = -np.eye(dim)\n V_expected[1, 1] = V_expected[3, 3] = 1\n V = _make_V(dim)\n\n assert np.allclose(V, V_expected)\n\n\ndef test_Z():\n \"\"\"Test for the _make_Z function\"\"\"\n dim = 4\n\n Z_expected = -np.eye(dim)\n Z_expected[0, 0] = 1\n Z = _make_Z(dim)\n\n assert np.allclose(Z, Z_expected)\n\n\ndef test_Q():\n \"\"\"Test for the make_Q function using a fixed example\"\"\"\n\n A = np.array(\n [\n [0.85358423 - 0.32239299j, -0.12753659 + 0.38883306j],\n [0.39148136 - 0.11915985j, 0.34064316 - 0.84646648j],\n ]\n )\n R = np.array(\n [\n [\n 0.45885289 + 0.03972856j,\n 0.2798685 - 0.05981098j,\n 0.64514642 - 0.51555038j,\n 0.11015177 - 0.10877695j,\n ],\n [\n 0.19407005 - 0.35483005j,\n 0.29756077 + 0.80153453j,\n -0.19147104 + 0.0507968j,\n 0.15553799 - 0.20493631j,\n ],\n [\n 0.35083011 - 0.20807392j,\n -0.27602911 - 0.13934692j,\n 0.11874165 + 0.34532609j,\n -0.45945242 - 0.62734969j,\n ],\n [\n -0.11379919 - 0.66706921j,\n -0.21120956 - 0.2165113j,\n 0.30133006 + 0.23367271j,\n 0.54593491 + 0.08446372j,\n ],\n ]\n )\n\n Q_expected = np.array(\n [\n [\n -0.46513201 - 1.38777878e-17j,\n -0.13035515 - 2.23341802e-01j,\n -0.74047856 + 7.08652160e-02j,\n -0.0990036 - 3.91977176e-01j,\n ],\n [\n 0.13035515 - 2.23341802e-01j,\n 0.46494302 + 0.00000000e00j,\n 0.05507901 - 1.19182067e-01j,\n -0.80370146 - 2.31904873e-01j,\n ],\n [\n -0.74047856 - 7.08652160e-02j,\n 
-0.05507901 - 1.19182067e-01j,\n 0.62233412 - 2.77555756e-17j,\n -0.0310774 - 2.02894077e-01j,\n ],\n [\n 0.0990036 - 3.91977176e-01j,\n -0.80370146 + 2.31904873e-01j,\n 0.0310774 - 2.02894077e-01j,\n -0.30774091 + 2.77555756e-17j,\n ],\n ]\n )\n\n Q = make_Q(A, R)\n\n assert np.allclose(Q, Q_expected)\n\n\nclass TestQuantumMonteCarlo:\n \"\"\"Tests for the QuantumMonteCarlo template\"\"\"\n\n @staticmethod\n def func(i):\n return np.sin(i) ** 2\n\n def test_non_flat(self):\n \"\"\"Test if a ValueError is raised when a non-flat array is input\"\"\"\n p = np.ones((4, 1)) / 4\n with pytest.raises(ValueError, match=\"The probability distribution must be specified as a\"):\n QuantumMonteCarlo(p, self.func, range(3), range(3, 5))\n\n def test_wrong_size_p(self):\n \"\"\"Test if a ValueError is raised when a probability distribution is passed whose length\n cannot be mapped to qubits\"\"\"\n p = np.ones(5) / 5\n with pytest.raises(ValueError, match=\"The probability distribution must have a length\"):\n QuantumMonteCarlo(p, self.func, range(3), range(3, 5))\n\n def test_unexpected_target_wires_number(self):\n \"\"\"Test if a ValueError is raised when the number of target wires is incompatible with the\n expected number of target wires inferred from the length of the input probability\n distribution\"\"\"\n p = np.ones(4) / 4\n with pytest.raises(\n ValueError,\n match=\"The probability distribution of dimension 4 requires\" \" 3 target wires\",\n ):\n QuantumMonteCarlo(p, self.func, range(4), range(4, 6))\n\n def test_expected_circuit(self):\n \"\"\"Test if the circuit applied when using the QMC template is the same as the expected\n circuit for a fixed example\"\"\"\n p = np.ones(4) / 4\n target_wires, estimation_wires = Wires(range(3)), Wires(range(3, 5))\n\n op = QuantumMonteCarlo(p, self.func, target_wires, estimation_wires)\n tape = op.expand()\n\n # Do expansion in two steps to avoid also decomposing the first QubitUnitary\n queue_before_qpe = tape.operations[:2]\n\n # 2-qubit decomposition has 10 operations, and after is a 3-qubit gate so start at 11\n queue_after_qpe = tape.expand().operations[11:]\n\n A = probs_to_unitary(p)\n R = func_to_unitary(self.func, 4)\n\n assert len(queue_before_qpe) == 2\n assert queue_before_qpe[0].name == \"QubitUnitary\"\n assert queue_before_qpe[1].name == \"QubitUnitary\"\n assert np.allclose(queue_before_qpe[0].matrix, A)\n assert np.allclose(queue_before_qpe[1].matrix, R)\n assert queue_before_qpe[0].wires == target_wires[:-1]\n assert queue_before_qpe[1].wires == target_wires\n\n Q = make_Q(A, R)\n\n with qml.tape.QuantumTape() as qpe_tape:\n qml.QuantumPhaseEstimation(Q, target_wires, estimation_wires)\n\n qpe_tape = qpe_tape.expand()\n\n assert len(queue_after_qpe) == len(qpe_tape.operations)\n assert all(o1.name == o2.name for o1, o2 in zip(queue_after_qpe, qpe_tape.operations))\n assert all(\n np.allclose(o1.matrix, o2.matrix)\n for o1, o2 in zip(queue_after_qpe, qpe_tape.operations)\n )\n assert all(o1.wires == o2.wires for o1, o2 in zip(queue_after_qpe, qpe_tape.operations))\n\n def test_expected_value(self):\n \"\"\"Test that the QuantumMonteCarlo template can correctly estimate the expectation value\n following the example in the usage details\"\"\"\n m = 5\n M = 2**m\n\n xmax = np.pi\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.cos(xs[i]) ** 2\n\n estimates = []\n\n for n in range(4, 11):\n N = 2**n\n\n target_wires = range(m + 1)\n estimation_wires = 
range(m + 1, n + m + 1)\n\n dev = qml.device(\"default.qubit\", wires=(n + m + 1))\n\n @qml.qnode(dev)\n def circuit():\n qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n phase_estimated = np.argmax(circuit()[: int(N / 2)]) / N\n mu_estimated = (1 - np.cos(np.pi * phase_estimated)) / 2\n estimates.append(mu_estimated)\n\n exact = 0.432332358381693654\n\n # Check that the error is monotonically decreasing\n for i in range(len(estimates) - 1):\n err1 = np.abs(estimates[i] - exact)\n err2 = np.abs(estimates[i + 1] - exact)\n assert err1 >= err2\n\n assert np.allclose(estimates[-1], exact, rtol=1e-3)\n\n def test_expected_value_custom_wires(self):\n \"\"\"Test that the QuantumMonteCarlo template can correctly estimate the expectation value\n following the example in the usage details when the wires have custom labels\"\"\"\n m = 5\n M = 2**m\n\n xmax = np.pi\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.cos(xs[i]) ** 2\n\n n = 10\n N = 2**n\n\n target_wires = [0, \"a\", -1.1, -10, \"bbb\", 1000]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\", 247, \"straw\", \"berry\", 5.5, 6.6]\n\n dev = qml.device(\"default.qubit\", wires=target_wires + estimation_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n phase_estimated = np.argmax(circuit()[: int(N / 2)]) / N\n mu_estimated = (1 - np.cos(np.pi * phase_estimated)) / 2\n\n exact = 0.432332358381693654\n assert np.allclose(mu_estimated, exact, rtol=1e-3)\n\n def test_id(self):\n \"\"\"Tests that the id attribute can be set.\"\"\"\n xs = np.linspace(-np.pi, np.pi, 2**5)\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n func = lambda i: np.cos(xs[i]) ** 2\n\n target_wires = [0, \"a\", -1.1, -10, \"bbb\", 1000]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\", 247, \"straw\", \"berry\", 5.5, 6.6]\n\n template = qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires, id=\"a\"\n )\n\n assert template.id == \"a\"\n",
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis submodule contains the discrete-variable quantum operations that perform\narithmetic operations on their input states.\n\"\"\"\n# pylint:disable=abstract-method,arguments-differ,protected-access\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.operation import Operation\n\n\nclass QubitCarry(Operation):\n r\"\"\"QubitCarry(wires)\n Apply the ``QubitCarry`` operation to four input wires.\n\n This operation performs the transformation:\n\n .. math::\n |a\\rangle |b\\rangle |c\\rangle |d\\rangle \\rightarrow |a\\rangle |b\\rangle |b\\oplus c\\rangle |bc \\oplus d\\oplus (b\\oplus c)a\\rangle\n\n .. figure:: ../../_static/ops/QubitCarry.svg\n :align: center\n :width: 60%\n :target: javascript:void(0);\n\n See `here <https://arxiv.org/abs/quant-ph/0008033v1>`__ for more information.\n\n .. note::\n The first wire should be used to input a carry bit from previous operations. The final wire\n holds the carry bit of this operation and the input state on this wire should be\n :math:`|0\\rangle`.\n\n **Details:**\n\n * Number of wires: 4\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int]): the wires the operation acts on\n\n **Example**\n\n The ``QubitCarry`` operation maps the state :math:`|0110\\rangle` to :math:`|0101\\rangle`, where\n the last qubit denotes the carry value:\n\n .. code-block::\n\n input_bitstring = (0, 1, 1, 0)\n\n @qml.qnode(dev)\n def circuit(basis_state):\n qml.BasisState(basis_state, wires=[0, 1, 2, 3])\n qml.QubitCarry(wires=[0, 1, 2, 3])\n return qml.probs(wires=[0, 1, 2, 3])\n\n probs = circuit(input_bitstring)\n probs_indx = np.argwhere(probs == 1).flatten()[0]\n bitstrings = list(itertools.product(range(2), repeat=4))\n output_bitstring = bitstrings[probs_indx]\n\n The output bitstring is\n\n >>> output_bitstring\n (0, 1, 0, 1)\n\n The action of ``QubitCarry`` is to add wires ``1`` and ``2``. The modulo-two result is output\n in wire ``2`` with a carry value output in wire ``3``. 
In this case, :math:`1 \\oplus 1 = 0` with\n a carry, so we have:\n\n >>> bc_sum = output_bitstring[2]\n >>> bc_sum\n 0\n >>> carry = output_bitstring[3]\n >>> carry\n 1\n \"\"\"\n num_wires = 4\n num_params = 0\n _mat = np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n ]\n )\n\n @classmethod\n def _matrix(cls, *params):\n return QubitCarry._mat\n\n @staticmethod\n def decomposition(wires):\n decomp_ops = [\n qml.Toffoli(wires=wires[1:]),\n qml.CNOT(wires=[wires[1], wires[2]]),\n qml.Toffoli(wires=[wires[0], wires[2], wires[3]]),\n ]\n return decomp_ops\n\n\nclass QubitSum(Operation):\n r\"\"\"QubitSum(wires)\n Apply a ``QubitSum`` operation on three input wires.\n\n This operation performs the transformation:\n\n .. math::\n |a\\rangle |b\\rangle |c\\rangle \\rightarrow |a\\rangle |b\\rangle |a\\oplus b\\oplus c\\rangle\n\n\n .. figure:: ../../_static/ops/QubitSum.svg\n :align: center\n :width: 40%\n :target: javascript:void(0);\n\n See `here <https://arxiv.org/abs/quant-ph/0008033v1>`__ for more information.\n\n **Details:**\n\n * Number of wires: 3\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int]): the wires the operation acts on\n\n **Example**\n\n The ``QubitSum`` operation maps the state :math:`|010\\rangle` to :math:`|011\\rangle`, with the\n final wire holding the modulo-two sum of the first two wires:\n\n .. code-block::\n\n input_bitstring = (0, 1, 0)\n\n @qml.qnode(dev)\n def circuit(basis_state):\n qml.BasisState(basis_state, wires = [0, 1, 2])\n qml.QubitSum(wires=[0, 1, 2])\n return qml.probs(wires=[0, 1, 2])\n\n probs = circuit(input_bitstring)\n probs_indx = np.argwhere(probs == 1).flatten()[0]\n bitstrings = list(itertools.product(range(2), repeat=3))\n output_bitstring = bitstrings[probs_indx]\n\n The output bitstring is\n\n >>> output_bitstring\n (0, 1, 1)\n\n The action of ``QubitSum`` is to add wires ``0``, ``1``, and ``2``. The modulo-two result is\n output in wire ``2``. 
In this case, :math:`0 \\oplus 1 \\oplus 0 = 1`, so we have:\n\n >>> abc_sum = output_bitstring[2]\n >>> abc_sum\n 1\n \"\"\"\n num_wires = 3\n num_params = 0\n _mat = np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n ]\n )\n\n def label(self, decimals=None, base_label=None):\n return super().label(decimals=decimals, base_label=base_label or \"Σ\")\n\n @classmethod\n def _matrix(cls, *params):\n return QubitSum._mat\n\n @staticmethod\n def decomposition(wires):\n decomp_ops = [\n qml.CNOT(wires=[wires[1], wires[2]]),\n qml.CNOT(wires=[wires[0], wires[2]]),\n ]\n return decomp_ops\n\n def adjoint(self):\n return QubitSum(wires=self.wires)\n",
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport pytest\r\nimport numpy as np\r\n\r\nimport pennylane as qml\r\nfrom pennylane.transforms.adjoint import adjoint\r\n\r\n\r\ndef test_adjoint_on_function():\r\n \"\"\"Test that adjoint works when applied to a function\"\"\"\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def my_op():\r\n qml.RX(0.123, wires=0)\r\n qml.RY(2.32, wires=0)\r\n qml.RZ(1.95, wires=0)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n qml.PauliX(wires=0)\r\n qml.PauliZ(wires=0)\r\n my_op()\r\n adjoint(my_op)()\r\n qml.PauliY(wires=0)\r\n return qml.state()\r\n\r\n np.testing.assert_allclose(my_circuit(), np.array([1.0j, 0.0]), atol=1e-6, rtol=1e-6)\r\n\r\n\r\ndef test_adjoint_directly_on_op():\r\n \"\"\"Test that adjoint works when directly applyed to an op\"\"\"\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n adjoint(qml.RX)(np.pi / 4.0, wires=0)\r\n return qml.state()\r\n\r\n np.testing.assert_allclose(my_circuit(), np.array([0.92388, 0.382683j]), atol=1e-6, rtol=1e-6)\r\n\r\n\r\ndef test_barrier_adjoint():\r\n \"\"\"Check that the adjoint for the Barrier is working\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n adjoint(qml.Barrier)(wires=0)\r\n return qml.state()\r\n\r\n assert my_circuit()[0] == 1.0\r\n\r\n\r\ndef test_wirecut_adjoint():\r\n \"\"\"Check that the adjoint for the WireCut is working\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n adjoint(qml.WireCut)(wires=0)\r\n return qml.state()\r\n\r\n assert np.isclose(my_circuit()[0], 1.0)\r\n\r\n\r\ndef test_identity_adjoint():\r\n \"\"\"Check that the adjoint for Identity is working\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2, shots=100)\r\n\r\n @qml.qnode(dev)\r\n def circuit():\r\n identity()\r\n qml.adjoint(identity)()\r\n return qml.state()\r\n\r\n def identity():\r\n qml.PauliX(wires=0)\r\n qml.Identity(0)\r\n qml.CNOT(wires=[0, 1])\r\n\r\n assert circuit()[0] == 1.0\r\n\r\n queue = circuit.tape.queue\r\n\r\n assert queue[1].name == \"Identity\"\r\n assert queue[4].name == \"Identity\"\r\n\r\n\r\ndef test_nested_adjoint():\r\n \"\"\"Test that adjoint works when nested with other adjoints\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n adjoint(adjoint(qml.RX))(np.pi / 4.0, wires=0)\r\n return qml.state()\r\n\r\n np.testing.assert_allclose(my_circuit(), np.array([0.92388, -0.382683j]), atol=1e-6, rtol=1e-6)\r\n\r\n\r\ndef test_nested_adjoint_on_function():\r\n \"\"\"Test that adjoint works when nested with other adjoints\"\"\"\r\n\r\n def my_op():\r\n qml.RX(0.123, wires=0)\r\n qml.RY(2.32, wires=0)\r\n qml.RZ(1.95, wires=0)\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n @qml.qnode(dev)\r\n def my_circuit():\r\n adjoint(my_op)()\r\n qml.Hadamard(wires=0)\r\n 
adjoint(adjoint(my_op))()\r\n return qml.state()\r\n\r\n np.testing.assert_allclose(\r\n my_circuit(), np.array([-0.995707, 0.068644 + 6.209710e-02j]), atol=1e-6, rtol=1e-6\r\n )\r\n\r\n\r\nwith qml.tape.JacobianTape() as tape:\r\n qml.PauliX(0)\r\n qml.Hadamard(1)\r\n\r\nnoncallable_objects = [\r\n qml.RX(0.2, wires=0),\r\n qml.AngleEmbedding(list(range(2)), wires=range(2)),\r\n [qml.Hadamard(1), qml.RX(-0.2, wires=1)],\r\n tape,\r\n]\r\n\r\n\r\[email protected](\"obj\", noncallable_objects)\r\ndef test_error_adjoint_on_noncallable(obj):\r\n \"\"\"Test that an error is raised if qml.adjoint is applied to an object that\r\n is not callable, as it silently does not have any effect on those.\"\"\"\r\n with pytest.raises(ValueError, match=f\"{type(obj)} is not callable.\"):\r\n adjoint(obj)\r\n\r\n\r\nclass TestOutsideOfQueuing:\r\n \"\"\"Test that operations and templates work with the adjoint transform when\r\n created outside of a queuing context\"\"\"\r\n\r\n non_param_ops = [(qml.S, 0), (qml.PauliZ, 3), (qml.CNOT, [32, 3])]\r\n\r\n @pytest.mark.parametrize(\"op,wires\", non_param_ops)\r\n def test_single_op_non_param_adjoint(self, op, wires):\r\n \"\"\"Test that the adjoint correctly inverts non-parametrized\r\n operations\"\"\"\r\n op_adjoint = adjoint(op)(wires=wires)\r\n expected = op(wires=wires).adjoint()\r\n\r\n assert type(op_adjoint) == type(expected)\r\n assert op_adjoint.wires == expected.wires\r\n\r\n param_ops = [(qml.RX, [0.123], 0), (qml.Rot, [0.1, 0.2, 0.3], [1]), (qml.CRY, [0.1], [1, 4])]\r\n\r\n @pytest.mark.parametrize(\"op,par,wires\", param_ops)\r\n def test_single_op_param_adjoint(self, op, par, wires):\r\n \"\"\"Test that the adjoint correctly inverts operations with a single\r\n parameter\"\"\"\r\n param_op_adjoint = adjoint(op)(*par, wires=wires)\r\n expected = op(*par, wires=wires).adjoint()\r\n\r\n assert type(param_op_adjoint) == type(expected)\r\n assert param_op_adjoint.parameters == expected.parameters\r\n assert param_op_adjoint.wires == expected.wires\r\n\r\n template_ops = [\r\n (qml.templates.AngleEmbedding, [np.ones((1))], [2, 3]),\r\n (qml.templates.StronglyEntanglingLayers, [np.ones((1, 2, 3))], [2, 3]),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"template, par, wires\", template_ops)\r\n def test_templates_adjoint(self, template, par, wires):\r\n \"\"\"Test that the adjoint correctly inverts templates\"\"\"\r\n res = adjoint(template)(*par, wires=wires)\r\n result = res if hasattr(res, \"__iter__\") else [res] # handle single operation case\r\n expected_ops = template(*par, wires=wires)\r\n\r\n expected_ops = expected_ops.expand().operations\r\n for o1, o2 in zip(result, reversed(expected_ops)):\r\n o2 = o2.adjoint()\r\n assert type(o1) == type(o2)\r\n assert o1.parameters == o2.parameters\r\n assert o1.wires == o2.wires\r\n\r\n def test_cv_template_adjoint(self):\r\n \"\"\"Test that the adjoint correctly inverts CV templates\"\"\"\r\n template, par, wires = qml.templates.Interferometer, [[1], [0.3], [0.2, 0.3]], [2, 3]\r\n result = adjoint(template)(*par, wires=wires).expand().operations\r\n expected_ops = template(*par, wires=wires).expand().operations\r\n\r\n for o1, o2 in zip(result, reversed(expected_ops)):\r\n o2 = o2.adjoint()\r\n assert type(o1) == type(o2)\r\n assert o1.parameters == o2.parameters\r\n assert o1.wires == o2.wires\r\n\r\n\r\nfn = lambda func, *args, **kwargs: adjoint(func)(*args, **kwargs)\r\n\r\n\r\nclass TestTemplateIntegration:\r\n \"\"\"Test that templates work correctly with the adjoint transform\"\"\"\r\n\r\n def 
test_angle_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts angle embedding\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.AngleEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(features=weights, wires=[0, 1, 2])\r\n fn(template, features=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.array([0.2, 0.5, 0.8])\r\n res = circuit(weights)\r\n assert len(np.nonzero(res)) == 1\r\n\r\n def test_amplitude_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts amplitude embedding\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.AmplitudeEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(features=weights, wires=[0, 1, 2])\r\n fn(template, features=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.array([0.2, 0.5, 0.8, 0.6, 0.1, 0.6, 0.1, 0.5]) / np.sqrt(1.92)\r\n res = circuit(weights)\r\n assert len(np.nonzero(res)) == 1\r\n\r\n def test_basis_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts basis embedding\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.BasisEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(features=weights, wires=[0, 1, 2])\r\n fn(template, features=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.array([1, 0, 1])\r\n res = circuit(weights)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n assert np.allclose(res, expected)\r\n\r\n def test_displacement_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts displacement embedding\"\"\"\r\n dev = qml.device(\"default.gaussian\", wires=3)\r\n template = qml.templates.DisplacementEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(features=weights, wires=[0, 1, 2])\r\n fn(template, features=weights, wires=[0, 1, 2])\r\n return qml.expval(qml.NumberOperator(0))\r\n\r\n weights = np.array([0.6, 0.2, 0.1])\r\n res = circuit(weights)\r\n assert np.allclose(res, 0.0)\r\n\r\n def test_squeezing_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts squeezing embedding\"\"\"\r\n dev = qml.device(\"default.gaussian\", wires=3)\r\n template = qml.templates.SqueezingEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(features=weights, wires=[0, 1, 2])\r\n fn(template, features=weights, wires=[0, 1, 2])\r\n return qml.expval(qml.NumberOperator(0))\r\n\r\n weights = np.array([0.6, 0.2, 0.1])\r\n res = circuit(weights)\r\n assert np.allclose(res, 0.0)\r\n\r\n def test_qaoa_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts qaoa embedding\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.QAOAEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(features, weights):\r\n template(features=features, weights=weights, wires=[0, 1, 2])\r\n fn(template, features=features, weights=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n features = np.array([1.0, 2.0, 3.0])\r\n weights = np.random.random(template.shape(2, 3))\r\n\r\n res = circuit(features, weights)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_iqp_embedding(self):\r\n \"\"\"Test that the adjoint correctly inverts iqp embedding\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.IQPEmbedding\r\n\r\n @qml.qnode(dev)\r\n def circuit(features):\r\n template(features=features, wires=[0, 1, 2])\r\n 
fn(template, features=features, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n features = np.array([1.0, 2.0, 3.0])\r\n res = circuit(features)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"template\",\r\n [\r\n qml.templates.BasicEntanglerLayers,\r\n qml.templates.StronglyEntanglingLayers,\r\n qml.templates.RandomLayers,\r\n ],\r\n )\r\n def test_layers(self, template):\r\n \"\"\"Test that the adjoint correctly inverts layers\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weights=weights, wires=[0, 1, 2])\r\n fn(template, weights=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.random.random(template.shape(2, 3))\r\n res = circuit(weights)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"template\",\r\n [\r\n qml.templates.ParticleConservingU1,\r\n qml.templates.ParticleConservingU2,\r\n ],\r\n )\r\n def test_particle_conserving(self, template):\r\n \"\"\"Test that the adjoint correctly inverts particle conserving layers\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n init_state = np.array([0, 1, 1])\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weights=weights, init_state=init_state, wires=[0, 1, 2])\r\n fn(template, weights=weights, init_state=init_state, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.random.random(template.shape(2, 3))\r\n res = circuit(weights)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_simplified_two_design(self):\r\n \"\"\"Test that the adjoint correctly inverts the simplified two design\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.SimplifiedTwoDesign\r\n\r\n @qml.qnode(dev)\r\n def circuit(data, weights):\r\n template(initial_layer_weights=data, weights=weights, wires=[0, 1, 2])\r\n fn(template, initial_layer_weights=data, weights=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = [np.random.random(s) for s in template.shape(2, 3)]\r\n res = circuit(weights[0], *weights[1:])\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_approx_time_evolution(self):\r\n \"\"\"Test that the adjoint correctly inverts the approx time evolution\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.ApproxTimeEvolution\r\n\r\n coeffs = [1, 1]\r\n obs = [qml.PauliX(0), qml.PauliX(1)]\r\n H = qml.Hamiltonian(coeffs, obs)\r\n\r\n @qml.qnode(dev)\r\n def circuit(t):\r\n template(H, t, 1)\r\n fn(template, H, t, 1)\r\n return qml.state()\r\n\r\n res = circuit(0.5)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n assert np.allclose(res, expected)\r\n\r\n def test_arbitrary_unitary(self):\r\n \"\"\"Test that the adjoint correctly inverts the arbitrary unitary\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.ArbitraryUnitary\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weights=weights, wires=[0, 1, 2])\r\n fn(template, weights=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n weights = np.random.random(template.shape(3))\r\n res = circuit(weights)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_single_excitation(self):\r\n \"\"\"Test that the 
adjoint correctly inverts the single excitation unitary\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n template = qml.templates.FermionicSingleExcitation\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weight=weights, wires=[0, 1, 2])\r\n fn(template, weight=weights, wires=[0, 1, 2])\r\n return qml.state()\r\n\r\n res = circuit(0.6)\r\n expected = np.zeros([2**3])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_double_excitation(self):\r\n \"\"\"Test that the adjoint correctly inverts the double excitation unitary\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=4)\r\n template = qml.templates.FermionicDoubleExcitation\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weight=weights, wires1=[0, 1], wires2=[2, 3])\r\n fn(template, weight=weights, wires1=[0, 1], wires2=[2, 3])\r\n return qml.state()\r\n\r\n res = circuit(0.6)\r\n expected = np.zeros([2**4])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n\r\n def test_interferometer(self):\r\n \"\"\"Test that the adjoint correctly inverts squeezing embedding\"\"\"\r\n dev = qml.device(\"default.gaussian\", wires=3)\r\n template = qml.templates.Interferometer\r\n r = 1.5\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n qml.Squeezing(r, 0, wires=0)\r\n qml.Squeezing(r, 0, wires=1)\r\n qml.Squeezing(r, 0, wires=2)\r\n template(*weights, wires=[0, 1, 2])\r\n fn(template, *weights, wires=[0, 1, 2])\r\n return qml.expval(qml.NumberOperator(0))\r\n\r\n weights = [\r\n np.random.random([3 * (3 - 1) // 2]),\r\n np.random.random([3 * (3 - 1) // 2]),\r\n np.random.random([3]),\r\n ]\r\n res = circuit(weights)\r\n assert np.allclose(res, np.sinh(r) ** 2)\r\n\r\n def test_gate_fabric(self):\r\n \"\"\"Test that the adjoint correctly inverts the gate fabric template\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=4)\r\n template = qml.templates.GateFabric\r\n\r\n @qml.qnode(dev)\r\n def circuit(weights):\r\n template(weights=weights, wires=[0, 1, 2, 3], init_state=[1, 1, 0, 0])\r\n fn(template, weights=weights, wires=[0, 1, 2, 3], init_state=[1, 1, 0, 0])\r\n return qml.state()\r\n\r\n res = circuit([[[0.6, 0.8]]])\r\n expected = np.zeros([2**4])\r\n expected[0] = 1.0\r\n\r\n assert np.allclose(res, expected)\r\n",
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Unit tests for the jax interface\"\"\"\r\nimport functools\r\n\r\nimport pytest\r\n\r\njax = pytest.importorskip(\"jax\")\r\njnp = pytest.importorskip(\"jax.numpy\")\r\nimport numpy as np\r\n\r\nimport pennylane as qml\r\nfrom pennylane.gradients import param_shift\r\nfrom pennylane.interfaces.batch import execute\r\nfrom pennylane.interfaces.batch import InterfaceUnsupportedError\r\n\r\n\r\[email protected](\"interface\", [\"jax-jit\", \"jax-python\"])\r\nclass TestJaxExecuteUnitTests:\r\n \"\"\"Unit tests for jax execution\"\"\"\r\n\r\n def test_jacobian_options(self, mocker, interface, tol):\r\n \"\"\"Test setting jacobian options\"\"\"\r\n spy = mocker.spy(qml.gradients, \"param_shift\")\r\n\r\n a = jnp.array([0.1, 0.2])\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, device):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n device,\r\n gradient_fn=param_shift,\r\n gradient_kwargs={\"shift\": np.pi / 4},\r\n interface=interface,\r\n )[0][0]\r\n\r\n res = jax.grad(cost)(a, device=dev)\r\n\r\n for args in spy.call_args_list:\r\n assert args[1][\"shift\"] == np.pi / 4\r\n\r\n def test_incorrect_mode(self, interface):\r\n \"\"\"Test that an error is raised if an gradient transform\r\n is used with mode=forward\"\"\"\r\n a = jnp.array([0.1, 0.2])\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, device):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n device,\r\n gradient_fn=param_shift,\r\n mode=\"forward\",\r\n interface=interface,\r\n )[0]\r\n\r\n with pytest.raises(\r\n ValueError, match=\"Gradient transforms cannot be used with mode='forward'\"\r\n ):\r\n res = jax.grad(cost)(a, device=dev)\r\n\r\n def test_unknown_interface(self, interface):\r\n \"\"\"Test that an error is raised if the interface is unknown\"\"\"\r\n a = jnp.array([0.1, 0.2])\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, device):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n device,\r\n gradient_fn=param_shift,\r\n interface=\"None\",\r\n )[0]\r\n\r\n with pytest.raises(ValueError, match=\"Unknown interface\"):\r\n cost(a, device=dev)\r\n\r\n def test_forward_mode(self, interface, mocker):\r\n \"\"\"Test that forward mode uses the `device.execute_and_gradients` pathway\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(dev, \"execute_and_gradients\")\r\n\r\n def cost(a):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n 
gradient_fn=\"device\",\r\n interface=interface,\r\n gradient_kwargs={\r\n \"method\": \"adjoint_jacobian\",\r\n \"use_device_state\": True,\r\n },\r\n )[0]\r\n\r\n a = jnp.array([0.1, 0.2])\r\n cost(a)\r\n\r\n # adjoint method only performs a single device execution, but gets both result and gradient\r\n assert dev.num_executions == 1\r\n spy.assert_called()\r\n\r\n def test_backward_mode(self, interface, mocker):\r\n \"\"\"Test that backward mode uses the `device.batch_execute` and `device.gradients` pathway\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy_execute = mocker.spy(qml.devices.DefaultQubit, \"batch_execute\")\r\n spy_gradients = mocker.spy(qml.devices.DefaultQubit, \"gradients\")\r\n\r\n def cost(a):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n mode=\"backward\",\r\n interface=interface,\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\r\n )[0][0]\r\n\r\n a = jnp.array([0.1, 0.2])\r\n cost(a)\r\n\r\n assert dev.num_executions == 1\r\n spy_execute.assert_called()\r\n spy_gradients.assert_not_called()\r\n\r\n jax.grad(cost)(a)\r\n spy_gradients.assert_called()\r\n\r\n def test_max_diff_error(self, interface):\r\n \"\"\"Test that an error is being raised if max_diff > 1 for the JAX\r\n interface.\"\"\"\r\n a = jnp.array([0.1, 0.2])\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n with pytest.raises(\r\n InterfaceUnsupportedError,\r\n match=\"The JAX interface only supports first order derivatives.\",\r\n ):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n execute(\r\n [tape],\r\n dev,\r\n interface=interface,\r\n gradient_fn=param_shift,\r\n gradient_kwargs={\"shift\": np.pi / 4},\r\n max_diff=2,\r\n )\r\n\r\n\r\[email protected](\"interface\", [\"jax-jit\", \"jax-python\"])\r\nclass TestCaching:\r\n \"\"\"Test for caching behaviour\"\"\"\r\n\r\n def test_cache_maxsize(self, interface, mocker):\r\n \"\"\"Test the cachesize property of the cache\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(qml.interfaces.batch, \"cache_execute\")\r\n\r\n def cost(a, cachesize):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n gradient_fn=param_shift,\r\n cachesize=cachesize,\r\n interface=interface,\r\n )[0][0]\r\n\r\n params = jnp.array([0.1, 0.2])\r\n jax.grad(cost)(params, cachesize=2)\r\n cache = spy.call_args[0][1]\r\n\r\n assert cache.maxsize == 2\r\n assert cache.currsize == 2\r\n assert len(cache) == 2\r\n\r\n def test_custom_cache(self, interface, mocker):\r\n \"\"\"Test the use of a custom cache object\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(qml.interfaces.batch, \"cache_execute\")\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface=interface,)[\r\n 0\r\n ][0]\r\n\r\n custom_cache = {}\r\n params = jnp.array([0.1, 0.2])\r\n jax.grad(cost)(params, cache=custom_cache)\r\n\r\n cache = spy.call_args[0][1]\r\n assert cache is custom_cache\r\n\r\n def test_custom_cache_multiple(self, interface, mocker):\r\n \"\"\"Test the use of a custom cache object with 
multiple tapes\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n spy = mocker.spy(qml.interfaces.batch, \"cache_execute\")\r\n\r\n a = jnp.array(0.1)\r\n b = jnp.array(0.2)\r\n\r\n def cost(a, b, cache):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n res = execute(\r\n [tape1, tape2],\r\n dev,\r\n gradient_fn=param_shift,\r\n cache=cache,\r\n interface=interface,\r\n )\r\n return res[0][0]\r\n\r\n custom_cache = {}\r\n jax.grad(cost)(a, b, cache=custom_cache)\r\n\r\n cache = spy.call_args[0][1]\r\n assert cache is custom_cache\r\n\r\n def test_caching_param_shift(self, interface, tol):\r\n \"\"\"Test that, when using parameter-shift transform,\r\n caching produces the optimum number of evaluations.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute([tape], dev, gradient_fn=param_shift, cache=cache, interface=interface,)[\r\n 0\r\n ][0]\r\n\r\n # Without caching, 5 evaluations are required to compute\r\n # the Jacobian: 1 (forward pass) + 2 (backward pass) * (2 shifts * 2 params)\r\n params = jnp.array([0.1, 0.2])\r\n jax.grad(cost)(params, cache=None)\r\n assert dev.num_executions == 5\r\n\r\n # With caching, 5 evaluations are required to compute\r\n # the Jacobian: 1 (forward pass) + (2 shifts * 2 params)\r\n dev._num_executions = 0\r\n jac_fn = jax.grad(cost)\r\n grad1 = jac_fn(params, cache=True)\r\n assert dev.num_executions == 5\r\n\r\n # Check that calling the cost function again\r\n # continues to evaluate the device (that is, the cache\r\n # is emptied between calls)\r\n grad2 = jac_fn(params, cache=True)\r\n assert dev.num_executions == 10\r\n assert np.allclose(grad1, grad2, atol=tol, rtol=0)\r\n\r\n # Check that calling the cost function again\r\n # with different parameters produces a different Jacobian\r\n grad2 = jac_fn(2 * params, cache=True)\r\n assert dev.num_executions == 15\r\n assert not np.allclose(grad1, grad2, atol=tol, rtol=0)\r\n\r\n def test_caching_adjoint_backward(self, interface):\r\n \"\"\"Test that caching produces the optimum number of adjoint evaluations\r\n when mode=backward\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(\r\n [tape],\r\n dev,\r\n gradient_fn=\"device\",\r\n cache=cache,\r\n mode=\"backward\",\r\n interface=interface,\r\n gradient_kwargs={\"method\": \"adjoint_jacobian\"},\r\n )[0][0]\r\n\r\n # Without caching, 2 evaluations are required.\r\n # 1 for the forward pass, and one per output dimension\r\n # on the backward pass.\r\n jax.grad(cost)(params, cache=None)\r\n assert dev.num_executions == 2\r\n\r\n # With caching, also 2 evaluations are required. 
One\r\n # for the forward pass, and one for the backward pass.\r\n dev._num_executions = 0\r\n jac_fn = jax.grad(cost)\r\n grad1 = jac_fn(params, cache=True)\r\n assert dev.num_executions == 2\r\n\r\n\r\nexecute_kwargs = [\r\n {\"gradient_fn\": param_shift},\r\n {\r\n \"gradient_fn\": \"device\",\r\n \"mode\": \"forward\",\r\n \"gradient_kwargs\": {\"method\": \"adjoint_jacobian\", \"use_device_state\": True},\r\n },\r\n {\r\n \"gradient_fn\": \"device\",\r\n \"mode\": \"backward\",\r\n \"gradient_kwargs\": {\"method\": \"adjoint_jacobian\"},\r\n },\r\n]\r\n\r\n\r\[email protected](\"execute_kwargs\", execute_kwargs)\r\[email protected](\"interface\", [\"jax-jit\", \"jax-python\"])\r\nclass TestJaxExecuteIntegration:\r\n \"\"\"Test the jax interface execute function\r\n integrates well for both forward and backward execution\"\"\"\r\n\r\n def test_execution(self, execute_kwargs, interface):\r\n \"\"\"Test execution\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n\r\n def cost(a, b):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute([tape1, tape2], dev, interface=interface, **execute_kwargs)\r\n\r\n a = jnp.array(0.1)\r\n b = jnp.array(0.2)\r\n res = cost(a, b)\r\n\r\n assert len(res) == 2\r\n assert res[0].shape == (1,)\r\n assert res[1].shape == (1,)\r\n\r\n def test_scalar_jacobian(self, execute_kwargs, interface, tol):\r\n \"\"\"Test scalar jacobian calculation\"\"\"\r\n a = jnp.array(0.1)\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def cost(a):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n return execute([tape], dev, interface=interface, **execute_kwargs)[0][0]\r\n\r\n res = jax.grad(cost)(a)\r\n assert res.shape == ()\r\n\r\n # compare to standard tape jacobian\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = [0]\r\n tapes, fn = param_shift(tape)\r\n expected = fn(dev.batch_execute(tapes))\r\n\r\n assert expected.shape == (1, 1)\r\n assert np.allclose(res, np.squeeze(expected), atol=tol, rtol=0)\r\n\r\n def test_reusing_quantum_tape(self, execute_kwargs, interface, tol):\r\n \"\"\"Test re-using a quantum tape by passing new parameters\"\"\"\r\n a = jnp.array(0.1)\r\n b = jnp.array(0.2)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a, wires=0)\r\n qml.RX(b, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n assert tape.trainable_params == [0, 1]\r\n\r\n def cost(a, b):\r\n\r\n # An explicit call to _update() is required here to update the\r\n # trainable parameters in between tape executions.\r\n # This is different from how the autograd interface works.\r\n # Unless the update is issued, the validation check related to the\r\n # number of provided parameters fails in the tape: (len(params) !=\r\n # required_length) and the tape produces incorrect results.\r\n tape._update()\r\n tape.set_parameters([a, b])\r\n return execute([tape], dev, interface=interface, **execute_kwargs)[0][0]\r\n\r\n jac_fn = jax.grad(cost)\r\n jac = jac_fn(a, b)\r\n\r\n a = jnp.array(0.54)\r\n b = jnp.array(0.8)\r\n\r\n # check that the cost function continues to depend on the\r\n # values of the parameters for subsequent calls\r\n res2 = cost(2 * a, b)\r\n 
expected = [np.cos(2 * a)]\r\n assert np.allclose(res2, expected, atol=tol, rtol=0)\r\n\r\n jac_fn = jax.grad(lambda a, b: cost(2 * a, b))\r\n jac = jac_fn(a, b)\r\n expected = -2 * np.sin(2 * a)\r\n assert np.allclose(jac, expected, atol=tol, rtol=0)\r\n\r\n def test_classical_processing_single_tape(self, execute_kwargs, interface, tol):\r\n \"\"\"Test classical processing within the quantum tape for a single tape\"\"\"\r\n a = jnp.array(0.1)\r\n b = jnp.array(0.2)\r\n c = jnp.array(0.3)\r\n\r\n def cost(a, b, c, device):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a * c, wires=0)\r\n qml.RZ(b, wires=0)\r\n qml.RX(c + c**2 + jnp.sin(a), wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute([tape], device, interface=interface, **execute_kwargs)[0][0]\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n res = jax.grad(cost, argnums=(0, 1, 2))(a, b, c, device=dev)\r\n assert len(res) == 3\r\n\r\n def test_classical_processing_multiple_tapes(self, execute_kwargs, interface, tol):\r\n \"\"\"Test classical processing within the quantum tape for multiple\r\n tapes\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jax.numpy.array([0.3, 0.2])\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.Hadamard(0)\r\n qml.RY(x[0], wires=[0])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.Hadamard(0)\r\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\r\n qml.RX(2 * x[1], wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n result = execute(\r\n tapes=[tape1, tape2], device=dev, interface=interface, **execute_kwargs\r\n )\r\n return (result[0] + result[1] - 7 * result[1])[0]\r\n\r\n res = jax.grad(cost_fn)(params)\r\n assert res.shape == (2,)\r\n\r\n def test_multiple_tapes_output(self, execute_kwargs, interface, tol):\r\n \"\"\"Test the output types for the execution of multiple quantum tapes\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jax.numpy.array([0.3, 0.2])\r\n\r\n def cost_fn(x):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.Hadamard(0)\r\n qml.RY(x[0], wires=[0])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.Hadamard(0)\r\n qml.CRX(2 * x[0] * x[1], wires=[0, 1])\r\n qml.RX(2 * x[1], wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return execute(tapes=[tape1, tape2], device=dev, interface=interface, **execute_kwargs)\r\n\r\n res = cost_fn(params)\r\n assert isinstance(res, list)\r\n assert all(isinstance(r, jnp.ndarray) for r in res)\r\n assert all(r.shape == (1,) for r in res)\r\n\r\n @pytest.mark.xfail\r\n def test_matrix_parameter(self, execute_kwargs, interface, tol):\r\n \"\"\"Test that the jax interface works correctly\r\n with a matrix parameter\"\"\"\r\n a = jnp.array(0.1)\r\n U = qml.RY(a, wires=0).matrix\r\n\r\n def cost(U, device):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.PauliX(0)\r\n qml.QubitUnitary(U, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n tape.trainable_params = [0]\r\n return execute([tape], device, interface=interface, **execute_kwargs)[0][0]\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n res = cost(U, device=dev)\r\n assert np.allclose(res, -np.cos(a), atol=tol, rtol=0)\r\n\r\n jac_fn = jax.grad(cost, argnums=(0))\r\n res = jac_fn(U, device=dev)\r\n assert np.allclose(res, np.sin(a), atol=tol, rtol=0)\r\n\r\n def test_differentiable_expand(self, execute_kwargs, interface, tol):\r\n \"\"\"Test that operation and nested tapes expansion\r\n is 
differentiable\"\"\"\r\n\r\n class U3(qml.U3):\r\n def expand(self):\r\n tape = qml.tape.JacobianTape()\r\n theta, phi, lam = self.data\r\n wires = self.wires\r\n tape._ops += [\r\n qml.Rot(lam, theta, -lam, wires=wires),\r\n qml.PhaseShift(phi + lam, wires=wires),\r\n ]\r\n return tape\r\n\r\n def cost_fn(a, p, device):\r\n tape = qml.tape.JacobianTape()\r\n\r\n with tape:\r\n qml.RX(a, wires=0)\r\n U3(*p, wires=0)\r\n qml.expval(qml.PauliX(0))\r\n\r\n tape = tape.expand(stop_at=lambda obj: device.supports_operation(obj.name))\r\n return execute([tape], device, interface=interface, **execute_kwargs)[0][0]\r\n\r\n a = jnp.array(0.1)\r\n p = jnp.array([0.1, 0.2, 0.3])\r\n\r\n dev = qml.device(\"default.qubit\", wires=1)\r\n res = cost_fn(a, p, device=dev)\r\n expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (\r\n np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n jac_fn = jax.grad(cost_fn, argnums=(1))\r\n res = jac_fn(a, p, device=dev)\r\n expected = jnp.array(\r\n [\r\n np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),\r\n np.cos(p[1]) * np.cos(p[2]) * np.sin(a)\r\n - np.sin(p[1])\r\n * (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),\r\n np.sin(a)\r\n * (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_independent_expval(self, execute_kwargs, interface):\r\n \"\"\"Tests computing an expectation value that is independent of trainable\r\n parameters.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n qml.expval(qml.PauliZ(1))\r\n\r\n res = qml.interfaces.batch.execute(\r\n [tape], dev, cache=cache, interface=interface, **execute_kwargs\r\n )\r\n return res[0][0]\r\n\r\n res = jax.grad(cost)(params, cache=None)\r\n assert res.shape == (3,)\r\n\r\n @pytest.mark.parametrize(\r\n \"ret, mes\",\r\n [\r\n ([qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))], \"single return type\"),\r\n ([qml.state()], \"Only Variance and Expectation\"),\r\n ],\r\n )\r\n def test_raises_for_jax_jit(self, execute_kwargs, interface, ret, mes):\r\n \"\"\"Tests multiple measurements and unsupported measurements raise an\r\n error for the jit JAX interface.\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n [qml.apply(r) for r in ret]\r\n\r\n res = qml.interfaces.batch.execute(\r\n # Test only applicable for the jax jit interface\r\n [tape],\r\n dev,\r\n cache=cache,\r\n interface=\"jax-jit\",\r\n **execute_kwargs\r\n )\r\n return res[0][0]\r\n\r\n with pytest.raises(InterfaceUnsupportedError, match=mes):\r\n cost(params, cache=None)\r\n\r\n\r\[email protected](\"execute_kwargs\", execute_kwargs)\r\nclass TestVectorValued:\r\n \"\"\"Test vector-valued returns for the JAX Python interface.\"\"\"\r\n\r\n def test_multiple_expvals(self, execute_kwargs):\r\n \"\"\"Tests computing multiple expectation values in a tape.\"\"\"\r\n fwd_mode = execute_kwargs.get(\"mode\", \"not forward\") == \"forward\"\r\n if fwd_mode:\r\n pytest.skip(\"The forward mode is tested 
separately as it should raise an error.\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n res = qml.interfaces.batch.execute(\r\n [tape], dev, cache=cache, interface=\"jax-python\", **execute_kwargs\r\n )\r\n return res[0]\r\n\r\n res = jax.jacobian(cost)(params, cache=None)\r\n assert res.shape == (2, 3)\r\n\r\n def test_multiple_expvals_single_par(self, execute_kwargs):\r\n \"\"\"Tests computing multiple expectation values in a tape with a single\r\n trainable parameter.\"\"\"\r\n fwd_mode = execute_kwargs.get(\"mode\", \"not forward\") == \"forward\"\r\n if fwd_mode:\r\n pytest.skip(\"The forward mode is tested separately as it should raise an error.\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n res = qml.interfaces.batch.execute(\r\n [tape], dev, cache=cache, interface=\"jax-python\", **execute_kwargs\r\n )\r\n return res[0]\r\n\r\n res = jax.jacobian(cost)(params, cache=None)\r\n assert res.shape == (2, 1)\r\n\r\n def test_multi_tape_jacobian(self, execute_kwargs):\r\n \"\"\"Test the jacobian computation with multiple tapes.\"\"\"\r\n fwd_mode = execute_kwargs.get(\"mode\", \"not forward\") == \"forward\"\r\n if fwd_mode:\r\n pytest.skip(\"The forward mode is tested separately as it should raise an error.\")\r\n\r\n def cost(x, y, device, interface, ek):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n return qml.execute([tape1, tape2], device, **ek, interface=interface)[0]\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = jnp.array(0.543)\r\n y = jnp.array(-0.654)\r\n\r\n x_ = np.array(0.543)\r\n y_ = np.array(-0.654)\r\n\r\n res = jax.jacobian(cost, argnums=(0, 1))(\r\n x, y, dev, interface=\"jax-python\", ek=execute_kwargs\r\n )\r\n\r\n exp = qml.jacobian(cost, argnum=(0, 1))(\r\n x_, y_, dev, interface=\"autograd\", ek=execute_kwargs\r\n )\r\n for r, e in zip(res, exp):\r\n assert jnp.allclose(r, e, atol=1e-7)\r\n\r\n def test_multi_tape_jacobian_probs_expvals(self, execute_kwargs):\r\n \"\"\"Test the jacobian computation with multiple tapes with probability\r\n and expectation value computations.\"\"\"\r\n fwd_mode = execute_kwargs.get(\"mode\", \"not forward\") == \"forward\"\r\n if fwd_mode:\r\n pytest.skip(\"The forward mode is tested separately as it should raise an error.\")\r\n\r\n adjoint = execute_kwargs.get(\"gradient_kwargs\", {}).get(\"method\", \"\") == \"adjoint_jacobian\"\r\n if adjoint:\r\n pytest.skip(\"The adjoint diff method doesn't support probabilities.\")\r\n\r\n def cost(x, y, device, interface, ek):\r\n with qml.tape.JacobianTape() as tape1:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n with qml.tape.JacobianTape() as tape2:\r\n qml.RX(x, wires=[0])\r\n qml.RY(y, 
wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.probs(wires=[0])\r\n qml.probs(wires=[1])\r\n\r\n return qml.execute([tape1, tape2], device, **ek, interface=interface)[0]\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n x = jnp.array(0.543)\r\n y = jnp.array(-0.654)\r\n\r\n x_ = np.array(0.543)\r\n y_ = np.array(-0.654)\r\n\r\n res = jax.jacobian(cost, argnums=(0, 1))(\r\n x, y, dev, interface=\"jax-python\", ek=execute_kwargs\r\n )\r\n\r\n exp = qml.jacobian(cost, argnum=(0, 1))(\r\n x_, y_, dev, interface=\"autograd\", ek=execute_kwargs\r\n )\r\n for r, e in zip(res, exp):\r\n assert jnp.allclose(r, e, atol=1e-7)\r\n\r\n def test_multiple_expvals_raises_fwd_device_grad(self, execute_kwargs):\r\n \"\"\"Tests computing multiple expectation values in a tape.\"\"\"\r\n fwd_mode = execute_kwargs.get(\"mode\", \"not forward\") == \"forward\"\r\n if not fwd_mode:\r\n pytest.skip(\"Forward mode is not turned on.\")\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.1, 0.2, 0.3])\r\n\r\n def cost(a, cache):\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RY(a[0], wires=0)\r\n qml.RX(a[1], wires=0)\r\n qml.RY(a[2], wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n qml.expval(qml.PauliZ(1))\r\n\r\n res = qml.interfaces.batch.execute(\r\n [tape], dev, cache=cache, interface=\"jax-python\", **execute_kwargs\r\n )\r\n return res[0]\r\n\r\n with pytest.raises(InterfaceUnsupportedError):\r\n jax.jacobian(cost)(params, cache=None)\r\n\r\n\r\ndef test_diff_method_None_jit():\r\n \"\"\"Test that jitted execution works when `gradient_fn=None`.\"\"\"\r\n\r\n dev = qml.device(\"default.qubit.jax\", wires=1, shots=10)\r\n\r\n @jax.jit\r\n def wrapper(x):\r\n with qml.tape.QuantumTape() as tape:\r\n qml.RX(x, wires=0)\r\n qml.expval(qml.PauliZ(0))\r\n\r\n return qml.execute([tape], dev, gradient_fn=None)\r\n\r\n assert jnp.allclose(wrapper(jnp.array(0.0))[0], 1.0)\r\n"
] | [
[
"numpy.allclose",
"numpy.linspace",
"numpy.sqrt",
"numpy.abs",
"numpy.eye",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"scipy.stats.norm",
"numpy.array",
"numpy.sum"
],
[
"numpy.array"
],
[
"numpy.random.random",
"numpy.sqrt",
"numpy.allclose",
"numpy.nonzero",
"numpy.sinh",
"numpy.ones",
"numpy.array",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.squeeze",
"numpy.cos",
"numpy.sin",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
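For context, a distilled sketch of the pattern the adjoint-template tests in the record above exercise: apply a template, then its adjoint, and check that the state returns to |000>. It assumes the `fn` used in those tests resolves to the public `qml.adjoint` transform; `AngleEmbedding`, `default.qubit`, and `qml.state` are real PennyLane APIs, while the wire count and feature values are illustrative. The snippet is not part of the dataset record.

import numpy as np
import pennylane as qml

dev = qml.device("default.qubit", wires=3)

@qml.qnode(dev)
def circuit(features):
    # Template followed by its adjoint: the two should cancel exactly.
    qml.templates.AngleEmbedding(features=features, wires=[0, 1, 2])
    qml.adjoint(qml.templates.AngleEmbedding)(features=features, wires=[0, 1, 2])
    return qml.state()

features = np.array([1.0, 2.0, 3.0])
state = circuit(features)
expected = np.zeros(2 ** 3)
expected[0] = 1.0
assert np.allclose(state, expected)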
kgizdov/hep_ml | [
"114ac9e896c3a601761092760a7b315f448d59c6"
] | [
"tests/test_nnet.py"
] | [
"from __future__ import division, print_function\n\nimport numpy\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.metrics import roc_auc_score, mean_squared_error, log_loss\nfrom sklearn.base import clone\nfrom sklearn.datasets import make_blobs\n\nfrom hep_ml import nnet\nfrom hep_ml.commonutils import generate_sample\nfrom hep_ml.nnet import MLPRegressor\nfrom hep_ml.preprocessing import BinTransformer, IronTransformer\n\n__author__ = 'Alex Rogozhnikov'\n\nnn_types = [\n nnet.SimpleNeuralNetwork,\n nnet.MLPClassifier,\n nnet.SoftmaxNeuralNetwork,\n nnet.RBFNeuralNetwork,\n nnet.PairwiseNeuralNetwork,\n nnet.PairwiseSoftplusNeuralNetwork,\n]\n\n\n# TODO test pipelines, bagging and boosting\n\ndef check_single_classification_network(neural_network, n_samples=200, n_features=7, distance=0.8, retry_attempts=3):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n # each combination is tried 3 times. before raising exception\n\n for retry_attempt in range(retry_attempts):\n # to initial state\n neural_network = clone(neural_network)\n neural_network.set_params(random_state=42 + retry_attempt)\n print(neural_network)\n neural_network.fit(X, y)\n quality = roc_auc_score(y, neural_network.predict_proba(X)[:, 1])\n # checking that computations don't fail\n computed_loss = neural_network.compute_loss(X, y, sample_weight=y * 0 + 1)\n if quality > 0.8:\n break\n else:\n print('attempt {} : {}'.format(retry_attempt, quality))\n if retry_attempt == retry_attempts - 1:\n raise RuntimeError('quality of model is too low: {} {}'.format(quality, neural_network))\n\n\ndef test_classification_nnets():\n \"\"\"\n checking combinations of losses, nn_types, trainers, most of them are used once during tests.\n \"\"\"\n attempts = max(len(nnet.losses), len(nnet.trainers), len(nn_types))\n losses_shift = numpy.random.randint(10)\n trainers_shift = numpy.random.randint(10)\n for combination in range(attempts):\n loss = list(nnet.losses.keys())[(combination + losses_shift) % len(nnet.losses)]\n trainer = list(nnet.trainers.keys())[(combination + trainers_shift) % len(nnet.trainers)]\n\n nn_type = nn_types[combination % len(nn_types)]\n neural_network = nn_type(layers=[5], loss=loss, trainer=trainer, epochs=200)\n yield check_single_classification_network, neural_network\n\n\ndef test_regression_nnets():\n from sklearn.datasets import make_regression\n X, y = make_regression(n_samples=300, n_features=20, n_informative=10, bias=5)\n print(y[:20])\n\n original_mse = mean_squared_error(y, y * 0 + y.mean())\n for loss in ['mse_loss', 'smooth_huber_loss']:\n reg = MLPRegressor(layers=(5,), loss=loss)\n reg.fit(X, y)\n p = reg.predict(X)\n print(numpy.sort(abs(p))[-10:])\n mse = mean_squared_error(y, p)\n assert mse < original_mse * 0.3\n\n # fitting a constant\n y[:] = 100.\n for loss in ['mse_loss', 'smooth_huber_loss']:\n reg = MLPRegressor(layers=(1,), loss=loss, epochs=300)\n reg.fit(X, y)\n print(mean_squared_error(y, reg.predict(X)))\n assert mean_squared_error(y, reg.predict(X)) < 5., \"doesn't fit constant\"\n\n\ndef compare_nnets_quality(n_samples=200, n_features=7, distance=0.8):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n # checking all possible combinations\n for loss in ['log_loss']: # nnet.losses:\n for NNType in nn_types:\n for trainer in nnet.trainers:\n nn = NNType(layers=[5], loss=loss, trainer=trainer, epochs=100, random_state=42)\n nn.fit(X, y)\n print(roc_auc_score(y, nn.predict_proba(X)[:, 
1]), nn)\n\n lr = LogisticRegression().fit(X, y)\n print(roc_auc_score(y, lr.predict_proba(X)[:, 1]), lr)\n\n\ndef test_network_with_scaler(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for scaler in [BinTransformer(max_bins=16), IronTransformer()]:\n clf = nnet.SimpleNeuralNetwork(scaler=scaler, epochs=300)\n clf.fit(X, y)\n\n p = clf.predict_proba(X)\n assert roc_auc_score(y, p[:, 1]) > 0.8, 'quality is too low for model: {}'.format(clf)\n\n\ndef test_adaptive_methods(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for trainer in ['sgd', 'adadelta']:\n clf = nnet.SimpleNeuralNetwork(trainer=trainer, trainer_parameters={'batch': 1})\n clf.fit(X, y)\n assert roc_auc_score(y, clf.predict_proba(X)[:, 1]) > 0.8, 'quality is too low for model: {}'.format(clf)\n\n\ndef test_reproducibility(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for trainer in nnet.trainers.keys():\n clf1 = nnet.MLPClassifier(trainer=trainer, random_state=42).fit(X, y)\n clf2 = nnet.MLPClassifier(trainer=trainer, random_state=42).fit(X, y)\n assert numpy.allclose(clf1.predict_proba(X), clf2.predict_proba(X))\n\n\ndef test_multiclassification(n_samples=200, n_features=10):\n for n_classes in [2, 3, 4]:\n X, y = make_blobs(n_samples=n_samples, centers=n_classes, n_features=n_features)\n losses = []\n for n_epochs in [1, 10, 100]:\n clf = nnet.MLPMultiClassifier(epochs=n_epochs).fit(X, y)\n loss1 = log_loss(y, clf.predict_proba(X))\n loss2 = clf.compute_loss(X, y)\n assert numpy.allclose(loss1, loss2), 'computed losses are different'\n losses.append(loss1)\n\n assert losses[0] > losses[-1], 'loss is not decreasing'\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.allclose",
"sklearn.metrics.mean_squared_error",
"sklearn.datasets.make_regression",
"sklearn.base.clone",
"sklearn.linear_model.logistic.LogisticRegression",
"sklearn.datasets.make_blobs",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
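For context, a minimal sketch of the quality gate the hep_ml tests in the record above rely on: fit a classifier on a separable sample and require a ROC AUC above a threshold. `make_blobs`, `LogisticRegression`, and `roc_auc_score` are standard scikit-learn APIs; the sample size and the 0.8 threshold are illustrative assumptions, and the snippet is not part of the dataset record.

from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

# Generate a well-separated two-class sample and fit a baseline classifier.
X, y = make_blobs(n_samples=200, centers=2, n_features=7, random_state=42)
clf = LogisticRegression().fit(X, y)

# Same style of check as check_single_classification_network in the record above.
auc = roc_auc_score(y, clf.predict_proba(X)[:, 1])
assert auc > 0.8, 'quality is too low for model: {}'.format(clf)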
jopetty/transd-dev | [
"0078dfd8a049f5b97a7b3be6e883821e4994d4c0"
] | [
"src/models/modules/rnn_decoder.py"
] | [
"import random\nfrom typing import Dict\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\n\n\nclass RNNDecoder(nn.Module):\n @property\n def max_gen_length(self) -> int:\n return self.hparams[\"dec_max_gen_length\"]\n\n @property\n def EOS_idx(self) -> int:\n return self.hparams[\"dec_EOS_idx\"]\n\n def __init__(self, hparams: dict) -> None:\n super().__init__()\n\n self.hparams = hparams\n\n self.embedding = nn.Embedding(\n hparams[\"dec_vocab_size\"], hparams[\"dec_embedding_size\"]\n )\n self.unit = nn.RNN(\n hparams[\"dec_embedding_size\"],\n hparams[\"dec_hidden_size\"],\n num_layers=hparams[\"dec_num_layers\"],\n batch_first=True,\n )\n self.output = nn.Linear(hparams[\"dec_hidden_size\"], hparams[\"dec_vocab_size\"])\n\n def forward_step(self, step_input: Dict[str, Tensor]) -> Dict[str, Tensor]:\n\n # Unsqueeze if only one batch is present\n no_squeeze = lambda a: a.unsqueeze(0) if a.shape == 2 else a\n\n # print(\"Step Input\")\n # for key in step_input:\n # print(f\"{key}: {step_input[key].shape}\")\n\n h = no_squeeze(step_input[\"h\"])\n unit_input = no_squeeze(F.relu(self.embedding(step_input[\"x\"])))\n _, state = self.unit(unit_input, h)\n y = self.output(no_squeeze(state[-1, :, :]))\n\n # print(f\"h: {h.shape}\")\n # print(f\"unit_input: {unit_input.shape}\")\n # print(f\"unk: {unk.shape}\")\n # print(f\"state: {state.shape}\")\n # print(f\"state[-1]: {state[-1].shape}\")\n # print(f\"y: {y.shape}\")\n\n return {\"y\": y, \"h\": state}\n\n def get_step_input(self, dec_input: Dict[str, Tensor]) -> Dict[str, Tensor]:\n\n if \"h\" in dec_input:\n h = dec_input[\"h\"]\n elif \"encoder_last_state\" in dec_input:\n h = torch.transpose(dec_input[\"encoder_last_state\"], 0, 1)\n else:\n raise ValueError(\n f\"You must provide a hidden input in dec_input '{dec_input}'\"\n )\n\n if \"x\" in dec_input:\n x = dec_input[\"x\"]\n elif \"transform\" in dec_input:\n # print(\"No x found\")\n # print(dec_input[\"transform\"][:, 1:-1].shape)\n x = dec_input[\"transform\"][:, 1:-1]\n else:\n raise ValueError(\n f\"You must provide a step input in dec_input '{dec_input}'\"\n )\n\n step_input = {\"x\": x, \"h\": h}\n\n if \"encoder_output\" in dec_input:\n step_input[\"encoder_output\"] = dec_input[\"encoder_output\"]\n\n return step_input\n\n def forward(self, dec_input: Dict[str, Tensor], tf_ratio) -> Dict[str, Tensor]:\n\n is_teacher_forcing = random.random() < tf_ratio\n\n batch_size: int = dec_input[\"encoder_output\"].shape[0]\n hidden_size: int = self.output.in_features\n vocab_size: int = self.output.out_features\n gen_length = (\n dec_input[\"target\"][0].shape[0]\n if is_teacher_forcing\n else self.max_gen_length\n )\n\n dec_step_input = self.get_step_input(dec_input)\n\n has_finished = torch.zeros(batch_size, dtype=torch.bool)\n dec_output = torch.zeros(gen_length, batch_size, vocab_size)\n dec_hidden = torch.zeros(gen_length, batch_size, hidden_size)\n\n for i in range(gen_length):\n\n # print(f\"STEP {i} (tf={is_teacher_forcing})\")\n\n step_result = self.forward_step(dec_step_input)\n step_prediction = step_result[\"y\"].argmax(dim=-1)\n\n # for key in step_result:\n # print(f\"step_result[{key}]: {step_result[key].shape}\")\n # print(\"dec_hidden: \", dec_hidden.shape)\n\n dec_output[i] = step_result[\"y\"]\n dec_hidden[i] = step_result[\"h\"]\n\n has_finished[step_prediction == self.EOS_idx] = True\n if all(has_finished):\n break\n else:\n x = dec_input[\"target\"][:, i] if is_teacher_forcing else step_prediction\n step_result[\"x\"] = 
x.unsqueeze(-1)\n step_result[\"encoder_output\"] = dec_input[\"encoder_output\"]\n\n dec_step_input = self.get_step_input(step_result)\n\n output = {\n \"logits\": torch.transpose(dec_output, 0, 1),\n \"predictions\": torch.transpose(dec_output, 0, 1).argmax(dim=-1),\n \"decoder_hiddens\": dec_hidden,\n }\n\n return output\n"
] | [
[
"torch.transpose",
"torch.zeros",
"torch.nn.RNN",
"torch.nn.Embedding",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
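For context, a minimal sketch of driving a single decode step of the `RNNDecoder` defined in the record above. The hyperparameter keys and input dictionary keys are taken from the module itself; the concrete sizes, the batch shape, and the import path are illustrative assumptions, and the snippet is not part of the dataset record.

import torch

# Assumed import path, mirroring src/models/modules/rnn_decoder.py from the record.
from src.models.modules.rnn_decoder import RNNDecoder

hparams = {
    "dec_vocab_size": 12,
    "dec_embedding_size": 8,
    "dec_hidden_size": 16,
    "dec_num_layers": 1,
    "dec_max_gen_length": 10,
    "dec_EOS_idx": 1,
}
decoder = RNNDecoder(hparams)

batch_size = 4
step_input = {
    "x": torch.zeros(batch_size, 1, dtype=torch.long),            # previous token ids
    "h": torch.zeros(1, batch_size, hparams["dec_hidden_size"]),  # initial hidden state
}
out = decoder.forward_step(step_input)
print(out["y"].shape)  # torch.Size([4, 12])  -- logits over the decoder vocabulary
print(out["h"].shape)  # torch.Size([1, 4, 16])  -- updated hidden state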
continue-nature/google-research | [
"7011fe008efc4f11592ace842dbd4c9dffd46c29"
] | [
"capsule_em/norb/norb_record.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input utility functions for norb.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport os.path\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\ndef _read_and_decode(filename_queue, image_pixel=96, distort=0):\n \"\"\"Read a norb tf record file.\"\"\"\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'meta': tf.FixedLenFeature([4], tf.int64),\n })\n\n # Convert from a scalar string tensor (whose single string has\n # length image_pixels) to a uint8 tensor with shape\n # [image_pixels].\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n height = tf.cast(features['height'], tf.int32)\n depth = tf.cast(features['depth'], tf.int32)\n image = tf.reshape(image, tf.stack([depth, height, height]))\n image = tf.transpose(image, [1, 2, 0])\n image = tf.cast(image, tf.float32)\n print(image.get_shape()[0].value)\n if image_pixel < 96:\n print('image resizing to {}'.format(image_pixel))\n image = tf.image.resize_images(image, [image_pixel, image_pixel])\n orig_images = image\n\n if image_pixel == 48:\n new_dim = 32\n elif image_pixel == 32:\n new_dim = 22\n if distort == 1:\n image = tf.image.random_brightness(image, max_delta=63)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))\n # 0.26179938779 is 15 degress in radians\n image = tf.image.per_image_standardization(image)\n image_pixel = new_dim\n elif distort == 2:\n image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)\n image = tf.image.per_image_standardization(image)\n image_pixel = new_dim\n else:\n image = image * (1.0 / 255.0)\n image = tf.div(\n tf.subtract(image, tf.reduce_min(image)),\n tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))\n\n # Convert label from a scalar uint8 tensor to an int32 scalar.\n label = tf.cast(features['label'], tf.int32)\n\n return image, label, image_pixel, orig_images\n\n\nbxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]]\n\n\ndef inputs(train_dir,\n batch_size,\n split,\n multi,\n image_pixel=96,\n distort=False,\n patching=False):\n \"\"\"Reads input data num_epochs times.\"\"\"\n if multi:\n filename = os.path.join(train_dir, '{}duo-az.tfrecords'.format(split))\n else:\n filename = os.path.join(train_dir, '{}.tfrecords'.format(split))\n\n with tf.name_scope('input'):\n filename_queue = tf.train.string_input_producer([filename])\n\n if distort:\n d = 1 + (split == 'test')\n else:\n d = 0\n\n # Even 
when reading in multiple threads, share the filename\n # queue.\n image, label, dim, orig_image = _read_and_decode(\n filename_queue, image_pixel=image_pixel, distort=d)\n orig_image.set_shape([48, 48, 1 + multi])\n image.set_shape([dim, dim, 1 + multi])\n image = tf.transpose(image, [2, 0, 1])\n\n if split == 'train':\n images, sparse_labels = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=2,\n capacity=2000 + 3 * batch_size,\n # Ensures a minimum amount of shuffling of examples.\n min_after_dequeue=2000)\n else:\n images, sparse_labels, orig_images = tf.train.batch(\n [image, label, orig_image],\n batch_size=batch_size,\n num_threads=1,\n capacity=1000 + 3 * batch_size)\n if patching:\n t_images = tf.tile(orig_images, [4, 1, 1, 1])\n c_images = tf.image.extract_glimpse(\n t_images, [32, 32], bxs_m2, centered=True, normalized=False)\n c2images = tf.image.extract_glimpse(\n t_images, [32, 32],\n 2 * np.array(bxs_m2),\n centered=True,\n normalized=False)\n c3images = tf.image.extract_glimpse(\n t_images, [32, 32],\n 3 * np.array(bxs_m2),\n centered=True,\n normalized=False)\n c_images = tf.map_fn(tf.image.per_image_standardization, c_images)\n c2images = tf.map_fn(tf.image.per_image_standardization, c2images)\n c3images = tf.map_fn(tf.image.per_image_standardization, c3images)\n c_images = tf.transpose(c_images, [0, 3, 1, 2])\n c2images = tf.transpose(c2images, [0, 3, 1, 2])\n c3images = tf.transpose(c3images, [0, 3, 1, 2])\n # cc_images = tf.concat([images, m_images, c_images], axis=0)\n # cc_labels = tf.tile(sparse_labels, [9])\n cc_images = tf.concat([images, c_images, c2images, c3images], axis=0)\n cc_labels = tf.tile(sparse_labels, [13])\n features = {\n 'images': images,\n 'labels': tf.one_hot(sparse_labels, 5),\n 'recons_image': images,\n 'recons_label': sparse_labels,\n 'height': dim,\n 'depth': 1 + multi,\n 'num_classes': 5,\n 'cc_images': cc_images,\n 'cc_recons_label': cc_labels,\n 'cc_labels': tf.one_hot(cc_labels, 5),\n }\n\n return features\n"
] | [
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.image.per_image_standardization",
"tensorflow.compat.v1.TFRecordReader",
"tensorflow.compat.v1.image.random_brightness",
"tensorflow.compat.v1.image.random_contrast",
"tensorflow.compat.v1.decode_raw",
"tensorflow.compat.v1.image.resize_images",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.train.batch",
"tensorflow.compat.v1.train.shuffle_batch",
"tensorflow.compat.v1.image.extract_glimpse",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.train.string_input_producer",
"tensorflow.compat.v1.map_fn",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.reduce_max",
"numpy.array",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.image.resize_image_with_crop_or_pad",
"tensorflow.compat.v1.reduce_min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
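For context, the norb loader in the record above rescales each undistorted image to [0, 1] with a per-image min-max normalisation after dividing by 255. A NumPy re-expression of that single step (illustrative only, not part of the dataset record):

import numpy as np

def min_max_rescale(image):
    # Mirrors the tf.div / tf.subtract / tf.reduce_min / tf.reduce_max block
    # in _read_and_decode above.
    image = image.astype(np.float32) * (1.0 / 255.0)
    return (image - image.min()) / (image.max() - image.min())

img = np.random.randint(0, 256, size=(96, 96, 1), dtype=np.uint8)
scaled = min_max_rescale(img)
print(scaled.min(), scaled.max())  # 0.0 1.0 (whenever the image is not constant)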
haowen-xu/ml-essentials | [
"ca44186be37887461205227c32995f1485b4ff41"
] | [
"mltk/data/loaders.py"
] | [
"\"\"\"\nSimple dataset loaders.\n\nFor more datasets and more comprehensive loaders, you may turn to dedicated\nlibraries like `fuel`.\n\"\"\"\n\nimport gzip\nimport hashlib\nimport os\nimport pickle\nfrom typing import *\n\nimport idx2numpy\nimport numpy as np\n\nfrom ..typing_ import *\nfrom ..utils import CacheDir, validate_enum_arg\n\n__all__ = ['load_mnist', 'load_fashion_mnist', 'load_cifar10', 'load_cifar100']\n\n_MNIST_LIKE_FILE_NAMES = {\n 'train_x': 'train-images-idx3-ubyte.gz',\n 'train_y': 'train-labels-idx1-ubyte.gz',\n 'test_x': 't10k-images-idx3-ubyte.gz',\n 'test_y': 't10k-labels-idx1-ubyte.gz',\n}\n_MNIST_URI_PREFIX = 'http://yann.lecun.com/exdb/mnist/'\n_MNIST_FILE_MD5 = {\n 'train_x': 'f68b3c2dcbeaaa9fbdd348bbdeb94873',\n 'train_y': 'd53e105ee54ea40749a09fcbcd1e9432',\n 'test_x': '9fb629c4189551a2d022fa330f9573f3',\n 'test_y': 'ec29112dd5afa0611ce80d1b7f02629c',\n}\n_FASHION_MNIST_URI_PREFIX = 'http://fashion-mnist.s3-website.eu-central-1.' \\\n 'amazonaws.com/'\n_FASHION_MNIST_FILE_MD5 = {\n 'train_x': '8d4fb7e6c68d591d4c3dfef9ec88bf0d',\n 'train_y': '25c81989df183df01b3e8a0aad5dffbe',\n 'test_x': 'bef4ecab320f06d8554ea6380940ec79',\n 'test_y': 'bb300cfdad3c16e7a12a480ee83cd310',\n}\n\n_CIFAR_10_URI = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n_CIFAR_10_MD5 = 'c58f30108f718f92721af3b95e74349a'\n_CIFAR_10_CONTENT_DIR = 'cifar-10-batches-py'\n_CIFAR_100_URI = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n_CIFAR_100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'\n_CIFAR_100_CONTENT_DIR = 'cifar-100-python'\n\n\ndef _validate_x_shape(shape, default_shape):\n shape = tuple(int(v) for v in shape)\n default_shape = tuple(int(v) for v in default_shape)\n value_size = int(np.prod(default_shape))\n\n if np.prod(shape) != value_size:\n raise ValueError(f'`x_shape` does not product to {value_size}: {shape}')\n return shape\n\n\ndef load_mnist_like(uri_prefix: str,\n file_md5: Dict[str, str],\n cache_name: str,\n x_shape: Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST-like dataset as NumPy arrays.\n\n Args:\n uri_prefix: Common prefix of the URIs in `remote_files`.\n file_md5: The remote file MD5 hash sums, a dict of\n `{'train_x': ..., 'train_y': ..., 'test_x': ..., 'test_y': ...}`,\n where each value is the md5 sum.\n cache_name: Name of the cache directory.\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n\n def _fetch_array(array_name):\n uri = uri_prefix + _MNIST_LIKE_FILE_NAMES[array_name]\n md5 = file_md5[array_name]\n path = CacheDir(cache_name).download(\n uri, hasher=hashlib.md5(), expected_hash=md5)\n with gzip.open(path, 'rb') as f:\n return idx2numpy.convert_from_file(f)\n\n # check arguments\n x_shape = _validate_x_shape(x_shape, (28, 28))\n\n # load data\n train_x = _fetch_array('train_x').astype(x_dtype)\n train_y = _fetch_array('train_y').astype(y_dtype)\n test_x = _fetch_array('test_x').astype(x_dtype)\n test_y = _fetch_array('test_y').astype(y_dtype)\n\n assert(len(train_x) == len(train_y) == 60000)\n assert(len(test_x) == len(test_y) == 10000)\n\n # change shape\n train_x = train_x.reshape([len(train_x)] + list(x_shape))\n test_x = test_x.reshape([len(test_x)] + list(x_shape))\n\n return (train_x, train_y), (test_x, test_y)\n\n\ndef load_mnist(x_shape: 
Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n return load_mnist_like(\n _MNIST_URI_PREFIX, _MNIST_FILE_MD5, 'mnist', x_shape, x_dtype, y_dtype)\n\n\ndef load_fashion_mnist(x_shape: Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n return load_mnist_like(\n _FASHION_MNIST_URI_PREFIX, _FASHION_MNIST_FILE_MD5, 'fashion_mnist',\n x_shape, x_dtype, y_dtype)\n\n\ndef _cifar_load_batch(path, x_shape, x_dtype, y_dtype, expected_batch_label,\n labels_key='labels'):\n # load from file\n with open(path, 'rb') as f:\n d = {\n k.decode('utf-8'): v\n for k, v in pickle.load(f, encoding='bytes').items()\n }\n d['batch_label'] = d['batch_label'].decode('utf-8')\n assert(d['batch_label'] == expected_batch_label)\n\n data = np.asarray(d['data'], dtype=x_dtype)\n labels = np.asarray(d[labels_key], dtype=y_dtype)\n\n # change shape\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = np.transpose(data, (0, 2, 3, 1))\n if x_shape:\n data = data.reshape([data.shape[0]] + list(x_shape))\n\n return data, labels\n\n\ndef load_cifar10(x_shape: Sequence[int] = (32, 32, 3),\n x_dtype: ArrayDType = np.float32,\n y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load the CIFAR-10 dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n # check the arguments\n x_shape = _validate_x_shape(x_shape, (32, 32, 3))\n\n # fetch data\n path = CacheDir('cifar').download_and_extract(\n _CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_10_MD5)\n data_dir = os.path.join(path, _CIFAR_10_CONTENT_DIR)\n\n # load the data\n train_num = 50000\n train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)\n train_y = np.zeros((train_num,), dtype=y_dtype)\n\n for i in range(1, 6):\n path = os.path.join(data_dir, 'data_batch_{}'.format(i))\n x, y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='training batch {} of 5'.format(i)\n )\n (train_x[(i - 1) * 10000: i * 10000, ...],\n train_y[(i - 1) * 10000: i * 10000]) = x, y\n\n path = os.path.join(data_dir, 'test_batch')\n test_x, test_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='testing batch 1 of 1'\n )\n assert(len(test_x) == len(test_y) == 10000)\n\n return (train_x, train_y), (test_x, test_y)\n\n\ndef load_cifar100(label_mode: str = 'fine',\n x_shape: Sequence[int] = (32, 32, 3),\n x_dtype: ArrayDType = np.float32,\n y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load the CIFAR-100 dataset as NumPy arrays.\n\n Args:\n label_mode: One of {\"fine\", \"coarse\"}.\n x_shape: Reshape 
each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n # check the arguments\n label_mode = validate_enum_arg('label_mode', label_mode, ('fine', 'coarse'))\n x_shape = _validate_x_shape(x_shape, (32, 32, 3))\n\n # fetch data\n path = CacheDir('cifar').download_and_extract(\n _CIFAR_100_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_100_MD5)\n data_dir = os.path.join(path, _CIFAR_100_CONTENT_DIR)\n\n # load the data\n path = os.path.join(data_dir, 'train')\n train_x, train_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='training batch 1 of 1',\n labels_key='{}_labels'.format(label_mode)\n )\n assert(len(train_x) == len(train_y) == 50000)\n\n path = os.path.join(data_dir, 'test')\n test_x, test_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='testing batch 1 of 1',\n labels_key='{}_labels'.format(label_mode)\n )\n assert(len(test_x) == len(test_y) == 10000)\n\n return (train_x, train_y), (test_x, test_y)\n"
] | [
[
"numpy.asarray",
"numpy.zeros",
"numpy.prod",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
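For context, a minimal usage sketch for the loaders defined in the record above; the function names and keyword arguments come from the record itself, while the flattened `x_shape` and the assumption that `mltk` is importable are illustrative. Not part of the dataset record.

import numpy as np
from mltk.data.loaders import load_cifar10, load_mnist

# MNIST, flattened to 784-dim float vectors (784 == 28 * 28, as _validate_x_shape requires).
(train_x, train_y), (test_x, test_y) = load_mnist(x_shape=(784,), x_dtype=np.float32)
print(train_x.shape, train_y.shape)  # (60000, 784) (60000,)

# CIFAR-10 with its default (32, 32, 3) image shape.
(train_x, train_y), (test_x, test_y) = load_cifar10()
print(train_x.shape, test_x.shape)   # (50000, 32, 32, 3) (10000, 32, 32, 3)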
fxia22/gibson_demos | [
"5f8d253694b23b41c53959774203ba5787578b74",
"5f8d253694b23b41c53959774203ba5787578b74"
] | [
"igibson/test/test_motion_planning.py",
"igibson/test/benchmark/benchmark_interactive_scene_rendering.py"
] | [
"import igibson\nfrom igibson.envs.igibson_env import iGibsonEnv\nfrom time import time\nimport os\nfrom igibson.utils.assets_utils import download_assets, download_demo_data\nfrom igibson.utils.motion_planning_wrapper import MotionPlanningWrapper\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef test_occupancy_grid():\n print(\"Test env\")\n download_assets()\n download_demo_data()\n config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')\n \n nav_env = iGibsonEnv(config_file=config_filename, mode='headless')\n nav_env.reset()\n nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])\n nav_env.simulator.step()\n\n action = nav_env.action_space.sample()\n ts = nav_env.step(action)\n assert np.sum(ts[0]['occupancy_grid'] == 0) > 0\n assert np.sum(ts[0]['occupancy_grid'] == 1) > 0\n plt.imshow(ts[0]['occupancy_grid'][:,:,0])\n plt.colorbar()\n plt.savefig('occupancy_grid.png')\n nav_env.clean()\n\n\ndef test_base_planning():\n print(\"Test env\")\n download_assets()\n download_demo_data()\n config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')\n\n nav_env = iGibsonEnv(config_file=config_filename, mode='headless')\n motion_planner = MotionPlanningWrapper(nav_env)\n state = nav_env.reset()\n nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])\n nav_env.simulator.step()\n plan = None\n itr = 0\n while plan is None and itr < 10:\n plan = motion_planner.plan_base_motion([0.5,0,0])\n print(plan)\n itr += 1\n motion_planner.dry_run_base_plan(plan)\n\n assert len(plan) > 0 \n nav_env.clean()\n\n",
"#!/usr/bin/env python\n\nfrom igibson.simulator import Simulator\nfrom igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\nfrom igibson.robots.turtlebot_robot import Turtlebot\nfrom igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings\nfrom igibson.utils.constants import AVAILABLE_MODALITIES\nfrom igibson.utils.utils import parse_config\nfrom igibson.utils.constants import NamedRenderingPresets\nimport os\nimport igibson\nimport time\nimport random\nimport matplotlib.pyplot as plt\nfrom igibson.utils.assets_utils import get_ig_assets_version\nfrom igibson.utils.assets_utils import get_scene_path\nimport pickle as pkl\nimport numpy as np\n\n\ndef benchmark_rendering(scene_list, rendering_presets_list, modality_list):\n config = parse_config(os.path.join(igibson.root_path, 'test', 'test.yaml'))\n assets_version = get_ig_assets_version()\n print('assets_version', assets_version)\n result = {}\n for scene_name in scene_list:\n for rendering_preset in rendering_presets_list:\n scene = InteractiveIndoorScene(\n scene_name, texture_randomization=False, object_randomization=False)\n settings = NamedRenderingPresets[rendering_preset]\n if rendering_preset == 'VISUAL_RL':\n image_width = 128\n image_height = 128\n else:\n image_width = 512\n image_height = 512\n s = Simulator(mode='headless',\n image_width=image_width,\n image_height=image_height,\n device_idx=0,\n rendering_settings=settings,\n physics_timestep=1/240.0\n )\n s.import_ig_scene(scene)\n turtlebot = Turtlebot(config)\n s.import_robot(turtlebot)\n\n for mode in modality_list:\n for _ in range(10):\n s.step()\n _ = s.renderer.render_robot_cameras(modes=(mode))\n start = time.time()\n for _ in range(200):\n _ = s.renderer.render_robot_cameras(modes=(mode))\n end = time.time()\n fps = 200 / (end - start)\n result[(scene_name, rendering_preset, mode)] = fps\n s.disconnect()\n return result\n\ndef main():\n scenes = [\"Beechwood_0_int\",\n \"Beechwood_1_int\",\n \"Benevolence_0_int\",\n \"Benevolence_1_int\",\n \"Benevolence_2_int\",\n \"Ihlen_0_int\",\n \"Ihlen_1_int\",\n \"Merom_0_int\",\n \"Merom_1_int\",\n \"Pomaria_0_int\",\n \"Pomaria_1_int\",\n \"Pomaria_2_int\",\n \"Rs_int\",\n \"Wainscott_0_int\",\n \"Wainscott_1_int\"]\n rendering_settings = ['VISUAL_RL', 'PERCEPTION']\n modalities = list(AVAILABLE_MODALITIES)\n\n result = benchmark_rendering(\n scenes,\n rendering_settings,\n modalities\n )\n\n aggregated_result = {}\n for rendering_setting in rendering_settings:\n for modality in modalities:\n all_scenes = []\n for item in result.keys():\n if item[1] == rendering_setting and item[2] == modality:\n all_scenes.append(result[item])\n aggregated_result[('MEAN', rendering_setting, modality)] = np.mean(all_scenes)\n aggregated_result[('MAX', rendering_setting, modality)] = np.max(all_scenes)\n aggregated_result[('MIN', rendering_setting, modality)] = np.min(all_scenes)\n\n print(result)\n plt.figure(figsize=(5,30))\n plt.tight_layout()\n plt.barh([\"-\".join(item) for item in result.keys()], result.values())\n for i, v in enumerate(result.values()):\n plt.text(v + 3, i, '{:.1f}'.format(v), color='blue', fontweight='bold')\n plt.xlabel('fps')\n plt.savefig('benchmark_rendering.pdf', bbox_inches = \"tight\")\n pkl.dump(result, open('rendering_benchmark_results.pkl', 'wb'))\n\n plt.figure(figsize=(5, 30))\n plt.tight_layout()\n plt.barh([\"-\".join(item) for item in aggregated_result.keys()], aggregated_result.values())\n for i, v in enumerate(aggregated_result.values()):\n plt.text(v + 3, i, 
'{:.1f}'.format(v), color='blue', fontweight='bold')\n plt.xlabel('fps')\n plt.savefig('benchmark_rendering_stats.pdf', bbox_inches=\"tight\")\n pkl.dump(aggregated_result, open('rendering_benchmark_results_stats.pkl', 'wb'))\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.imshow",
"numpy.sum",
"matplotlib.pyplot.savefig"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.max",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adityabasu1/Event-Extraction-NLP | [
"98faa88d36f09330ebce6fc180ab2f087776f2e1"
] | [
"Joint_Event_Extraction.py"
] | [
"import sys\nimport os\nimport numpy as np\nimport random\n\nfrom collections import OrderedDict\nimport pickle\nimport datetime\nfrom tqdm import tqdm\nfrom recordclass import recordclass\nimport math\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport json\n\n# Helper funcs\ndef custom_print(*msg):\n for i in range(0, len(msg)):\n if i == len(msg) - 1:\n print(msg[i])\n logger.write(str(msg[i]) + '\\n')\n else:\n print(msg[i], ' ', end='')\n logger.write(str(msg[i]))\n\n\ndef load_word_embedding(embed_file, vocab):\n custom_print('vocab length:', len(vocab))\n embed_vocab = OrderedDict()\n rev_embed_vocab = OrderedDict()\n embed_matrix = list()\n\n embed_vocab['<PAD>'] = 0\n rev_embed_vocab[0] = '<PAD>'\n embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))\n\n embed_vocab['<UNK>'] = 1\n rev_embed_vocab[1] = '<UNK>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n embed_vocab['<SOS>'] = 2\n rev_embed_vocab[2] = '<SOS>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n embed_vocab['<EOS>'] = 3\n rev_embed_vocab[3] = '<EOS>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n word_idx = 4\n with open(embed_file, \"r\") as f:\n for line in f:\n parts = line.split()\n if len(parts) < word_embed_dim + 1:\n continue\n word = parts[0]\n if word in vocab and vocab[word] >= word_min_freq:\n vec = [np.float32(val) for val in parts[1:]]\n embed_matrix.append(vec)\n embed_vocab[word] = word_idx\n rev_embed_vocab[word_idx] = word\n word_idx += 1\n\n for word in vocab:\n if word not in embed_vocab and vocab[word] >= word_min_freq:\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n embed_vocab[word] = word_idx\n rev_embed_vocab[word_idx] = word\n word_idx += 1\n\n custom_print('embed dictionary length:', len(embed_vocab))\n return embed_vocab, rev_embed_vocab, np.array(embed_matrix, dtype=np.float32)\n\n\ndef build_vocab(data, events, arguments, roles, vocab_file, embed_file):\n vocab = OrderedDict()\n char_v = OrderedDict()\n char_v['<PAD>'] = 0\n char_v['<UNK>'] = 1\n char_v[';'] = 2\n char_v['|'] = 3\n char_idx = 4\n for d in data:\n for word in d.SrcWords:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n\n for c in word:\n if c not in char_v:\n char_v[c] = char_idx\n char_idx += 1\n\n for event in events:\n vocab[event] = word_min_freq\n for argument in arguments:\n vocab[argument] = word_min_freq\n for role in roles:\n vocab[role] = word_min_freq\n\n vocab[';'] = word_min_freq\n vocab['|'] = word_min_freq\n\n word_v, rev_word_v, embed_matrix = load_word_embedding(embed_file, vocab)\n output = open(vocab_file, 'wb')\n pickle.dump([word_v, char_v], output)\n output.close()\n return word_v, rev_word_v, char_v, embed_matrix\n\n\ndef load_vocab(vocab_file):\n with open(vocab_file, 'rb') as f:\n word_v, char_v = pickle.load(f)\n return word_v, char_v\n\ndef get_adj_mat(amat):\n K = 5\n adj_mat = np.zeros((len(amat), len(amat)), np.float32)\n for i in range(len(amat)):\n for j in range(len(amat)):\n if 0 <= amat[i][j] <= K:\n adj_mat[i][j] = 1.0 / math.pow(2, amat[i][j])\n else:\n adj_mat[i][j] = 0\n return adj_mat\n\n\n\ndef get_data(src_lines, trg_lines, datatype):\n samples = []\n uid = 1\n src_len = -1\n trg_len = -1\n for i in range(0, len(src_lines)):\n src_line = src_lines[i].strip()\n trg_line = trg_lines[i].strip()\n src_words = src_line.split()\n\n if datatype == 1:\n tuples = 
trg_line.strip().split('|')\n random.shuffle(tuples)\n new_trg_line = ' | '.join(tuples)\n assert len(trg_line.split()) == len(new_trg_line.split())\n trg_line = new_trg_line\n\n trg_words = list()\n trg_words.append('<SOS>')\n trg_words += trg_line.split()\n trg_words.append('<EOS>')\n\n if datatype == 1 and (len(src_words) > max_src_len or len(trg_words) > max_trg_len + 1):\n continue\n if len(src_words) > src_len:\n src_len = len(src_words)\n if len(trg_words) > trg_len:\n trg_len = len(trg_words)\n \n sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, TrgLen=len(trg_words),\n TrgWords=trg_words) #c\n samples.append(sample)\n \n uid += 1\n print(src_len)\n print(trg_len)\n return samples\n\n\ndef read_data(src_file, trg_file, datatype):\n reader = open(src_file)\n src_lines = reader.readlines()\n reader.close()\n\n reader = open(trg_file)\n trg_lines = reader.readlines()\n reader.close()\n\n # tot_len = 100\n # src_lines = src_lines[0:min(tot_len, len(src_lines))]\n # trg_lines = trg_lines[0:min(tot_len, len(trg_lines))]\n # adj_lines = adj_lines[0:min(tot_len, len(adj_lines))]\n\n data = get_data(src_lines, trg_lines, datatype)\n return data\n\n\n#event_lines, argument_lines, roles_lines\n\n# to add option for less detailed checks\n\ndef check_event_trigger(ref_string, pred_string):\n return (ref_string == pred_string)\n pass\n\ndef check_event_type(ref_string, pred_string, event_lines):\n if granular_mode == 0:\n if pred_string in event_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\n if granular_mode == 1:\n pred_token = pred_string.split(\":\")[0]\n ref_token = ref_string.split(\":\")[0]\n return (pred_token == ref_token)\n pass\n\n\ndef check_event_argument(ref_string, pred_string):\n return (ref_string == pred_string)\n pass\n\ndef check_argument_type(ref_string, pred_string, argument_lines):\n if granular_mode == 0:\n if pred_string in argument_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\n if granular_mode == 1:\n pred_token = pred_string.split(\":\")[0]\n ref_token = ref_string.split(\":\")[0]\n return (pred_token == ref_token)\n pass\n\ndef check_argument_role(ref_string, pred_string, roles_lines):\n if pred_string in roles_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\ndef calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines):\n\n list_of_tracking_metrics = ['predicted_tuples',\n 'ground_truth_tuples',\n 'correct_predictions',\n 'events_count',\n 'correct_events',\n 'correct_event_type',\n 'correct_arguments',\n 'correct_argment_types',\n 'correct_argument_roles'\n ]\n\n metric_counts = dict.fromkeys(list_of_tracking_metrics, 0)\n \n\n for i in range(0, min(len(ref_lines), len(pred_lines))):\n \n ref_line = ref_lines[i].strip()\n pred_line = pred_lines[i].strip()\n\n ref_tuples = ref_line.split('|')\n pred_tuples = pred_line.split('|')\n\n # find a way to compare multiple tuples\n\n # correct - t1 | t2 | t3\n # pred - p1 | p2\n # postives = 3 [number of ground truths minus nones]\n # predicted_pos = 2 [number of preds minus nones]\n # TP = correct preds \n # TP + FP = predicted\n # TP + FN = positives \n # Precision = correct / predicted_pos \n # Recall = correct / positives\n # f = pr/p+r\n\n # handling repeated predictions \n # set_of_preds = set()\n # for pred_tuple in pred_tuples:\n # set_of_preds.add(pred_tuple.strip())\n # pred_tuples = 
list(set_of_preds)\n\n for pred_tuple in pred_tuples:\n pred_strings = pred_tuple.split(';')\n if(len(pred_strings) < 3):\n continue\n\n\n # in the case of no argument detection, we only calculate the event trigger scores\n if(pred_strings[2].strip().lower()) == 'none':\n max_matches = 0\n part_matches = []\n\n for ref_tuple in ref_tuples:\n # ssss\n ev1, ev2 = cal_f1_for_pair(ref_tuple, pred_tuple, event_lines)\n\n pair_score = ev1+ev2\n\n if pair_score > max_matches:\n max_matches = pair_score\n part_matches = (ev1, ev2)\n pass\n pass\n\n metric_counts['events_count'] += 1\n if ev1 == 1:\n metric_counts['correct_events'] += 1\n if ev2 == 1:\n metric_counts['correct_event_type'] += 1\n\n continue\n \n max_matches = 0\n part_matches = cal_f1_for_tuple(ref_tuples[0], pred_tuple, event_lines, argument_lines, roles_lines)\n\n for ref_tuple in ref_tuples:\n res = cal_f1_for_tuple(ref_tuple, pred_tuple, event_lines, argument_lines, roles_lines)\n\n tuple_score = sum(res)\n\n if tuple_score >= max_matches:\n max_matches = tuple_score\n part_matches = res\n pass\n pass\n\n metric_counts['predicted_tuples'] += 1\n metric_counts['events_count'] += 1\n\n if max_matches >= 4:\n metric_counts['correct_predictions'] += 1\n if part_matches[0] == 1:\n metric_counts['correct_events'] += 1\n if part_matches[1] == 1:\n metric_counts['correct_event_type'] += 1\n if part_matches[2] == 1:\n metric_counts['correct_arguments'] += 1\n if part_matches[3] == 1:\n metric_counts['correct_argment_types'] += 1\n if part_matches[4] == 1:\n metric_counts['correct_argument_roles'] += 1\n pass\n \n for ref_tuple in ref_tuples:\n if(ref_tuple.split(';')[2].strip().lower()) != 'none':\n metric_counts['ground_truth_tuples'] += 1\n\n pass\n \n print(metric_counts)\n\n precision = float(metric_counts['correct_predictions'] / (metric_counts['predicted_tuples'] + 1e-08))\n recall = float(metric_counts['correct_predictions'] / (metric_counts['ground_truth_tuples'] + 1e-08))\n f1 = 2 * precision * recall / (precision + recall + 1e-08)\n precision = round(precision, 3)\n recall = round(recall, 3)\n f1 = round(f1, 3)\n\n print(\"Partwise Results\")\n \n event_acc = metric_counts['correct_events']/ (metric_counts['events_count'] + 1e-08)\n evtype_acc = metric_counts['correct_event_type']/ (metric_counts['events_count'] + 1e-08)\n argument_acc = metric_counts['correct_arguments']/ (metric_counts['predicted_tuples'] + 1e-08)\n argtype_acc = metric_counts['correct_argment_types']/ (metric_counts['predicted_tuples'] + 1e-08)\n role_acc = metric_counts['correct_argument_roles']/ (metric_counts['predicted_tuples'] + 1e-08)\n\n\n print(f'Event Trigger Word Accuracy: {event_acc}')\n print(f'Event Type Accuracy: {evtype_acc}')\n print(f'Argument Identification Accuracy: {argument_acc}')\n print(f'Argument Type Accuracy: {argtype_acc}')\n print(f'Argument Role Accuracy: {role_acc}')\n\n print(f'Macro f-score: {f1}')\n\n targ_file = os.path.join(trg_data_folder, 'Results_logger.txt')\n\n f = open(targ_file, \"a\")\n\n f.write(f'Event Trigger Word Accuracy: {event_acc}')\n f.write(\"\\n\")\n f.write(f'Event Type Accuracy: {evtype_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Identification Accuracy: {argument_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Type Accuracy: {argtype_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Role Accuracy: {role_acc}')\n f.write(\"\\n\")\n\n f.write(f'Macro f-score: {f1}')\n f.write(\"\\n\")\n\n f.close()\n\n\n return f1\n\ndef cal_f1_for_pair(ref_tuple: str ,\n pred_tuple: str,\n event_lines: list\n ) -> 
list:\n \n ref_strings = ref_tuple.split(';')\n pred_strings = pred_tuple.split(';')\n\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n\n return ev1, ev2\n\ndef cal_f1_for_tuple(ref_tuple: str ,\n pred_tuple: str,\n event_lines: list,\n argument_lines: list,\n roles_lines: list\n ) -> list:\n\n ref_strings = ref_tuple.split(';')\n pred_strings = pred_tuple.split(';')\n\n if (len (pred_strings) != 5 ):\n if (len (pred_strings) >= 2 ):\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n return [ev1, ev2, 0, 0, 0]\n return list([0,0,0,0,0])\n\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n ev3 = int( check_event_argument(ref_strings[2].strip(), pred_strings[2].strip()) )\n ev4 = int( check_argument_type(ref_strings[3].strip(), pred_strings[3].strip(), argument_lines) )\n ev5 = int( check_argument_role(ref_strings[4].strip(), pred_strings[4].strip(), roles_lines) )\n\n ret = [ev1, ev2, ev3, ev4, ev5]\n \n return ret\n\n\n\ndef get_model(model_id):\n if model_id == 1:\n return SeqToSeqModel()\n\ndef write_test_res(data, preds, attns, outfile):\n writer = open(outfile, 'w')\n for i in range(0, len(data)):\n pred_words = get_pred_words(preds[i], attns[i], data[i].SrcWords)[:-1]\n writer.write(' '.join(pred_words) + '\\n')\n writer.close()\n\n\ndef set_random_seeds(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 1:\n torch.cuda.manual_seed_all(seed)\n\ndef get_max_len(sample_batch):\n src_max_len = len(sample_batch[0].SrcWords)\n for idx in range(1, len(sample_batch)):\n if len(sample_batch[idx].SrcWords) > src_max_len:\n src_max_len = len(sample_batch[idx].SrcWords)\n\n trg_max_len = len(sample_batch[0].TrgWords)\n for idx in range(1, len(sample_batch)):\n if len(sample_batch[idx].TrgWords) > trg_max_len:\n trg_max_len = len(sample_batch[idx].TrgWords)\n\n return src_max_len, trg_max_len\n\ndef get_words_index_seq(words, max_len):\n seq = list()\n for word in words:\n if word in word_vocab:\n seq.append(word_vocab[word])\n else:\n seq.append(word_vocab['<UNK>'])\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n seq.append(word_vocab['<PAD>'])\n return seq\n\n\ndef get_target_words_index_seq(words, max_len):\n seq = list()\n for word in words:\n if word in word_vocab:\n seq.append(word_vocab[word])\n else:\n seq.append(word_vocab['<UNK>'])\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n seq.append(word_vocab['<EOS>'])\n return seq\n\n\ndef get_padded_mask(cur_len, max_len):\n mask_seq = list()\n for i in range(0, cur_len):\n mask_seq.append(0)\n pad_len = max_len - cur_len\n for i in range(0, pad_len):\n mask_seq.append(1)\n return mask_seq\n\n\ndef get_target_vocab_mask(src_words):\n mask = []\n for i in range(0, len(word_vocab)):\n mask.append(1)\n for word in src_words:\n if word in word_vocab:\n mask[word_vocab[word]] = 0\n # events, arguments, roles\n for event in events:\n mask[word_vocab[event]] = 0\n for argument in arguments:\n mask[word_vocab[argument]] = 0\n for role in roles:\n mask[word_vocab[role]] = 0\n\n mask[word_vocab['<UNK>']] = 0\n mask[word_vocab['<EOS>']] = 0\n mask[word_vocab[';']] = 0\n mask[word_vocab['|']] = 0\n 
return mask\n\n\ndef get_rel_mask(trg_words, max_len):\n mask_seq = list()\n for word in trg_words:\n mask_seq.append(0)\n # if word in relations:\n # mask_seq.append(0)\n # else:\n # mask_seq.append(1)\n pad_len = max_len - len(trg_words)\n for i in range(0, pad_len):\n mask_seq.append(1)\n return mask_seq\n\n\ndef get_char_seq(words, max_len):\n char_seq = list()\n for i in range(0, conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n for word in words:\n for c in word[0:min(len(word), max_word_len)]:\n if c in char_vocab:\n char_seq.append(char_vocab[c])\n else:\n char_seq.append(char_vocab['<UNK>'])\n pad_len = max_word_len - len(word)\n for i in range(0, pad_len):\n char_seq.append(char_vocab['<PAD>'])\n for i in range(0, conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n for i in range(0, max_word_len + conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n return char_seq\n\n\n\ndef get_relations(file_name):\n rels = []\n reader = open(file_name)\n lines = reader.readlines()\n reader.close()\n for line in lines:\n rels.append(line.strip())\n return rels\n\ndef get_batch_data(cur_samples, is_training=False):\n \"\"\"\n Returns the training samples and labels as numpy array\n \"\"\"\n batch_src_max_len, batch_trg_max_len = get_max_len(cur_samples)\n src_words_list = list()\n src_words_mask_list = list()\n src_char_seq = list()\n\n trg_words_list = list()\n trg_vocab_mask = list()\n adj_lst = []\n\n target = list()\n cnt = 0\n for sample in cur_samples:\n src_words_list.append(get_words_index_seq(sample.SrcWords, batch_src_max_len))\n src_words_mask_list.append(get_padded_mask(sample.SrcLen, batch_src_max_len))\n src_char_seq.append(get_char_seq(sample.SrcWords, batch_src_max_len))\n trg_vocab_mask.append(get_target_vocab_mask(sample.SrcWords))\n\n # cur_masked_adj = np.zeros((batch_src_max_len, batch_src_max_len), dtype=np.float32)\n # cur_masked_adj[:len(sample.SrcWords), :len(sample.SrcWords)] = sample.AdjMat\n # adj_lst.append(cur_masked_adj)\n\n if is_training:\n padded_trg_words = get_words_index_seq(sample.TrgWords, batch_trg_max_len)\n trg_words_list.append(padded_trg_words)\n target.append(padded_trg_words[1:])\n else:\n trg_words_list.append(get_words_index_seq(['<SOS>'], 1))\n cnt += 1\n\n return {'src_words': np.array(src_words_list, dtype=np.float32),\n 'src_chars': np.array(src_char_seq),\n 'src_words_mask': np.array(src_words_mask_list),\n 'adj': np.array(adj_lst),\n 'trg_vocab_mask': np.array(trg_vocab_mask),\n 'trg_words': np.array(trg_words_list, dtype=np.int32),\n 'target': np.array(target)}\n\ndef shuffle_data(data):\n custom_print(len(data))\n data.sort(key=lambda x: x.SrcLen)\n num_batch = int(len(data) / batch_size)\n rand_idx = random.sample(range(num_batch), num_batch)\n new_data = []\n for idx in rand_idx:\n new_data += data[batch_size * idx: batch_size * (idx + 1)]\n if len(new_data) < len(data):\n new_data += data[num_batch * batch_size:]\n return new_data\n\n\ndef get_pred_words(preds, attns, src_words):\n pred_words = []\n for i in range(0, max_trg_len):\n word_idx = preds[i]\n if word_vocab['<EOS>'] == word_idx:\n pred_words.append('<EOS>')\n break\n elif att_type != 'None' and copy_on and word_vocab['<UNK>'] == word_idx:\n word_idx = attns[i]\n pred_words.append(src_words[word_idx])\n else:\n pred_words.append(rev_word_vocab[word_idx])\n return pred_words\n\n\nclass WordEmbeddings(nn.Module):\n def __init__(self, vocab_size, embed_dim, 
pre_trained_embed_matrix, drop_out_rate):\n super(WordEmbeddings, self).__init__()\n self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n self.embeddings.weight.data.copy_(torch.from_numpy(pre_trained_embed_matrix))\n self.dropout = nn.Dropout(drop_out_rate)\n\n def forward(self, words_seq):\n word_embeds = self.embeddings(words_seq)\n word_embeds = self.dropout(word_embeds)\n return word_embeds\n\n def weight(self):\n return self.embeddings.weight\n\n# Potentially use a pretrained BERT - 509\nclass CharEmbeddings(nn.Module):\n def __init__(self, vocab_size, embed_dim, drop_out_rate):\n super(CharEmbeddings, self).__init__()\n\n # Layers\n self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n self.dropout = nn.Dropout(drop_out_rate)\n\n def forward(self, words_seq):\n char_embeds = self.embeddings(words_seq)\n char_embeds = self.dropout(char_embeds)\n return char_embeds\n\n\n# DONT CHANGE CLASSES\n# 543\nclass Encoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate):\n super(Encoder, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.layers = layers\n self.is_bidirectional = is_bidirectional\n self.drop_rate = drop_out_rate\n self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, drop_rate)\n # Remove In case we want to BERT \n\n self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.layers, batch_first=True,\n bidirectional=self.is_bidirectional)\n self.dropout = nn.Dropout(self.drop_rate)\n self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size)\n self.max_pool = nn.MaxPool1d(max_word_len + conv_filter_size - 1, max_word_len + conv_filter_size - 1)\n\n def forward(self, words_input, char_seq, adj, is_training=False):\n char_embeds = self.char_embeddings(char_seq)\n char_embeds = char_embeds.permute(0, 2, 1)\n\n char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))\n char_feature = char_feature.permute(0, 2, 1)\n\n words_input = torch.cat((words_input, char_feature), -1)\n\n outputs, hc = self.lstm(words_input)\n outputs = self.dropout(outputs)\n \n return outputs\n\n\n# 597\nclass Attention(nn.Module):\n def __init__(self, input_dim):\n super(Attention, self).__init__()\n self.input_dim = input_dim\n self.linear_ctx = nn.Linear(self.input_dim, self.input_dim, bias=False)\n self.linear_query = nn.Linear(self.input_dim, self.input_dim, bias=True)\n self.v = nn.Linear(self.input_dim, 1)\n\n def forward(self, s_prev, enc_hs, src_mask):\n uh = self.linear_ctx(enc_hs)\n wq = self.linear_query(s_prev)\n wquh = torch.tanh(wq + uh)\n attn_weights = self.v(wquh).squeeze()\n attn_weights.data.masked_fill_(src_mask.data, -float('inf'))\n attn_weights = F.softmax(attn_weights, dim=-1)\n ctx = torch.bmm(attn_weights.unsqueeze(1), enc_hs).squeeze()\n return ctx, attn_weights\n\n# 617\nclass NGram_Attention(nn.Module):\n def __init__(self, input_dim, N):\n super(NGram_Attention, self).__init__()\n self.input_dim = input_dim\n self.layers = N\n self.V_layers = nn.ModuleList()\n self.W_layers = nn.ModuleList()\n for i in range(N):\n self.V_layers.append(nn.Linear(input_dim, input_dim))\n self.W_layers.append(nn.Linear(input_dim, input_dim))\n\n def forward(self, s_prev, enc_hs, src_mask):\n att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[0](enc_hs).transpose(1, 2)).squeeze()\n att.data.masked_fill_(src_mask.data, -float('inf'))\n att = F.softmax(att, dim=-1)\n ctx = self.W_layers[0](torch.bmm(att.unsqueeze(1), enc_hs).squeeze())\n for i 
in range(1, self.layers):\n enc_hs_ngram = torch.nn.AvgPool1d(i+1, 1)(enc_hs.transpose(1, 2)).transpose(1, 2)\n n_mask = src_mask.unsqueeze(1).float()\n n_mask = torch.nn.AvgPool1d(i+1, 1)(n_mask).squeeze()\n n_mask[n_mask > 0] = 1\n n_mask = n_mask.byte()\n n_att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[i](enc_hs_ngram).transpose(1, 2)).squeeze()\n n_att.data.masked_fill_(n_mask.data, -float('inf'))\n n_att = F.softmax(n_att, dim=-1)\n ctx += self.W_layers[i](torch.bmm(n_att.unsqueeze(1), enc_hs_ngram).squeeze())\n return ctx, att\n\n# 588\ndef mean_over_time(x, mask):\n x.data.masked_fill_(mask.unsqueeze(2).data, 0)\n x = torch.sum(x, dim=1)\n time_steps = torch.sum(mask.eq(0), dim=1, keepdim=True).float()\n x /= time_steps\n return x\n\n# 645\nclass Decoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, layers, drop_out_rate, max_length):\n super(Decoder, self).__init__()\n \n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.layers = layers\n self.drop_rate = drop_out_rate\n self.max_length = max_length\n\n if att_type == 'None':\n self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)\n elif att_type == 'Unigram':\n self.attention = Attention(input_dim)\n self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)\n else:\n self.attention = NGram_Attention(input_dim, 3)\n self.lstm = nn.LSTMCell(3 * self.input_dim, self.hidden_dim, self.layers)\n\n self.dropout = nn.Dropout(self.drop_rate)\n self.ent_out = nn.Linear(self.input_dim, len(word_vocab))\n\n def forward(self, y_prev, h_prev, enc_hs, src_word_embeds, src_mask, is_training=False):\n src_time_steps = enc_hs.size()[1]\n if att_type == 'None':\n ctx = mean_over_time(enc_hs, src_mask)\n attn_weights = torch.zeros(src_mask.size()).cuda()\n elif att_type == 'Unigram':\n s_prev = h_prev[0]\n s_prev = s_prev.unsqueeze(1)\n s_prev = s_prev.repeat(1, src_time_steps, 1)\n ctx, attn_weights = self.attention(s_prev, enc_hs, src_mask)\n else:\n last_index = src_mask.size()[1] - torch.sum(src_mask, dim=-1).long() - 1\n last_index = last_index.unsqueeze(1).unsqueeze(1).repeat(1, 1, enc_hs.size()[-1])\n enc_last = torch.gather(enc_hs, 1, last_index).squeeze()\n ctx, attn_weights = self.attention(enc_last, src_word_embeds, src_mask)\n ctx = torch.cat((enc_last, ctx), -1)\n\n y_prev = y_prev.squeeze()\n s_cur = torch.cat((y_prev, ctx), 1)\n hidden, cell_state = self.lstm(s_cur, h_prev)\n hidden = self.dropout(hidden)\n output = self.ent_out(hidden)\n return output, (hidden, cell_state), attn_weights\n\n# 690\n\nclass SeqToSeqModel(nn.Module):\n def __init__(self):\n super(SeqToSeqModel, self).__init__()\n self.word_embeddings = WordEmbeddings(len(word_vocab), word_embed_dim, word_embed_matrix, drop_rate)\n self.encoder = Encoder(enc_inp_size, int(enc_hidden_size/2), layers, True, drop_rate)\n self.decoder = Decoder(dec_inp_size, dec_hidden_size, layers, drop_rate, max_trg_len)\n\n def forward(self, src_words_seq, src_chars_seq, src_mask, trg_words_seq, trg_vocab_mask, adj, is_training=False):\n src_word_embeds = self.word_embeddings(src_words_seq)\n trg_word_embeds = self.word_embeddings(trg_words_seq)\n\n batch_len = src_word_embeds.size()[0]\n \n if is_training:\n time_steps = trg_word_embeds.size()[1] - 1\n else:\n time_steps = max_trg_len\n\n encoder_output = self.encoder(src_word_embeds, src_chars_seq, adj, is_training)\n\n h0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))\n h0 = h0.cuda()\n c0 = 
autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))\n c0 = c0.cuda()\n dec_hid = (h0, c0)\n\n if is_training:\n dec_inp = trg_word_embeds[:, 0, :]\n dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training) \n dec_out = dec_out.view(-1, len(word_vocab))\n dec_out = F.log_softmax(dec_out, dim=-1)\n dec_out = dec_out.unsqueeze(1)\n\n for t in range(1, time_steps):\n dec_inp = trg_word_embeds[:, t, :]\n cur_dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n cur_dec_out = cur_dec_out.view(-1, len(word_vocab))\n dec_out = torch.cat((dec_out, F.log_softmax(cur_dec_out, dim=-1).unsqueeze(1)), 1)\n else:\n dec_inp = trg_word_embeds[:, 0, :]\n dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n dec_out = dec_out.view(-1, len(word_vocab))\n if copy_on:\n dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))\n dec_out = F.log_softmax(dec_out, dim=-1)\n topv, topi = dec_out.topk(1)\n dec_out_v, dec_out_i = dec_out.topk(1)\n dec_attn_v, dec_attn_i = dec_attn.topk(1)\n\n for t in range(1, time_steps):\n dec_inp = self.word_embeddings(topi.squeeze().detach())\n cur_dec_out, dec_hid, cur_dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n cur_dec_out = cur_dec_out.view(-1, len(word_vocab))\n if copy_on:\n cur_dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))\n cur_dec_out = F.log_softmax(cur_dec_out, dim=-1)\n topv, topi = cur_dec_out.topk(1)\n cur_dec_out_v, cur_dec_out_i = cur_dec_out.topk(1)\n dec_out_i = torch.cat((dec_out_i, cur_dec_out_i), 1)\n cur_dec_attn_v, cur_dec_attn_i = cur_dec_attn.topk(1)\n dec_attn_i = torch.cat((dec_attn_i, cur_dec_attn_i), 1)\n\n if is_training:\n dec_out = dec_out.view(-1, len(word_vocab))\n return dec_out\n else:\n return dec_out_i, dec_attn_i\n\ndef predict(samples, model, model_id):\n pred_batch_size = batch_size\n batch_count = math.ceil(len(samples) / pred_batch_size)\n move_last_batch = False\n if len(samples) - batch_size * (batch_count - 1) == 1:\n move_last_batch = True\n batch_count -= 1\n \n preds = list()\n attns = list()\n \n model.eval()\n \n set_random_seeds(random_seed)\n \n start_time = datetime.datetime.now()\n \n for batch_idx in tqdm(range(0, batch_count)):\n batch_start = batch_idx * pred_batch_size\n batch_end = min(len(samples), batch_start + pred_batch_size)\n if batch_idx == batch_count - 1 and move_last_batch:\n batch_end = len(samples)\n\n cur_batch = samples[batch_start:batch_end]\n cur_samples_input = get_batch_data(cur_batch, False)\n\n src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))\n src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))\n trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))\n trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))\n adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))\n src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))\n\n if torch.cuda.is_available():\n src_words_seq = src_words_seq.cuda()\n src_words_mask = src_words_mask.cuda()\n trg_vocab_mask = trg_vocab_mask.cuda()\n trg_words_seq = trg_words_seq.cuda()\n adj = adj.cuda()\n src_chars_seq = src_chars_seq.cuda()\n\n src_words_seq = autograd.Variable(src_words_seq)\n src_words_mask = 
autograd.Variable(src_words_mask)\n trg_vocab_mask = autograd.Variable(trg_vocab_mask)\n adj = autograd.Variable(adj)\n src_chars_seq = autograd.Variable(src_chars_seq)\n\n trg_words_seq = autograd.Variable(trg_words_seq)\n with torch.no_grad():\n outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj,False)\n\n preds += list(outputs[0].data.cpu().numpy())\n attns += list(outputs[1].data.cpu().numpy())\n model.zero_grad()\n end_time = datetime.datetime.now()\n custom_print('Prediction time:', end_time - start_time)\n return preds, attns\n\ndef train_model(model_id, train_samples, dev_samples, best_model_file):\n train_size = len(train_samples)\n batch_count = int(math.ceil(train_size/batch_size))\n move_last_batch = False\n \n if len(train_samples) - batch_size * (batch_count - 1) == 1:\n move_last_batch = True\n batch_count -= 1\n \n custom_print(batch_count)\n\n # model = get_model(model_id)\n model = SeqToSeqModel()\n\n pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n custom_print('Parameters size:', pytorch_total_params)\n\n custom_print(model)\n\n if torch.cuda.is_available():\n model.cuda()\n if n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n criterion = nn.NLLLoss(ignore_index=0)\n optimizer = optim.Adam(model.parameters())\n\n custom_print(optimizer)\n\n best_dev_acc = -1.0\n best_epoch_idx = -1\n best_epoch_seed = -1\n\n for epoch_idx in range(0, num_epoch):\n model.train()\n model.zero_grad()\n\n custom_print('Epoch:', epoch_idx + 1)\n cur_seed = random_seed + epoch_idx + 1\n set_random_seeds(cur_seed)\n\n cur_shuffled_train_data = shuffle_data(train_samples)\n\n start_time = datetime.datetime.now()\n train_loss_val = 0.0\n\n for batch_idx in tqdm(range(0, batch_count)):\n batch_start = batch_idx * batch_size\n batch_end = min(len(cur_shuffled_train_data), batch_start + batch_size)\n\n if batch_idx == batch_count - 1 and move_last_batch:\n batch_end = len(cur_shuffled_train_data)\n\n cur_batch = cur_shuffled_train_data[batch_start:batch_end]\n cur_samples_input = get_batch_data(cur_batch, True)\n\n # np arrays to tensors\n src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))\n src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))\n trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))\n trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))\n adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))\n src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))\n\n target = torch.from_numpy(cur_samples_input['target'].astype('long'))\n\n if torch.cuda.is_available():\n src_words_seq = src_words_seq.cuda()\n src_words_mask = src_words_mask.cuda()\n trg_vocab_mask = trg_vocab_mask.cuda()\n trg_words_seq = trg_words_seq.cuda()\n adj = adj.cuda()\n src_chars_seq = src_chars_seq.cuda()\n\n target = target.cuda()\n\n src_words_seq = autograd.Variable(src_words_seq)\n src_words_mask = autograd.Variable(src_words_mask)\n trg_vocab_mask = autograd.Variable(trg_vocab_mask)\n trg_words_seq = autograd.Variable(trg_words_seq)\n adj = autograd.Variable(adj)\n src_chars_seq = autograd.Variable(src_chars_seq)\n\n target = autograd.Variable(target)\n\n outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj, True)\n\n target = target.view(-1, 1).squeeze()\n loss = criterion(outputs, target)\n loss.backward()\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)\n\n if (batch_idx + 1) % update_freq == 0:\n optimizer.step()\n model.zero_grad()\n\n train_loss_val += loss.item()\n\n train_loss_val /= batch_count\n end_time = datetime.datetime.now()\n custom_print('Training loss:', train_loss_val)\n custom_print('Training time:', end_time - start_time)\n\n custom_print('\\nDev Results\\n')\n set_random_seeds(random_seed)\n dev_preds, dev_attns = predict(dev_samples, model, model_id)\n \n write_test_res(dev_samples, dev_preds, dev_attns, os.path.join(trg_data_folder, 'dev.out'))\n\n ref_lines = open(trg_dev_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'dev.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n dev_acc = calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n\n\n # pred_pos, gt_pos, correct_pos = get_F1(dev_samples, dev_preds, dev_attns)\n # custom_print(pred_pos, '\\t', gt_pos, '\\t', correct_pos)\n # p = float(correct_pos) / (pred_pos + 1e-8)\n # r = float(correct_pos) / (gt_pos + 1e-8)\n # dev_acc = (2 * p * r) / (p + r + 1e-8)\n # custom_print('F1:', dev_acc)\n\n if dev_acc >= best_dev_acc:\n best_epoch_idx = epoch_idx + 1\n best_epoch_seed = cur_seed\n custom_print('model saved......')\n best_dev_acc = dev_acc\n torch.save(model.state_dict(), best_model_file)\n\n custom_print('\\n\\n')\n if epoch_idx + 1 - best_epoch_idx >= early_stop_cnt:\n break\n\n custom_print('*******')\n custom_print('Best Epoch:', best_epoch_idx)\n custom_print('Best Epoch Seed:', best_epoch_seed)\n\n\nif __name__ == \"__main__\":\n \n os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]\n random_seed = int(sys.argv[2])\n src_data_folder = sys.argv[3]\n trg_data_folder = sys.argv[4]\n job_mode = sys.argv[5]\n embedding_type = sys.argv[6]\n granular_mode = 1\n\n n_gpu = torch.cuda.device_count()\n set_random_seeds(random_seed)\n\n\n if not os.path.exists(trg_data_folder):\n os.mkdir(trg_data_folder)\n model_name = 1\n\n #Tunable Hyperparameters\n\n batch_size = 32\n num_epoch = 30\n max_src_len = 100\n max_trg_len = 50\n\n if embedding_type == 'w2v':\n embedding_file = os.path.join(src_data_folder, 'w2v.txt')\n else:\n embedding_file = os.path.join(src_data_folder, 'Bert_embeddings.txt')\n\n update_freq = 1\n enc_type = ['LSTM', 'GCN', 'LSTM-GCN'][0]\n att_type = ['None', 'Unigram', 'N-Gram-Enc'][1]\n\n copy_on = True\n\n gcn_num_layers = 3\n\n if embedding_type == 'w2v':\n word_embed_dim = 300\n else:\n word_embed_dim = 768\n \n word_min_freq = 2\n char_embed_dim = 50\n char_feature_size = 50\n conv_filter_size = 3\n max_word_len = 10\n\n enc_inp_size = word_embed_dim + char_feature_size\n enc_hidden_size = word_embed_dim\n dec_inp_size = enc_hidden_size\n dec_hidden_size = dec_inp_size\n\n drop_rate = 0.3\n layers = 1\n early_stop_cnt = 20\n sample_cnt = 0\n Sample = recordclass(\"Sample\", \"Id SrcLen SrcWords TrgLen TrgWords\")\n\n events_file = os.path.join(src_data_folder, 'event_types.txt')\n arguments_file = os.path.join(src_data_folder, 'arguments.txt')\n roles_file = os.path.join(src_data_folder, 'roles.txt')\n\n events = get_relations(events_file)\n arguments = get_relations(arguments_file)\n roles = get_relations(roles_file)\n\n\n # train a model\n if job_mode == 'train':\n logger = open(os.path.join(trg_data_folder, 'training.log'), 'w')\n custom_print(sys.argv)\n custom_print(max_src_len, 
max_trg_len, drop_rate, layers)\n custom_print('loading data......')\n model_file_name = os.path.join(trg_data_folder, 'model.h5py')\n src_train_file = os.path.join(src_data_folder, 'train.sent')\n trg_train_file = os.path.join(src_data_folder, 'train.tup')\n train_data = read_data(src_train_file, trg_train_file, 1)\n\n src_dev_file = os.path.join(src_data_folder, 'dev.sent')\n trg_dev_file = os.path.join(src_data_folder, 'dev.tup')\n dev_data = read_data(src_dev_file, trg_dev_file, 2)\n\n custom_print('Training data size:', len(train_data))\n custom_print('Development data size:', len(dev_data))\n\n custom_print(\"preparing vocabulary......\")\n save_vocab = os.path.join(trg_data_folder, 'vocab.pkl')\n word_vocab, rev_word_vocab, char_vocab, word_embed_matrix = build_vocab(train_data, events, arguments, roles, save_vocab,\n embedding_file)\n\n custom_print(\"Training started......\")\n train_model(model_name, train_data, dev_data, model_file_name)\n logger.close()\n\n if job_mode == 'test':\n logger = open(os.path.join(trg_data_folder, 'test.log'), 'w')\n custom_print(sys.argv)\n custom_print(\"loading word vectors......\")\n vocab_file_name = os.path.join(trg_data_folder, 'vocab.pkl')\n word_vocab, char_vocab = load_vocab(vocab_file_name)\n\n rev_word_vocab = OrderedDict()\n for word in word_vocab:\n idx = word_vocab[word]\n rev_word_vocab[idx] = word\n\n word_embed_matrix = np.zeros((len(word_vocab), word_embed_dim), dtype=np.float32)\n custom_print('vocab size:', len(word_vocab))\n\n src_test_file = os.path.join(src_data_folder, 'test.sent')\n trg_test_file = os.path.join(src_data_folder, 'test.tup')\n test_data = read_data(src_test_file, trg_test_file, 3)\n\n custom_print('Test data size:', len(test_data))\n\n custom_print('seed:', random_seed)\n model_file = os.path.join(trg_data_folder, 'model.h5py')\n\n best_model = get_model(model_name)\n custom_print(best_model)\n if torch.cuda.is_available():\n best_model.cuda()\n if n_gpu > 1:\n best_model = torch.nn.DataParallel(best_model)\n best_model.load_state_dict(torch.load(model_file))\n\n custom_print('\\nTest Results\\n')\n set_random_seeds(random_seed)\n test_preds, test_attns = predict(test_data, best_model, model_name)\n\n custom_print('Copy On')\n write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test.out'))\n\n # ref_lines = open(trg_test_file).readlines()\n # pred_lines = open(os.path.join(trg_data_folder, 'test.out')).readlines()\n # event_lines = open(events_file).readlines()\n # argument_lines = open(arguments_file).readlines()\n # roles_lines = open(roles_file).readlines()\n\n ref_lines = open(trg_test_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'test.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n mode = 1\n custom_print('Overall F1')\n # custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))\n calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n\n copy_on = False\n custom_print('Copy Off')\n set_random_seeds(random_seed)\n test_preds, test_attns = predict(test_data, best_model, model_name)\n write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test_without_copy.out'))\n\n # ref_lines = open(trg_test_file).readlines()\n # pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).readlines()\n # event_lines = 
open(events_file).readlines()\n # argument_lines = open(arguments_file).readlines()\n # roles_lines = open(roles_file).readlines()\n\n ref_lines = open(trg_test_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n mode = 1\n custom_print('Overall F1')\n # custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))\n calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n logger.close()\n\n\n\n\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.load",
"torch.zeros",
"torch.sum",
"torch.nn.Embedding",
"torch.tanh",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.autograd.Variable",
"torch.nn.Dropout",
"torch.from_numpy",
"torch.nn.LSTMCell",
"torch.nn.MaxPool1d",
"numpy.float32",
"numpy.zeros",
"torch.nn.NLLLoss",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.cuda.device_count",
"numpy.array",
"numpy.random.seed",
"torch.nn.LSTM",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.gather",
"numpy.random.uniform",
"torch.nn.DataParallel",
"torch.nn.AvgPool1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samellem/autodp | [
"fd14fed07e0bb67fca5f7e82bbdab6cf60b339d3"
] | [
"test/unit_test_fdp_to_approxdp_conversion.py"
] | [
"from autodp.mechanism_zoo import GaussianMechanism\nfrom autodp.dp_bank import get_eps_ana_gaussian\n\nimport numpy as np\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nparams = [0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]\n\n\ndef _fdp_conversion(sigma):\n\n delta_list = [0,1e-8, 1e-6, 1e-4, 1e-2, 0.3, 0.5, 1]\n\n # f-DP implementation\n gm3 = GaussianMechanism(sigma, name='GM3', RDP_off=True, approxDP_off=True, fdp_off=False)\n\n # direct approxdp implementation\n agm = lambda x: get_eps_ana_gaussian(sigma, x)\n\n eps_direct = np.array([agm(delta) for delta in delta_list])\n\n # the fdp is converted by numerical methods from privacy profile.\n eps_converted = np.array([gm3.get_approxDP(delta) for delta in delta_list])\n max_diff = eps_direct - eps_converted\n\n rel_diff = max_diff / (eps_direct+1e-10)\n\n if np.isinf(eps_direct[0]) and np.isinf(eps_converted[0]):\n rel_diff[0] = 0\n return rel_diff\n\n\n_fdp_conversion(1.0)\n\nclass Test_approxDP2fDP_Conversion(parameterized.TestCase):\n\n @parameterized.parameters(p for p in params)\n def test_fdp_conversion(self, sigma):\n max_diff = _fdp_conversion(sigma)\n self.assertSequenceAlmostEqual(max_diff, np.zeros_like(max_diff), places=2)\n\n\nif __name__ == '__main__':\n absltest.main()\n\n"
] | [
[
"numpy.zeros_like",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GautamV234/pyro | [
"d5474ebc6101b330bf9060a3731830d4b6a585d5",
"d5474ebc6101b330bf9060a3731830d4b6a585d5"
] | [
"pyro/contrib/gp/models/gpr.py",
"pyro/contrib/gp/models/sgpr.py"
] | [
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nimport torch.distributions as torchdist\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.gp.models.model import GPModel\nfrom pyro.contrib.gp.util import conditional\nfrom pyro.nn.module import PyroParam, pyro_method\nfrom pyro.util import warn_if_nan\n\n\nclass GPRegression(GPModel):\n r\"\"\"\n Gaussian Process Regression model.\n\n The core of a Gaussian Process is a covariance function :math:`k` which governs\n the similarity between input points. Given :math:`k`, we can establish a\n distribution over functions :math:`f` by a multivarite normal distribution\n\n .. math:: p(f(X)) = \\mathcal{N}(0, k(X, X)),\n\n where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance\n matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs\n :math:`(x, z)`. This distribution is usually denoted by\n\n .. math:: f \\sim \\mathcal{GP}(0, k).\n\n .. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can\n also be specified by a mean function :math:`m` (which is a zero-value function\n by default). In that case, its distribution will be\n\n .. math:: p(f(X)) = \\mathcal{N}(m(X), k(X, X)).\n\n Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process\n Regression model takes the form\n\n .. math::\n f &\\sim \\mathcal{GP}(0, k(X, X)),\\\\\n y & \\sim f + \\epsilon,\n\n where :math:`\\epsilon` is Gaussian noise.\n\n .. note:: This model has :math:`\\mathcal{O}(N^3)` complexity for training,\n :math:`\\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number\n of train inputs.\n\n Reference:\n\n [1] `Gaussian Processes for Machine Learning`,\n Carl E. Rasmussen, Christopher K. I. Williams\n\n :param torch.Tensor X: A input data for training. Its first dimension is the number\n of data points.\n :param torch.Tensor y: An output data for training. Its last dimension is the\n number of data points.\n :param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which\n is the covariance function :math:`k`.\n :param torch.Tensor noise: Variance of Gaussian noise of this model.\n :param callable mean_function: An optional mean function :math:`m` of this Gaussian\n process. 
By default, we use zero mean.\n :param float jitter: A small positive term which is added into the diagonal part of\n a covariance matrix to help stablize its Cholesky decomposition.\n \"\"\"\n\n def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):\n assert isinstance(\n X, torch.Tensor\n ), \"X needs to be a torch Tensor instead of a {}\".format(type(X))\n if y is not None:\n assert isinstance(\n y, torch.Tensor\n ), \"y needs to be a torch Tensor instead of a {}\".format(type(y))\n super().__init__(X, y, kernel, mean_function, jitter)\n\n noise = self.X.new_tensor(1.0) if noise is None else noise\n self.noise = PyroParam(noise, constraints.positive)\n\n @pyro_method\n def model(self):\n self.set_mode(\"model\")\n\n N = self.X.size(0)\n Kff = self.kernel(self.X)\n Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to diagonal\n Lff = torch.linalg.cholesky(Kff)\n\n zero_loc = self.X.new_zeros(self.X.size(0))\n f_loc = zero_loc + self.mean_function(self.X)\n if self.y is None:\n f_var = Lff.pow(2).sum(dim=-1)\n return f_loc, f_var\n else:\n return pyro.sample(\n self._pyro_get_fullname(\"y\"),\n dist.MultivariateNormal(f_loc, scale_tril=Lff)\n .expand_by(self.y.shape[:-1])\n .to_event(self.y.dim() - 1),\n obs=self.y,\n )\n\n @pyro_method\n def guide(self):\n self.set_mode(\"guide\")\n self._load_pyro_samples()\n\n def forward(self, Xnew, full_cov=False, noiseless=True):\n r\"\"\"\n Computes the mean and covariance matrix (or variance) of Gaussian Process\n posterior on a test input data :math:`X_{new}`:\n\n .. math:: p(f^* \\mid X_{new}, X, y, k, \\epsilon) = \\mathcal{N}(loc, cov).\n\n .. note:: The noise parameter ``noise`` (:math:`\\epsilon`) together with\n kernel's parameters have been learned from a training procedure (MCMC or\n SVI).\n\n :param torch.Tensor Xnew: A input data for testing. Note that\n ``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.\n :param bool full_cov: A flag to decide if we want to predict full covariance\n matrix or just variance.\n :param bool noiseless: A flag to decide if we want to include noise in the\n prediction output or not.\n :returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`\n :rtype: tuple(torch.Tensor, torch.Tensor)\n \"\"\"\n self._check_Xnew_shape(Xnew)\n self.set_mode(\"guide\")\n\n N = self.X.size(0)\n Kff = self.kernel(self.X).contiguous()\n Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to the diagonal\n Lff = torch.linalg.cholesky(Kff)\n\n y_residual = self.y - self.mean_function(self.X)\n loc, cov = conditional(\n Xnew,\n self.X,\n self.kernel,\n y_residual,\n None,\n Lff,\n full_cov,\n jitter=self.jitter,\n )\n\n if full_cov and not noiseless:\n M = Xnew.size(0)\n cov = cov.contiguous()\n cov.view(-1, M * M)[:, :: M + 1] += self.noise # add noise to the diagonal\n if not full_cov and not noiseless:\n cov = cov + self.noise\n\n return loc + self.mean_function(Xnew), cov\n\n def iter_sample(self, noiseless=True):\n r\"\"\"\n Iteratively constructs a sample from the Gaussian Process posterior.\n\n Recall that at test input points :math:`X_{new}`, the posterior is\n multivariate Gaussian distributed with mean and covariance matrix\n given by :func:`forward`.\n\n This method samples lazily from this multivariate Gaussian. The advantage\n of this approach is that later query points can depend upon earlier ones.\n Particularly useful when the querying is to be done by an optimisation\n routine.\n\n .. 
note:: The noise parameter ``noise`` (:math:`\\epsilon`) together with\n kernel's parameters have been learned from a training procedure (MCMC or\n SVI).\n\n :param bool noiseless: A flag to decide if we want to add sampling noise\n to the samples beyond the noise inherent in the GP posterior.\n :returns: sampler\n :rtype: function\n \"\"\"\n noise = self.noise.detach()\n X = self.X.clone().detach()\n y = self.y.clone().detach()\n N = X.size(0)\n Kff = self.kernel(X).contiguous()\n Kff.view(-1)[:: N + 1] += noise # add noise to the diagonal\n\n outside_vars = {\"X\": X, \"y\": y, \"N\": N, \"Kff\": Kff}\n\n def sample_next(xnew, outside_vars):\n \"\"\"Repeatedly samples from the Gaussian process posterior,\n conditioning on previously sampled values.\n \"\"\"\n warn_if_nan(xnew)\n\n # Variables from outer scope\n X, y, Kff = outside_vars[\"X\"], outside_vars[\"y\"], outside_vars[\"Kff\"]\n\n # Compute Cholesky decomposition of kernel matrix\n Lff = torch.linalg.cholesky(Kff)\n y_residual = y - self.mean_function(X)\n\n # Compute conditional mean and variance\n loc, cov = conditional(\n xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter\n )\n if not noiseless:\n cov = cov + noise\n\n ynew = torchdist.Normal(\n loc + self.mean_function(xnew), cov.sqrt()\n ).rsample()\n\n # Update kernel matrix\n N = outside_vars[\"N\"]\n Kffnew = Kff.new_empty(N + 1, N + 1)\n Kffnew[:N, :N] = Kff\n cross = self.kernel(X, xnew).squeeze()\n end = self.kernel(xnew, xnew).squeeze()\n Kffnew[N, :N] = cross\n Kffnew[:N, N] = cross\n # No noise, just jitter for numerical stability\n Kffnew[N, N] = end + self.jitter\n # Heuristic to avoid adding degenerate points\n if Kffnew.logdet() > -15.0:\n outside_vars[\"Kff\"] = Kffnew\n outside_vars[\"N\"] += 1\n outside_vars[\"X\"] = torch.cat((X, xnew))\n outside_vars[\"y\"] = torch.cat((y, ynew))\n\n return ynew\n\n return lambda xnew: sample_next(xnew, outside_vars)\n",
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.nn import Parameter\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.gp.models.model import GPModel\nfrom pyro.nn.module import PyroParam, pyro_method\n\n\nclass SparseGPRegression(GPModel):\n \"\"\"\n Sparse Gaussian Process Regression model.\n\n In :class:`.GPRegression` model, when the number of input data :math:`X` is large,\n the covariance matrix :math:`k(X, X)` will require a lot of computational steps to\n compute its inverse (for log likelihood and for prediction). By introducing an\n additional inducing-input parameter :math:`X_u`, we can reduce computational cost\n by approximate :math:`k(X, X)` by a low-rank Nymstr\\u00F6m approximation :math:`Q`\n (see reference [1]), where\n\n .. math:: Q = k(X, X_u) k(X,X)^{-1} k(X_u, X).\n\n Given inputs :math:`X`, their noisy observations :math:`y`, and the inducing-input\n parameters :math:`X_u`, the model takes the form:\n\n .. math::\n u & \\\\sim \\\\mathcal{GP}(0, k(X_u, X_u)),\\\\\\\\\n f & \\\\sim q(f \\\\mid X, X_u) = \\\\mathbb{E}_{p(u)}q(f\\\\mid X, X_u, u),\\\\\\\\\n y & \\\\sim f + \\\\epsilon,\n\n where :math:`\\\\epsilon` is Gaussian noise and the conditional distribution\n :math:`q(f\\\\mid X, X_u, u)` is an approximation of\n\n .. math:: p(f\\\\mid X, X_u, u) = \\\\mathcal{N}(m, k(X, X) - Q),\n\n whose terms :math:`m` and :math:`k(X, X) - Q` is derived from the joint\n multivariate normal distribution:\n\n .. math:: [f, u] \\\\sim \\\\mathcal{GP}(0, k([X, X_u], [X, X_u])).\n\n This class implements three approximation methods:\n\n + Deterministic Training Conditional (DTC):\n\n .. math:: q(f\\\\mid X, X_u, u) = \\\\mathcal{N}(m, 0),\n\n which in turns will imply\n\n .. math:: f \\\\sim \\\\mathcal{N}(0, Q).\n\n + Fully Independent Training Conditional (FITC):\n\n .. math:: q(f\\\\mid X, X_u, u) = \\\\mathcal{N}(m, diag(k(X, X) - Q)),\n\n which in turns will correct the diagonal part of the approximation in DTC:\n\n .. math:: f \\\\sim \\\\mathcal{N}(0, Q + diag(k(X, X) - Q)).\n\n + Variational Free Energy (VFE), which is similar to DTC but has an additional\n `trace_term` in the model's log likelihood. This additional term makes \"VFE\"\n equivalent to the variational approach in :class:`.SparseVariationalGP`\n (see reference [2]).\n\n .. note:: This model has :math:`\\\\mathcal{O}(NM^2)` complexity for training,\n :math:`\\\\mathcal{O}(NM^2)` complexity for testing. Here, :math:`N` is the number\n of train inputs, :math:`M` is the number of inducing inputs.\n\n References:\n\n [1] `A Unifying View of Sparse Approximate Gaussian Process Regression`,\n Joaquin Qui\\u00F1onero-Candela, Carl E. Rasmussen\n\n [2] `Variational learning of inducing variables in sparse Gaussian processes`,\n Michalis Titsias\n\n :param torch.Tensor X: A input data for training. Its first dimension is the number\n of data points.\n :param torch.Tensor y: An output data for training. Its last dimension is the\n number of data points.\n :param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which\n is the covariance function :math:`k`.\n :param torch.Tensor Xu: Initial values for inducing points, which are parameters\n of our model.\n :param torch.Tensor noise: Variance of Gaussian noise of this model.\n :param callable mean_function: An optional mean function :math:`m` of this Gaussian\n process. 
By default, we use zero mean.\n :param str approx: One of approximation methods: \"DTC\", \"FITC\", and \"VFE\"\n (default).\n :param float jitter: A small positive term which is added into the diagonal part of\n a covariance matrix to help stablize its Cholesky decomposition.\n :param str name: Name of this model.\n \"\"\"\n\n def __init__(\n self, X, y, kernel, Xu, noise=None, mean_function=None, approx=None, jitter=1e-6\n ):\n\n assert isinstance(\n X, torch.Tensor\n ), \"X needs to be a torch Tensor instead of a {}\".format(type(X))\n if y is not None:\n assert isinstance(\n y, torch.Tensor\n ), \"y needs to be a torch Tensor instead of a {}\".format(type(y))\n assert isinstance(\n Xu, torch.Tensor\n ), \"Xu needs to be a torch Tensor instead of a {}\".format(type(Xu))\n\n super().__init__(X, y, kernel, mean_function, jitter)\n\n self.Xu = Parameter(Xu)\n\n noise = self.X.new_tensor(1.0) if noise is None else noise\n self.noise = PyroParam(noise, constraints.positive)\n\n if approx is None:\n self.approx = \"VFE\"\n elif approx in [\"DTC\", \"FITC\", \"VFE\"]:\n self.approx = approx\n else:\n raise ValueError(\n \"The sparse approximation method should be one of \"\n \"'DTC', 'FITC', 'VFE'.\"\n )\n\n @pyro_method\n def model(self):\n self.set_mode(\"model\")\n\n # W = (inv(Luu) @ Kuf).T\n # Qff = Kfu @ inv(Kuu) @ Kuf = W @ W.T\n # Fomulas for each approximation method are\n # DTC: y_cov = Qff + noise, trace_term = 0\n # FITC: y_cov = Qff + diag(Kff - Qff) + noise, trace_term = 0\n # VFE: y_cov = Qff + noise, trace_term = tr(Kff-Qff) / noise\n # y_cov = W @ W.T + D\n # trace_term is added into log_prob\n\n N = self.X.size(0)\n M = self.Xu.size(0)\n Kuu = self.kernel(self.Xu).contiguous()\n Kuu.view(-1)[:: M + 1] += self.jitter # add jitter to the diagonal\n Luu = torch.linalg.cholesky(Kuu)\n Kuf = self.kernel(self.Xu, self.X)\n W = Kuf.triangular_solve(Luu, upper=False)[0].t()\n\n D = self.noise.expand(N)\n if self.approx == \"FITC\" or self.approx == \"VFE\":\n Kffdiag = self.kernel(self.X, diag=True)\n Qffdiag = W.pow(2).sum(dim=-1)\n if self.approx == \"FITC\":\n D = D + Kffdiag - Qffdiag\n else: # approx = \"VFE\"\n trace_term = (Kffdiag - Qffdiag).sum() / self.noise\n trace_term = trace_term.clamp(min=0)\n\n zero_loc = self.X.new_zeros(N)\n f_loc = zero_loc + self.mean_function(self.X)\n if self.y is None:\n f_var = D + W.pow(2).sum(dim=-1)\n return f_loc, f_var\n else:\n if self.approx == \"VFE\":\n pyro.factor(self._pyro_get_fullname(\"trace_term\"), -trace_term / 2.0)\n\n return pyro.sample(\n self._pyro_get_fullname(\"y\"),\n dist.LowRankMultivariateNormal(f_loc, W, D)\n .expand_by(self.y.shape[:-1])\n .to_event(self.y.dim() - 1),\n obs=self.y,\n )\n\n @pyro_method\n def guide(self):\n self.set_mode(\"guide\")\n self._load_pyro_samples()\n\n def forward(self, Xnew, full_cov=False, noiseless=True):\n r\"\"\"\n Computes the mean and covariance matrix (or variance) of Gaussian Process\n posterior on a test input data :math:`X_{new}`:\n\n .. math:: p(f^* \\mid X_{new}, X, y, k, X_u, \\epsilon) = \\mathcal{N}(loc, cov).\n\n .. note:: The noise parameter ``noise`` (:math:`\\epsilon`), the inducing-point\n parameter ``Xu``, together with kernel's parameters have been learned from\n a training procedure (MCMC or SVI).\n\n :param torch.Tensor Xnew: A input data for testing. 
Note that\n ``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.\n :param bool full_cov: A flag to decide if we want to predict full covariance\n matrix or just variance.\n :param bool noiseless: A flag to decide if we want to include noise in the\n prediction output or not.\n :returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`\n :rtype: tuple(torch.Tensor, torch.Tensor)\n \"\"\"\n self._check_Xnew_shape(Xnew)\n self.set_mode(\"guide\")\n\n # W = inv(Luu) @ Kuf\n # Ws = inv(Luu) @ Kus\n # D as in self.model()\n # K = I + W @ inv(D) @ W.T = L @ L.T\n # S = inv[Kuu + Kuf @ inv(D) @ Kfu]\n # = inv(Luu).T @ inv[I + inv(Luu)@ Kuf @ inv(D)@ Kfu @ inv(Luu).T] @ inv(Luu)\n # = inv(Luu).T @ inv[I + W @ inv(D) @ W.T] @ inv(Luu)\n # = inv(Luu).T @ inv(K) @ inv(Luu)\n # = inv(Luu).T @ inv(L).T @ inv(L) @ inv(Luu)\n # loc = Ksu @ S @ Kuf @ inv(D) @ y = Ws.T @ inv(L).T @ inv(L) @ W @ inv(D) @ y\n # cov = Kss - Ksu @ inv(Kuu) @ Kus + Ksu @ S @ Kus\n # = kss - Ksu @ inv(Kuu) @ Kus + Ws.T @ inv(L).T @ inv(L) @ Ws\n\n N = self.X.size(0)\n M = self.Xu.size(0)\n\n # TODO: cache these calculations to get faster inference\n\n Kuu = self.kernel(self.Xu).contiguous()\n Kuu.view(-1)[:: M + 1] += self.jitter # add jitter to the diagonal\n Luu = torch.linalg.cholesky(Kuu)\n\n Kuf = self.kernel(self.Xu, self.X)\n\n W = Kuf.triangular_solve(Luu, upper=False)[0]\n D = self.noise.expand(N)\n if self.approx == \"FITC\":\n Kffdiag = self.kernel(self.X, diag=True)\n Qffdiag = W.pow(2).sum(dim=0)\n D = D + Kffdiag - Qffdiag\n\n W_Dinv = W / D\n K = W_Dinv.matmul(W.t()).contiguous()\n K.view(-1)[:: M + 1] += 1 # add identity matrix to K\n L = torch.linalg.cholesky(K)\n\n # get y_residual and convert it into 2D tensor for packing\n y_residual = self.y - self.mean_function(self.X)\n y_2D = y_residual.reshape(-1, N).t()\n W_Dinv_y = W_Dinv.matmul(y_2D)\n\n # End caching ----------\n\n Kus = self.kernel(self.Xu, Xnew)\n Ws = Kus.triangular_solve(Luu, upper=False)[0]\n pack = torch.cat((W_Dinv_y, Ws), dim=1)\n Linv_pack = pack.triangular_solve(L, upper=False)[0]\n # unpack\n Linv_W_Dinv_y = Linv_pack[:, : W_Dinv_y.shape[1]]\n Linv_Ws = Linv_pack[:, W_Dinv_y.shape[1] :]\n\n C = Xnew.size(0)\n loc_shape = self.y.shape[:-1] + (C,)\n loc = Linv_W_Dinv_y.t().matmul(Linv_Ws).reshape(loc_shape)\n\n if full_cov:\n Kss = self.kernel(Xnew).contiguous()\n if not noiseless:\n Kss.view(-1)[:: C + 1] += self.noise # add noise to the diagonal\n Qss = Ws.t().matmul(Ws)\n cov = Kss - Qss + Linv_Ws.t().matmul(Linv_Ws)\n cov_shape = self.y.shape[:-1] + (C, C)\n cov = cov.expand(cov_shape)\n else:\n Kssdiag = self.kernel(Xnew, diag=True)\n if not noiseless:\n Kssdiag = Kssdiag + self.noise\n Qssdiag = Ws.pow(2).sum(dim=0)\n cov = Kssdiag - Qssdiag + Linv_Ws.pow(2).sum(dim=0)\n cov_shape = self.y.shape[:-1] + (C,)\n cov = cov.expand(cov_shape)\n\n return loc + self.mean_function(Xnew), cov\n"
] | [
[
"torch.linalg.cholesky",
"torch.cat"
],
[
"torch.linalg.cholesky",
"torch.nn.Parameter",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
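The `SparseGPRegression` class embedded in the row above documents three approximation modes (DTC, FITC, and the default VFE) and a `forward()` that returns a posterior mean and variance. As a minimal usage sketch — assuming the standard `pyro.contrib.gp` API and toy 1-D data invented here purely for illustration — the model can be fit with a plain Adam loop over `Trace_ELBO`:

```python
import torch
import pyro
import pyro.contrib.gp as gp

# Toy 1-D regression data (illustrative only, not from the source).
X = torch.linspace(-3.0, 3.0, 200).unsqueeze(-1)
y = torch.sin(2.0 * X).squeeze(-1) + 0.1 * torch.randn(X.size(0))

kernel = gp.kernels.RBF(input_dim=1)
Xu = X[::20].clone()  # 10 inducing inputs; stored as a trainable Parameter by the model

# approx is one of "DTC", "FITC", "VFE" (default), as described in the docstring above.
sgpr = gp.models.SparseGPRegression(X, y, kernel, Xu, approx="VFE", jitter=1e-5)

optimizer = torch.optim.Adam(sgpr.parameters(), lr=0.01)
loss_fn = pyro.infer.Trace_ELBO().differentiable_loss
for _ in range(500):
    optimizer.zero_grad()
    loss = loss_fn(sgpr.model, sgpr.guide)
    loss.backward()
    optimizer.step()

# Posterior mean and variance at the training inputs, via the forward() shown above.
with torch.no_grad():
    mean, var = sgpr(X, full_cov=False, noiseless=False)
```

Switching to the FITC or DTC approximation is just a matter of passing a different `approx` string; the training loop itself is unchanged.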
aphearin/c3dev | [
"d36d083c9eb688640670dbe066bf299777a78ba7",
"d36d083c9eb688640670dbe066bf299777a78ba7"
] | [
"c3dev/galmocks/data_loaders/load_tng_data.py",
"c3dev/galmocks/galhalo_models/galsampler_phase_space.py"
] | [
"\"\"\"\n\"\"\"\nfrom collections import OrderedDict\nimport numpy as np\nfrom halotools.utils import sliding_conditional_percentile\nfrom astropy.table import Table\nfrom ..utils.galprops import compute_lg_ssfr\n\n\nSANDY_SCRATCH_PATH = \"/global/cscratch1/sd/sihany/TNG300-1/output\"\nBEBOP = \"/lcrc/project/halotools/C3EMC/TNG300-1\"\nNERSC = \"/global/cfs/cdirs/desi/users/aphearin/C3EMC/TNG300-1\"\nTNG_LBOX = 205.0\n\n\ndef load_tng_subhalos(drn=NERSC, snapNum=55):\n import illustris_python as il\n\n subhalos = il.groupcat.loadSubhalos(drn, snapNum)\n return subhalos\n\n\ndef load_tng_host_halos(drn=NERSC, snapNum=55):\n import illustris_python as il\n\n host_halos = il.groupcat.loadHalos(drn, snapNum)\n return host_halos\n\n\ndef get_value_added_tng_data(subs, hosts):\n hosts[\"halo_id\"] = np.arange(len(hosts[\"GroupMass\"])).astype(int)\n\n host_keys_to_keep = [\"halo_id\", \"GroupFirstSub\", \"GroupPos\", \"GroupVel\"]\n tng_hosts = Table(OrderedDict([(key, hosts[key]) for key in host_keys_to_keep]))\n tng_hosts.rename_column(\"GroupPos\", \"pos\")\n tng_hosts.rename_column(\"GroupVel\", \"vel\")\n tng_hosts[\"logmh\"] = np.log10(hosts[\"GroupMass\"]) + 10\n tng_hosts[\"pos\"] = tng_hosts[\"pos\"] / 1000\n\n tng = Table()\n tng[\"host_halo_logmh\"] = tng_hosts[\"logmh\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_pos\"] = tng_hosts[\"pos\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_vel\"] = tng_hosts[\"vel\"][subs[\"SubhaloGrNr\"]]\n\n tng[\"subhalo_pos\"] = subs[\"SubhaloPos\"] / 1000\n tng[\"subhalo_vel\"] = subs[\"SubhaloVel\"]\n tng[\"subhalo_mass\"] = subs[\"SubhaloMass\"] * 1e10\n tng[\"subhalo_vmax\"] = subs[\"SubhaloVmax\"]\n tng[\"subhalo_vdisp\"] = subs[\"SubhaloVelDisp\"]\n\n tng[\"stellar_metallicity\"] = subs[\"SubhaloStarMetallicity\"]\n tng[\"subhalo_mgas\"] = subs[\"SubhaloMassType\"][:, 0] * 1e10\n tng[\"subhalo_dm\"] = subs[\"SubhaloMassType\"][:, 1] * 1e10\n tng[\"mstar\"] = subs[\"SubhaloMassType\"][:, 4] * 1e10\n tng[\"sfr\"] = subs[\"SubhaloSFR\"]\n tng[\"lgssfr\"] = compute_lg_ssfr(tng[\"mstar\"], tng[\"sfr\"])\n\n tng[\"host_halo_index\"] = subs[\"SubhaloGrNr\"]\n\n subhalo_id = np.arange(len(subs[\"SubhaloGrNr\"])).astype(int)\n subhalo_cen_id = subhalo_id[tng_hosts[\"GroupFirstSub\"]]\n tng[\"is_central\"] = subhalo_cen_id == subhalo_id\n\n # Broadcast properties of the central subhalo to each host\n tng_hosts[\"central_subhalo_vmax\"] = subs[\"SubhaloVmax\"][tng_hosts[\"GroupFirstSub\"]]\n tng_hosts[\"central_subhalo_vdisp\"] = subs[\"SubhaloVelDisp\"][\n tng_hosts[\"GroupFirstSub\"]\n ]\n\n # Broadcast properties of the central subhalo to each group member\n tng[\"host_halo_vmax\"] = tng_hosts[\"central_subhalo_vmax\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_vdisp\"] = tng_hosts[\"central_subhalo_vdisp\"][subs[\"SubhaloGrNr\"]]\n\n tng_hosts[\"p_vmax\"] = sliding_conditional_percentile(\n tng_hosts[\"logmh\"], tng_hosts[\"central_subhalo_vmax\"], 101\n )\n tng_hosts[\"p_vdisp\"] = sliding_conditional_percentile(\n tng_hosts[\"logmh\"], tng_hosts[\"central_subhalo_vdisp\"], 101\n )\n tng[\"host_halo_p_vmax\"] = tng_hosts[\"p_vmax\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_p_vdisp\"] = tng_hosts[\"p_vdisp\"][subs[\"SubhaloGrNr\"]]\n\n return tng, tng_hosts\n",
"\"\"\"\n\"\"\"\nimport numpy as np\nfrom jax import random as jran\nfrom ..utils.galmatch import calculate_indx_correspondence\n\n\ndef inherit_host_centric_posvel(\n ran_key,\n is_sat_source,\n is_sat_target,\n logmh_host_source,\n logmh_host_target,\n pos_host_source,\n pos_host_target,\n vel_host_source,\n vel_host_target,\n pos_source,\n vel_source,\n dlogmh=0.25,\n):\n pos_sats, vel_sats = _inherit_host_centric_posvel_matching_mhost(\n ran_key,\n logmh_host_source[is_sat_source],\n logmh_host_target[is_sat_target],\n pos_host_source[is_sat_source],\n pos_host_target[is_sat_target],\n vel_host_source[is_sat_source],\n vel_host_target[is_sat_target],\n pos_source[is_sat_source],\n vel_source[is_sat_source],\n dlogmh,\n )\n\n pos_target = np.copy(pos_host_target)\n vel_target = np.copy(vel_host_target)\n pos_target[is_sat_target] = pos_sats\n vel_target[is_sat_target] = vel_sats\n\n return pos_target, vel_target\n\n\ndef _inherit_host_centric_posvel_matching_mhost(\n ran_key,\n logmh_host_source,\n logmh_host_target,\n pos_host_source,\n pos_host_target,\n vel_host_source,\n vel_host_target,\n pos_source,\n vel_source,\n dlogmh,\n):\n delta_pos_source = pos_source - pos_host_source\n delta_vel_source = vel_source - vel_host_source\n\n source_key, target_key = jran.split(ran_key, 2)\n uran_source = np.array(\n (jran.uniform(source_key, shape=logmh_host_source.shape) - 0.5) * 0.05\n )\n uran_target = np.array(\n (jran.uniform(target_key, shape=logmh_host_target.shape) - 0.5) * dlogmh\n )\n\n dd_match, indx_match = calculate_indx_correspondence(\n (logmh_host_source + uran_source,), (logmh_host_target + uran_target,)\n )\n delta_pos_target = delta_pos_source[indx_match]\n delta_vel_target = delta_vel_source[indx_match]\n\n pos_target = pos_host_target + delta_pos_target\n vel_target = vel_host_target + delta_vel_target\n return pos_target, vel_target\n\n\ndef add_central_velbias(is_cen_target, vel_source, vel_host_source, vel_host_target):\n delta_vel_source = vel_source - vel_host_source\n vel_target = np.copy(vel_host_target)\n vel_target[is_cen_target] = (\n vel_target[is_cen_target] + delta_vel_source[is_cen_target]\n )\n return vel_target\n"
] | [
[
"numpy.log10"
],
[
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yurivict/incubator-mxnet | [
"3d38dbde744954854015919d4faf56ac1aea16de"
] | [
"python/mxnet/model.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines\n# pylint: disable=too-many-branches, too-many-statements\n\"\"\"MXNet model module\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport time\nimport logging\nimport warnings\nfrom collections import namedtuple\nimport numpy as np\n\nfrom . import io\nfrom . import ndarray as nd\nfrom . import symbol as sym\nfrom . import optimizer as opt\nfrom . import metric\nfrom . import kvstore as kvs\nfrom .context import Context, cpu\nfrom .initializer import Uniform\nfrom .optimizer import get_updater\nfrom .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data\nfrom .io import DataDesc\nfrom .base import mx_real_t\n\nBASE_ESTIMATOR = object\n\ntry:\n from sklearn.base import BaseEstimator\n BASE_ESTIMATOR = BaseEstimator\nexcept ImportError:\n SKLEARN_INSTALLED = False\n\n# Parameter to pass to batch_end_callback\nBatchEndParam = namedtuple('BatchEndParams',\n ['epoch',\n 'nbatch',\n 'eval_metric',\n 'locals'])\n\ndef _create_sparse_kvstore(kvstore):\n \"\"\"Create kvstore assuming some parameters' storage types are row_sparse.\n\n Parameters\n ----------\n kvstore : KVStore or str\n The kvstore.\n\n Returns\n -------\n kvstore : KVStore\n update_on_kvstore : bool. Always True.\n \"\"\"\n # always update on kvstore\n update_on_kvstore = True\n if isinstance(kvstore, kvs.KVStore):\n kv = kvstore\n elif isinstance(kvstore, str):\n kv = kvs.create(kvstore)\n else:\n raise TypeError(\"Cannot create '%s' KVStore with row_sparse parameters. 
\"\n \"The type must be KVStore or str.\" % kvstore)\n return (kv, update_on_kvstore)\n\ndef _create_kvstore(kvstore, num_device, arg_params):\n \"\"\"Create kvstore\n This function select and create a proper kvstore if given the kvstore type.\n\n Parameters\n ----------\n kvstore : KVStore or str\n The kvstore.\n num_device : int\n The number of devices\n arg_params : dict of str to `NDArray`.\n Model parameter, dict of name to `NDArray` of net's weights.\n \"\"\"\n update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', \"1\")))\n if kvstore is None:\n kv = None\n elif isinstance(kvstore, kvs.KVStore):\n kv = kvstore\n elif isinstance(kvstore, str):\n # create kvstore using the string type\n if num_device == 1 and 'dist' not in kvstore:\n # no need to use kv for single device and single machine\n kv = None\n else:\n kv = kvs.create(kvstore)\n if kvstore == 'local':\n # automatically select a proper local\n max_size = max(np.prod(param.shape) for param in\n arg_params.values())\n if max_size > 1024 * 1024 * 16:\n update_on_kvstore = False\n else:\n raise TypeError('kvstore must be KVStore, str or None')\n\n if kv is None:\n update_on_kvstore = False\n\n return (kv, update_on_kvstore)\n\ndef _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):\n \"\"\"Initialize kvstore\"\"\"\n for idx, param_on_devs in enumerate(param_arrays):\n name = param_names[idx]\n kvstore.init(name, arg_params[name])\n\n if update_on_kvstore:\n kvstore.pull(name, param_on_devs, priority=-idx)\n\ndef _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):\n \"\"\"Perform update of param_arrays from grad_arrays on NCCL kvstore.\"\"\"\n valid_indices = [index for index, grad_list in\n enumerate(grad_arrays) if grad_list[0] is not None]\n valid_grad_arrays = [grad_arrays[i] for i in valid_indices]\n valid_param_arrays = [param_arrays[i] for i in valid_indices]\n valid_param_names = [param_names[i] for i in valid_indices]\n size = len(valid_grad_arrays)\n start = 0\n # Use aggregation by default only with NCCL\n default_batch = '16'\n batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))\n while start < size:\n end = start + batch if start + batch < size else size\n # push gradient, priority is negative index\n kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)\n # pull back the weights\n kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)\n start = end\n\ndef _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):\n \"\"\"Perform update of param_arrays from grad_arrays on kvstore.\"\"\"\n for index, pair in enumerate(zip(param_arrays, grad_arrays)):\n arg_list, grad_list = pair\n if grad_list[0] is None:\n continue\n name = param_names[index]\n # push gradient, priority is negative index\n kvstore.push(name, grad_list, priority=-index)\n # pull back the weights\n kvstore.pull(name, arg_list, priority=-index)\n\ndef _update_params(param_arrays, grad_arrays, updater, num_device,\n kvstore=None, param_names=None):\n \"\"\"Perform update of param_arrays from grad_arrays not on kvstore.\"\"\"\n updates = [[] for _ in range(num_device)]\n for i, pair in enumerate(zip(param_arrays, grad_arrays)):\n arg_list, grad_list = pair\n if grad_list[0] is None:\n continue\n index = i\n if kvstore:\n name = param_names[index]\n # push gradient, priority is negative index\n kvstore.push(name, grad_list, priority=-index)\n # pull back the sum gradients, to 
the same locations.\n kvstore.pull(name, grad_list, priority=-index)\n for k, p in enumerate(zip(arg_list, grad_list)):\n # faked an index here, to make optimizer create diff\n # state for the same index but on diff devs, TODO(mli)\n # use a better solution later\n w, g = p\n updates[k].append((index*num_device+k, g, w))\n for dev_updates in updates:\n # update params if param_arrays and grad_arrays are not empty\n if dev_updates:\n i, w, g = zip(*dev_updates)\n updater(i, w, g)\n\n\ndef _multiple_callbacks(callbacks, *args, **kwargs):\n \"\"\"Sends args and kwargs to any configured callbacks.\n This handles the cases where the 'callbacks' variable\n is ``None``, a single function, or a list.\n \"\"\"\n if isinstance(callbacks, list):\n for cb in callbacks:\n cb(*args, **kwargs)\n return\n if callbacks:\n callbacks(*args, **kwargs)\n\n\ndef _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,\n arg_params, aux_params,\n begin_epoch, end_epoch, epoch_size, optimizer,\n kvstore, update_on_kvstore,\n train_data, eval_data=None, eval_metric=None,\n epoch_end_callback=None, batch_end_callback=None,\n logger=None, work_load_list=None, monitor=None,\n eval_end_callback=None,\n eval_batch_end_callback=None, sym_gen=None):\n \"\"\"Internal training function on multiple devices.\n This function will also work for single device as well.\n\n Parameters\n ----------\n symbol : Symbol\n The network configuration.\n ctx : list of Context\n The training devices.\n arg_names: list of str\n Name of all arguments of the network.\n param_names: list of str\n Name of all trainable parameters of the network.\n aux_names: list of str\n Name of all auxiliary states of the network.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n begin_epoch : int\n The begining training epoch.\n end_epoch : int\n The end training epoch.\n epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : Optimizer\n The optimization algorithm\n train_data : DataIter\n Training data iterator.\n eval_data : DataIter\n Validation data iterator.\n eval_metric : EvalMetric\n An evaluation function or a list of evaluation functions.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback : callable(BatchEndParams)\n A callback that is invoked at end of each batch.\n This can be used to measure speed, get result from evaluation metric. 
etc.\n kvstore : KVStore\n The KVStore.\n update_on_kvstore : bool\n Whether or not perform weight updating on kvstore.\n logger : logging logger\n When not specified, default logger will be used.\n work_load_list : list of float or int, optional\n The list of work load for different devices,\n in the same order as ``ctx``.\n monitor : Monitor, optional\n Monitor installed to executor,\n for monitoring outputs, weights, and gradients for debugging.\n Notes\n -----\n - This function will inplace update the NDArrays in `arg_params` and `aux_states`.\n \"\"\"\n if logger is None:\n logger = logging\n executor_manager = DataParallelExecutorManager(symbol=symbol,\n sym_gen=sym_gen,\n ctx=ctx,\n train_data=train_data,\n param_names=param_names,\n arg_names=arg_names,\n aux_names=aux_names,\n work_load_list=work_load_list,\n logger=logger)\n if monitor:\n executor_manager.install_monitor(monitor)\n\n executor_manager.set_params(arg_params, aux_params)\n\n if not update_on_kvstore:\n updater = get_updater(optimizer)\n else:\n kvstore.set_optimizer(optimizer)\n\n if kvstore:\n _initialize_kvstore(kvstore=kvstore,\n param_arrays=executor_manager.param_arrays,\n arg_params=arg_params,\n param_names=executor_manager.param_names,\n update_on_kvstore=update_on_kvstore)\n\n # Now start training\n train_data.reset()\n for epoch in range(begin_epoch, end_epoch):\n # Training phase\n tic = time.time()\n eval_metric.reset()\n nbatch = 0\n # Iterate over training data.\n while True:\n do_reset = True\n for data_batch in train_data:\n executor_manager.load_data_batch(data_batch)\n\n if monitor is not None:\n monitor.tic()\n\n executor_manager.forward(is_train=True)\n executor_manager.backward()\n\n if update_on_kvstore:\n if 'nccl' in kvstore.type:\n _update_params_on_kvstore_nccl(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n kvstore, executor_manager.param_names)\n else:\n _update_params_on_kvstore(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n kvstore, executor_manager.param_names)\n else:\n _update_params(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n updater=updater,\n num_device=len(ctx),\n kvstore=kvstore,\n param_names=executor_manager.param_names)\n\n if monitor is not None:\n monitor.toc_print()\n\n # evaluate at end, so we can lazy copy\n executor_manager.update_metric(eval_metric, data_batch.label)\n\n nbatch += 1\n # batch callback (for print purpose)\n if batch_end_callback is not None:\n batch_end_params = BatchEndParam(epoch=epoch,\n nbatch=nbatch,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(batch_end_callback, batch_end_params)\n\n # this epoch is done possibly earlier\n if epoch_size is not None and nbatch >= epoch_size:\n do_reset = False\n break\n\n if do_reset:\n logger.info('Epoch[%d] Resetting Data Iterator', epoch)\n train_data.reset()\n\n # this epoch is done\n if epoch_size is None or nbatch >= epoch_size:\n break\n\n toc = time.time()\n logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))\n\n if epoch_end_callback or epoch + 1 == end_epoch:\n executor_manager.copy_to(arg_params, aux_params)\n\n _multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)\n\n # evaluation\n if eval_data:\n eval_metric.reset()\n eval_data.reset()\n total_num_batch = 0\n for i, eval_batch in enumerate(eval_data):\n executor_manager.load_data_batch(eval_batch)\n executor_manager.forward(is_train=False)\n executor_manager.update_metric(eval_metric, eval_batch.label)\n if eval_batch_end_callback is not 
None:\n batch_end_params = BatchEndParam(epoch=epoch,\n nbatch=i,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(eval_batch_end_callback, batch_end_params)\n total_num_batch += 1\n if eval_end_callback is not None:\n eval_end_params = BatchEndParam(epoch=epoch,\n nbatch=total_num_batch,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(eval_end_callback, eval_end_params)\n eval_data.reset()\n # end of all epochs\n\n\ndef save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):\n \"\"\"Checkpoint the model data into file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n The epoch number of the model.\n symbol : Symbol\n The input Symbol.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n remove_amp_cast : bool, optional\n Whether to remove the amp_cast and amp_multicast operators, before saving the model.\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n if symbol is not None:\n symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)\n\n save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}\n save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})\n param_name = '%s-%04d.params' % (prefix, epoch)\n nd.save(param_name, save_dict)\n logging.info('Saved checkpoint to \\\"%s\\\"', param_name)\n\n\ndef load_params(prefix, epoch):\n \"\"\"Load params from a file\n \"\"\"\n save_dict = nd.load(\"%s-%04d.params\" % (prefix, epoch))\n arg_params = {}\n aux_params = {}\n if not save_dict:\n logging.warning(\"Params file '%s' is empty\", '%s-%04d.params' % (prefix, epoch))\n return (arg_params, aux_params)\n for k, v in save_dict.items():\n tp, name = k.split(\":\", 1)\n if tp == \"arg\":\n arg_params[name] = v\n if tp == \"aux\":\n aux_params[name] = v\n return (arg_params, aux_params)\n\ndef load_checkpoint(prefix, epoch):\n \"\"\"Load model checkpoint from file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n Epoch number of model we would like to load.\n\n Returns\n -------\n symbol : Symbol\n The symbol configuration of computation network.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n\n Notes\n -----\n - Symbol will be loaded from ``prefix-symbol.json``.\n - Parameters will be loaded from ``prefix-epoch.params``.\n \"\"\"\n symbol = sym.load('%s-symbol.json' % prefix)\n arg_params, aux_params = load_params(prefix, epoch)\n return (symbol, arg_params, aux_params)\n\nfrom .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position\n\nclass FeedForward(BASE_ESTIMATOR):\n \"\"\"Model class of MXNet for training and predicting feedforward nets.\n This class is designed for a single-data single output supervised network.\n\n Parameters\n ----------\n symbol : Symbol\n The symbol configuration of computation network.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n To use multi GPU training, pass in a list of gpu contexts.\n num_epoch : int, optional\n Training parameter, number of training epochs(epochs).\n 
epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : str or Optimizer, optional\n Training parameter, name or optimizer object for training.\n initializer : initializer function, optional\n Training parameter, the initialization scheme used.\n numpy_batch_size : int, optional\n The batch size of training data.\n Only needed when input array is numpy.\n arg_params : dict of str to NDArray, optional\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray, optional\n Model parameter, dict of name to NDArray of net's auxiliary states.\n allow_extra_params : boolean, optional\n Whether allow extra parameters that are not needed by symbol\n to be passed by aux_params and ``arg_params``.\n If this is True, no error will be thrown when ``aux_params`` and ``arg_params``\n contain more parameters than needed.\n begin_epoch : int, optional\n The begining training epoch.\n kwargs : dict\n The additional keyword arguments passed to optimizer.\n \"\"\"\n def __init__(self, symbol, ctx=None,\n num_epoch=None, epoch_size=None, optimizer='sgd',\n initializer=Uniform(0.01),\n numpy_batch_size=128,\n arg_params=None, aux_params=None,\n allow_extra_params=False,\n begin_epoch=0,\n **kwargs):\n warnings.warn(\n '\\033[91mmxnet.model.FeedForward has been deprecated. ' + \\\n 'Please use mxnet.mod.Module instead.\\033[0m',\n DeprecationWarning, stacklevel=2)\n\n if isinstance(symbol, sym.Symbol):\n self.symbol = symbol\n self.sym_gen = None\n else:\n assert(callable(symbol))\n self.symbol = None\n self.sym_gen = symbol\n\n # model parameters\n self.arg_params = arg_params\n self.aux_params = aux_params\n self.allow_extra_params = allow_extra_params\n\n self.argument_checked = False\n if self.sym_gen is None:\n self._check_arguments()\n\n # basic configuration\n if ctx is None:\n ctx = [cpu()]\n elif isinstance(ctx, Context):\n ctx = [ctx]\n self.ctx = ctx\n # training parameters\n self.num_epoch = num_epoch\n self.epoch_size = epoch_size\n self.kwargs = kwargs.copy()\n self.optimizer = optimizer\n self.initializer = initializer\n self.numpy_batch_size = numpy_batch_size\n # internal helper state\n self._pred_exec = None\n self.begin_epoch = begin_epoch\n\n def _check_arguments(self):\n \"\"\"verify the argument of the default symbol and user provided parameters\"\"\"\n if self.argument_checked:\n return\n\n assert(self.symbol is not None)\n self.argument_checked = True\n\n # check if symbol contain duplicated names.\n _check_arguments(self.symbol)\n # rematch parameters to delete useless ones\n if self.allow_extra_params:\n if self.arg_params:\n arg_names = set(self.symbol.list_arguments())\n self.arg_params = {k : v for k, v in self.arg_params.items()\n if k in arg_names}\n if self.aux_params:\n aux_names = set(self.symbol.list_auxiliary_states())\n self.aux_params = {k : v for k, v in self.aux_params.items()\n if k in aux_names}\n\n\n @staticmethod\n def _is_data_arg(name):\n \"\"\"Check if name is a data argument.\"\"\"\n return name.endswith('data') or name.endswith('label')\n\n def _init_params(self, inputs, overwrite=False):\n \"\"\"Initialize weight parameters and auxiliary states.\"\"\"\n inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]\n input_shapes = {item.name: item.shape for item in inputs}\n arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)\n assert arg_shapes is not None\n input_dtypes = {item.name: item.dtype for item in 
inputs}\n arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)\n assert arg_dtypes is not None\n\n arg_names = self.symbol.list_arguments()\n input_names = input_shapes.keys()\n param_names = [key for key in arg_names if key not in input_names]\n aux_names = self.symbol.list_auxiliary_states()\n\n param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)\n if x[0] in param_names]\n arg_params = {k : nd.zeros(shape=s, dtype=t)\n for k, s, t in param_name_attrs}\n aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)\n if x[0] in aux_names]\n aux_params = {k : nd.zeros(shape=s, dtype=t)\n for k, s, t in aux_name_attrs}\n\n for k, v in arg_params.items():\n if self.arg_params and k in self.arg_params and (not overwrite):\n arg_params[k][:] = self.arg_params[k][:]\n else:\n self.initializer(k, v)\n\n for k, v in aux_params.items():\n if self.aux_params and k in self.aux_params and (not overwrite):\n aux_params[k][:] = self.aux_params[k][:]\n else:\n self.initializer(k, v)\n\n self.arg_params = arg_params\n self.aux_params = aux_params\n return (arg_names, list(param_names), aux_names)\n\n def __getstate__(self):\n this = self.__dict__.copy()\n this['_pred_exec'] = None\n return this\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n def _init_predictor(self, input_shapes, type_dict=None):\n \"\"\"Initialize the predictor module for running prediction.\"\"\"\n shapes = {name: self.arg_params[name].shape for name in self.arg_params}\n shapes.update(dict(input_shapes))\n if self._pred_exec is not None:\n arg_shapes, _, _ = self.symbol.infer_shape(**shapes)\n assert arg_shapes is not None, \"Incomplete input shapes\"\n pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]\n if arg_shapes == pred_shapes:\n return\n # for now only use the first device\n pred_exec = self.symbol.simple_bind(\n self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)\n pred_exec.copy_params_from(self.arg_params, self.aux_params)\n\n _check_arguments(self.symbol)\n self._pred_exec = pred_exec\n\n def _init_iter(self, X, y, is_train):\n \"\"\"Initialize the iterator given input.\"\"\"\n if isinstance(X, (np.ndarray, nd.NDArray)):\n if y is None:\n if is_train:\n raise ValueError('y must be specified when X is numpy.ndarray')\n y = np.zeros(X.shape[0])\n if not isinstance(y, (np.ndarray, nd.NDArray)):\n raise TypeError('y must be ndarray when X is numpy.ndarray')\n if X.shape[0] != y.shape[0]:\n raise ValueError(\"The numbers of data points and labels not equal\")\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if y.ndim != 1:\n raise ValueError(\"Label must be 1D or 2D (with 2nd dimension being 1)\")\n if is_train:\n return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),\n shuffle=is_train, last_batch_handle='roll_over')\n else:\n return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)\n if not isinstance(X, io.DataIter):\n raise TypeError('X must be DataIter, NDArray or numpy.ndarray')\n return X\n\n def _init_eval_iter(self, eval_data):\n \"\"\"Initialize the iterator given eval_data.\"\"\"\n if eval_data is None:\n return eval_data\n if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:\n if eval_data[0] is not None:\n if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):\n return eval_data[0]\n input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)\n else eval_data[0])\n input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)\n else 
eval_data[1])\n return self._init_iter(input_data, input_label, is_train=True)\n else:\n raise ValueError(\"Eval data is NONE\")\n if not isinstance(eval_data, io.DataIter):\n raise TypeError('Eval data must be DataIter, or ' \\\n 'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')\n return eval_data\n\n def predict(self, X, num_batch=None, return_data=False, reset=True):\n \"\"\"Run the prediction, always only use one device.\n\n Parameters\n ----------\n X : mxnet.DataIter\n num_batch : int or None\n The number of batch to run. Go though all batches if ``None``.\n Returns\n -------\n y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.\n The predicted value of the output.\n \"\"\"\n X = self._init_iter(X, None, is_train=False)\n\n if reset:\n X.reset()\n data_shapes = X.provide_data\n data_names = [x[0] for x in data_shapes]\n type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())\n for x in X.provide_data:\n if isinstance(x, DataDesc):\n type_dict[x.name] = x.dtype\n else:\n type_dict[x[0]] = mx_real_t\n\n self._init_predictor(data_shapes, type_dict)\n batch_size = X.batch_size\n data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]\n output_list = [[] for _ in range(len(self._pred_exec.outputs))]\n if return_data:\n data_list = [[] for _ in X.provide_data]\n label_list = [[] for _ in X.provide_label]\n\n i = 0\n for batch in X:\n\n _load_data(batch, data_arrays)\n self._pred_exec.forward(is_train=False)\n padded = batch.pad\n real_size = batch_size - padded\n\n for o_list, o_nd in zip(output_list, self._pred_exec.outputs):\n o_list.append(o_nd[0:real_size].asnumpy())\n\n if return_data:\n for j, x in enumerate(batch.data):\n data_list[j].append(x[0:real_size].asnumpy())\n for j, x in enumerate(batch.label):\n label_list[j].append(x[0:real_size].asnumpy())\n i += 1\n if num_batch is not None and i == num_batch:\n break\n\n outputs = [np.concatenate(x) for x in output_list]\n if len(outputs) == 1:\n outputs = outputs[0]\n\n if return_data:\n data = [np.concatenate(x) for x in data_list]\n label = [np.concatenate(x) for x in label_list]\n if len(data) == 1:\n data = data[0]\n if len(label) == 1:\n label = label[0]\n return outputs, data, label\n else:\n return outputs\n\n def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):\n \"\"\"Run the model given an input and calculate the score\n as assessed by an evaluation metric.\n\n Parameters\n ----------\n X : mxnet.DataIter\n eval_metric : metric.metric\n The metric for calculating score.\n num_batch : int or None\n The number of batches to run. 
Go though all batches if ``None``.\n Returns\n -------\n s : float\n The final score.\n \"\"\"\n # setup metric\n if not isinstance(eval_metric, metric.EvalMetric):\n eval_metric = metric.create(eval_metric)\n\n X = self._init_iter(X, None, is_train=False)\n if reset:\n X.reset()\n\n data_shapes = X.provide_data\n data_names = [x[0] for x in data_shapes]\n type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())\n for x in X.provide_data:\n if isinstance(x, DataDesc):\n type_dict[x.name] = x.dtype\n else:\n type_dict[x[0]] = mx_real_t\n\n self._init_predictor(data_shapes, type_dict)\n data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]\n\n for i, batch in enumerate(X):\n if num_batch is not None and i == num_batch:\n break\n _load_data(batch, data_arrays)\n self._pred_exec.forward(is_train=False)\n eval_metric.update(batch.label, self._pred_exec.outputs)\n\n if batch_end_callback is not None:\n batch_end_params = BatchEndParam(epoch=0,\n nbatch=i,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(batch_end_callback, batch_end_params)\n return eval_metric.get()[1]\n\n def fit(self, X, y=None, eval_data=None, eval_metric='acc',\n epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,\n work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),\n eval_batch_end_callback=None):\n \"\"\"Fit the model.\n\n Parameters\n ----------\n X : DataIter, or numpy.ndarray/NDArray\n Training data. If `X` is a `DataIter`, the name or (if name not available)\n the position of its outputs should match the corresponding variable\n names defined in the symbolic graph.\n y : numpy.ndarray/NDArray, optional\n Training set label.\n If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.\n While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be\n the same as `X`, i.e. the number of data points and labels should be equal.\n eval_data : DataIter or numpy.ndarray/list/NDArray pair\n If eval_data is numpy.ndarray/list/NDArray pair,\n it should be ``(valid_data, valid_label)``.\n eval_metric : metric.EvalMetric or str or callable\n The evaluation metric. 
This could be the name of evaluation metric\n or a custom evaluation function that returns statistics\n based on a minibatch.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback: callable(epoch)\n A callback that is invoked at end of each batch for purposes of printing.\n kvstore: KVStore or str, optional\n The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'\n In default uses 'local', often no need to change for single machiine.\n logger : logging logger, optional\n When not specified, default logger will be used.\n work_load_list : float or int, optional\n The list of work load for different devices,\n in the same order as `ctx`.\n\n Note\n ----\n KVStore behavior\n - 'local', multi-devices on a single machine, will automatically choose best type.\n - 'dist_sync', multiple machines communicating via BSP.\n - 'dist_async', multiple machines with asynchronous communication.\n \"\"\"\n\n data = self._init_iter(X, y, is_train=True)\n eval_data = self._init_eval_iter(eval_data)\n\n if self.sym_gen:\n self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member\n self._check_arguments()\n self.kwargs[\"sym\"] = self.symbol\n\n arg_names, param_names, aux_names = \\\n self._init_params(data.provide_data+data.provide_label)\n\n # setup metric\n if not isinstance(eval_metric, metric.EvalMetric):\n eval_metric = metric.create(eval_metric)\n\n # create kvstore\n (kvstore, update_on_kvstore) = _create_kvstore(\n kvstore, len(self.ctx), self.arg_params)\n\n param_idx2name = {}\n if update_on_kvstore:\n param_idx2name.update(enumerate(param_names))\n else:\n for i, n in enumerate(param_names):\n for k in range(len(self.ctx)):\n param_idx2name[i*len(self.ctx)+k] = n\n self.kwargs[\"param_idx2name\"] = param_idx2name\n\n # init optmizer\n if isinstance(self.optimizer, str):\n batch_size = data.batch_size\n if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:\n batch_size *= kvstore.num_workers\n optimizer = opt.create(self.optimizer,\n rescale_grad=(1.0/batch_size),\n **(self.kwargs))\n elif isinstance(self.optimizer, opt.Optimizer):\n if not optimizer.idx2name:\n optimizer.idx2name = param_idx2name.copy()\n optimizer = self.optimizer\n\n # do training\n _train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,\n self.arg_params, self.aux_params,\n begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,\n epoch_size=self.epoch_size,\n optimizer=optimizer,\n train_data=data, eval_data=eval_data,\n eval_metric=eval_metric,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n kvstore=kvstore, update_on_kvstore=update_on_kvstore,\n logger=logger, work_load_list=work_load_list, monitor=monitor,\n eval_end_callback=eval_end_callback,\n eval_batch_end_callback=eval_batch_end_callback,\n sym_gen=self.sym_gen)\n\n\n def save(self, prefix, epoch=None, remove_amp_cast=True):\n \"\"\"Checkpoint the model checkpoint into file.\n You can also use `pickle` to do the job if you only work on Python.\n The advantage of `load` and `save` (as compared to `pickle`) is that\n the resulting file can be loaded from other MXNet language bindings.\n One can also directly `load`/`save` from/to cloud storage(S3, HDFS)\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n remove_amp_cast : bool, optional\n Whether to remove the amp_cast and amp_multicast operators, 
before saving the model.\n\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n if epoch is None:\n epoch = self.num_epoch\n assert epoch is not None\n save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)\n\n @staticmethod\n def load(prefix, epoch, ctx=None, **kwargs):\n \"\"\"Load model checkpoint from file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n epoch number of model we would like to load.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n kwargs : dict\n Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.\n\n Returns\n -------\n model : FeedForward\n The loaded model that can be used for prediction.\n\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)\n return FeedForward(symbol, ctx=ctx,\n arg_params=arg_params, aux_params=aux_params,\n begin_epoch=epoch,\n **kwargs)\n\n @staticmethod\n def create(symbol, X, y=None, ctx=None,\n num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),\n eval_data=None, eval_metric='acc',\n epoch_end_callback=None, batch_end_callback=None,\n kvstore='local', logger=None, work_load_list=None,\n eval_end_callback=LogValidationMetricsCallback(),\n eval_batch_end_callback=None, **kwargs):\n \"\"\"Functional style to create a model.\n This function is more consistent with functional\n languages such as R, where mutation is not allowed.\n\n Parameters\n ----------\n symbol : Symbol\n The symbol configuration of a computation network.\n X : DataIter\n Training data.\n y : numpy.ndarray, optional\n If `X` is a ``numpy.ndarray``, `y` must be set.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n To use multi-GPU training, pass in a list of GPU contexts.\n num_epoch : int, optional\n The number of training epochs(epochs).\n epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : str or Optimizer, optional\n The name of the chosen optimizer, or an optimizer object, used for training.\n initializer : initializer function, optional\n The initialization scheme used.\n eval_data : DataIter or numpy.ndarray pair\n If `eval_set` is ``numpy.ndarray`` pair, it should\n be (`valid_data`, `valid_label`).\n eval_metric : metric.EvalMetric or str or callable\n The evaluation metric. 
Can be the name of an evaluation metric\n or a custom evaluation function that returns statistics\n based on a minibatch.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback: callable(epoch)\n A callback that is invoked at end of each batch for print purposes.\n kvstore: KVStore or str, optional\n The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'.\n Defaults to 'local', often no need to change for single machine.\n logger : logging logger, optional\n When not specified, default logger will be used.\n work_load_list : list of float or int, optional\n The list of work load for different devices,\n in the same order as `ctx`.\n \"\"\"\n model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,\n epoch_size=epoch_size,\n optimizer=optimizer, initializer=initializer, **kwargs)\n model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n kvstore=kvstore,\n logger=logger,\n work_load_list=work_load_list,\n eval_end_callback=eval_end_callback,\n eval_batch_end_callback=eval_batch_end_callback)\n return model\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
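The `mxnet/model.py` row above documents the `prefix-symbol.json` / `prefix-epoch.params` checkpoint convention used by `save_checkpoint` and `load_checkpoint`, and its `FeedForward` class points users toward `mxnet.mod.Module`. A hedged sketch of loading such a checkpoint for inference with the Module API — the `"resnet"` prefix, epoch number, and input shape are placeholders, not values from the source:

```python
import mxnet as mx

# load_checkpoint reads "resnet-symbol.json" and "resnet-0010.params" (see the docstring above).
sym, arg_params, aux_params = mx.model.load_checkpoint("resnet", 10)

# Bind the symbol for forward-only execution and install the loaded weights.
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
mod.bind(data_shapes=[("data", (1, 3, 224, 224))], for_training=False)
mod.set_params(arg_params, aux_params, allow_missing=True)

# Run one forward pass on a dummy batch and inspect the output shape.
batch = mx.io.DataBatch(data=[mx.nd.zeros((1, 3, 224, 224))])
mod.forward(batch, is_train=False)
print(mod.get_outputs()[0].shape)
```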
feiwu77777/Face-detection-and-tracking | [
"1135d2d93d5b667110551dc7e4b985b5861eb380"
] | [
"eval_tiny_one_image.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 15:49:15 2018\r\n\r\n@author: fei.wu\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\nimport tiny_face_model\r\nimport util\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\nimport pylab as pl\r\nfrom scipy.special import expit\r\n\r\nMAX_INPUT_DIM = 5000.0\r\n\r\ndef overlay_bounding_boxes(raw_img, refined_bboxes, lw):\r\n \"\"\"Overlay bounding boxes of face on images.\r\n Args:\r\n raw_img:\r\n A target image.\r\n refined_bboxes:\r\n Bounding boxes of detected faces.\r\n lw: \r\n Line width of bounding boxes. If zero specified,\r\n this is determined based on confidence of each detection.\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n # Overlay bounding boxes on an image with the color based on the confidence.\r\n for r in refined_bboxes:\r\n _score = expit(r[4])\r\n cm_idx = int(np.ceil(_score * 255))\r\n rect_color = [int(np.ceil(x * 255)) for x in util.cm_data[cm_idx]] # parula\r\n _lw = lw\r\n if lw == 0: # line width of each bounding box is adaptively determined.\r\n bw, bh = r[2] - r[0] + 1, r[3] - r[0] + 1\r\n _lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))\r\n _lw = int(np.ceil(_lw * _score))\r\n\r\n _r = [int(x) for x in r[:4]]\r\n cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)\r\n\r\n\r\ndef evaluate(weight_file_path, frame, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):\r\n \"\"\"Detect faces in images.\r\n Args:\r\n prob_thresh:\r\n The threshold of detection confidence.\r\n nms_thresh:\r\n The overlap threshold of non maximum suppression\r\n weight_file_path: \r\n A pretrained weight file in the pickle format \r\n generated by matconvnet_hr101_to_tf.py.\r\n data_dir: \r\n A directory which contains images.\r\n output_dir: \r\n A directory into which images with detected faces are output.\r\n lw: \r\n Line width of bounding boxes. If zero specified,\r\n this is determined based on confidence of each detection.\r\n display:\r\n Display tiny face images on window.\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n # placeholder of input images. 
Currently batch size of one is supported.\r\n x = tf.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c\r\n\r\n # Create the tiny face model which weights are loaded from a pretrained model.\r\n model = tiny_face_model.Model(weight_file_path)\r\n score_final = model.tiny_face(x)\r\n\r\n # Load an average image and clusters(reference boxes of templates).\r\n with open(weight_file_path, \"rb\") as f:\r\n _, mat_params_dict = pickle.load(f)\r\n\r\n average_image = model.get_data_by_key(\"average_image\")\r\n clusters = model.get_data_by_key(\"clusters\")\r\n clusters_h = clusters[:, 3] - clusters[:, 1] + 1\r\n clusters_w = clusters[:, 2] - clusters[:, 0] + 1\r\n normal_idx = np.where(clusters[:, 4] == 1)\r\n\r\n # main\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n raw_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n raw_img_f = raw_img.astype(np.float32)\r\n \r\n def _calc_scales():\r\n raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]\r\n min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),\r\n np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))\r\n max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))\r\n scales_down = pl.frange(min_scale, 0, 1.)\r\n scales_up = pl.frange(0.5, max_scale, 0.5)\r\n scales_pow = np.hstack((scales_down, scales_up))\r\n scales = np.power(2.0, scales_pow)\r\n return scales\r\n \r\n scales = _calc_scales()\r\n\r\n # initialize output\r\n bboxes = np.empty(shape=(0, 5))\r\n \r\n # process input at different scales\r\n for s in scales:\r\n img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)\r\n img = img - average_image\r\n img = img[np.newaxis, :]\r\n \r\n # we don't run every template on every scale ids of templates to ignore\r\n tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))\r\n ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))\r\n \r\n # run through the net\r\n score_final_tf = sess.run(score_final, feed_dict={x: img})\r\n \r\n # collect scores\r\n score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]\r\n prob_cls_tf = expit(score_cls_tf)\r\n prob_cls_tf[0, :, :, ignoredTids] = 0.0\r\n \r\n def _calc_bounding_boxes():\r\n # threshold for detection\r\n _, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)\r\n \r\n # interpret heatmap into bounding boxes\r\n cy = fy * 8 - 1\r\n cx = fx * 8 - 1\r\n ch = clusters[fc, 3] - clusters[fc, 1] + 1\r\n cw = clusters[fc, 2] - clusters[fc, 0] + 1\r\n \r\n # extract bounding box refinement\r\n Nt = clusters.shape[0]\r\n tx = score_reg_tf[0, :, :, 0:Nt]\r\n ty = score_reg_tf[0, :, :, Nt:2*Nt]\r\n tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]\r\n th = score_reg_tf[0, :, :, 3*Nt:4*Nt]\r\n \r\n # refine bounding boxes\r\n dcx = cw * tx[fy, fx, fc]\r\n dcy = ch * ty[fy, fx, fc]\r\n rcx = cx + dcx\r\n rcy = cy + dcy\r\n rcw = cw * np.exp(tw[fy, fx, fc])\r\n rch = ch * np.exp(th[fy, fx, fc])\r\n \r\n scores = score_cls_tf[0, fy, fx, fc]\r\n tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))\r\n tmp_bboxes = np.vstack((tmp_bboxes / s, scores))\r\n tmp_bboxes = tmp_bboxes.transpose()\r\n return tmp_bboxes\r\n \r\n tmp_bboxes = _calc_bounding_boxes()\r\n bboxes = np.vstack((bboxes, tmp_bboxes)) # <class 'tuple'>: (5265, 5) \r\n \r\n # non maximum suppression\r\n # refind_idx = util.nms(bboxes, nms_thresh)\r\n refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),\r\n 
tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),\r\n max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)\r\n refind_idx = sess.run(refind_idx)\r\n refined_bboxes = bboxes[refind_idx]\r\n overlay_bounding_boxes(raw_img, refined_bboxes, lw)\r\n if display:\r\n # plt.axis('off')\r\n plt.imshow(raw_img)\r\n plt.show() \r\n return refined_bboxes\r\n\r\n\r\ndef main(frame):\r\n print(\"Searching faces...\")\r\n with tf.Graph().as_default():\r\n faces = evaluate(\r\n weight_file_path= \"weights.pckl\", frame = frame,\r\n prob_thresh=0.7, nms_thresh=0.1, #non max suppression threshold,\r\n lw=2, display= False)\r\n return faces\r\n\r\n"
] | [
[
"tensorflow.convert_to_tensor",
"numpy.hstack",
"matplotlib.pyplot.imshow",
"tensorflow.Graph",
"scipy.special.expit",
"numpy.power",
"numpy.vstack",
"tensorflow.placeholder",
"numpy.ceil",
"tensorflow.global_variables_initializer",
"numpy.max",
"tensorflow.Session",
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
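The `eval_tiny_one_image.py` row above exposes a `main(frame)` entry point that runs the Tiny Face detector on a single BGR frame and returns the refined bounding boxes as rows of `[x1, y1, x2, y2, raw_score]`. A hedged usage sketch under that assumption — the image filename is a placeholder, `weights.pckl` must exist as the script expects, and a TensorFlow 1.x environment is assumed per the version list above:

```python
import cv2
import eval_tiny_one_image  # the module shown in the row above

frame = cv2.imread("group_photo.jpg")      # BGR image, as produced by cv2 and expected by evaluate()
boxes = eval_tiny_one_image.main(frame)    # rows are [x1, y1, x2, y2, raw_score]

for x1, y1, x2, y2, _score in boxes:
    cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imwrite("group_photo_faces.jpg", frame)
```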
valanm22/pytorch-lightning | [
"5d190eabd28671a6222741f5dd9ee3f214e519b1",
"5d190eabd28671a6222741f5dd9ee3f214e519b1"
] | [
"pytorch_lightning/trainer/trainer.py",
"tests/deprecated_api/test_remove_1-8.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trainer to automate the training.\"\"\"\nimport inspect\nimport logging\nimport math\nimport os\nimport traceback\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom copy import deepcopy\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Type, Union\nfrom weakref import proxy\n\nimport torch\nfrom packaging.version import Version\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.accelerators import Accelerator, GPUAccelerator, IPUAccelerator, TPUAccelerator\nfrom pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase\nfrom pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter\nfrom pytorch_lightning.core.datamodule import LightningDataModule\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.loggers.base import DummyLogger, LoggerCollection\nfrom pytorch_lightning.loggers.tensorboard import TensorBoardLogger\nfrom pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop\nfrom pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop\nfrom pytorch_lightning.loops.fit_loop import FitLoop\nfrom pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress\nfrom pytorch_lightning.plugins import (\n ApexMixedPrecisionPlugin,\n NativeMixedPrecisionPlugin,\n PLUGIN_INPUT,\n PrecisionPlugin,\n)\nfrom pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment\nfrom pytorch_lightning.profiler import (\n AdvancedProfiler,\n BaseProfiler,\n PassThroughProfiler,\n PyTorchProfiler,\n SimpleProfiler,\n XLAProfiler,\n)\nfrom pytorch_lightning.strategies import ParallelStrategy, Strategy\nfrom pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy\nfrom pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin\nfrom pytorch_lightning.trainer.configuration_validator import verify_loop_configurations\nfrom pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector\nfrom pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector\nfrom pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector\nfrom pytorch_lightning.trainer.connectors.data_connector import DataConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection\nfrom pytorch_lightning.trainer.connectors.signal_connector import SignalConnector\nfrom pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin\nfrom pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin\nfrom pytorch_lightning.trainer.states import 
RunningStage, TrainerFn, TrainerState, TrainerStatus\nfrom pytorch_lightning.trainer.supporters import CombinedLoader\nfrom pytorch_lightning.tuner.lr_finder import _LRFinder\nfrom pytorch_lightning.tuner.tuning import Tuner\nfrom pytorch_lightning.utilities import (\n _IPU_AVAILABLE,\n _TPU_AVAILABLE,\n AMPType,\n device_parser,\n GradClipAlgorithmType,\n parsing,\n)\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom pytorch_lightning.utilities.argparse import (\n _defaults_from_env_vars,\n add_argparse_args,\n from_argparse_args,\n parse_argparser,\n parse_env_variables,\n)\nfrom pytorch_lightning.utilities.auto_restart import _add_capture_metadata_collate\nfrom pytorch_lightning.utilities.cloud_io import get_filesystem\nfrom pytorch_lightning.utilities.data import _auto_add_worker_init_fn, has_len_all_ranks\nfrom pytorch_lightning.utilities.distributed import distributed_available\nfrom pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _fault_tolerant_training\nfrom pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.seed import isolate_rng\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.types import (\n _EVALUATE_OUTPUT,\n _PATH,\n _PREDICT_OUTPUT,\n EVAL_DATALOADERS,\n LRSchedulerConfig,\n STEP_OUTPUT,\n TRAIN_DATALOADERS,\n)\nfrom pytorch_lightning.utilities.warnings import PossibleUserWarning\n\nlog = logging.getLogger(__name__)\n# warnings to ignore in trainer\nwarnings.filterwarnings(\n \"ignore\", message=\"torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead\"\n)\n\n\nclass Trainer(\n TrainerCallbackHookMixin, # TODO: Remove in v1.8\n TrainerOptimizersMixin, # TODO: Remove in v1.8\n TrainerDataLoadingMixin, # TODO: Remove in v1.8\n):\n @_defaults_from_env_vars\n def __init__(\n self,\n logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,\n checkpoint_callback: Optional[bool] = None,\n enable_checkpointing: bool = True,\n callbacks: Optional[Union[List[Callback], Callback]] = None,\n default_root_dir: Optional[str] = None,\n gradient_clip_val: Optional[Union[int, float]] = None,\n gradient_clip_algorithm: Optional[str] = None,\n process_position: int = 0,\n num_nodes: int = 1,\n num_processes: Optional[int] = None,\n devices: Optional[Union[List[int], str, int]] = None,\n gpus: Optional[Union[List[int], str, int]] = None,\n auto_select_gpus: bool = False,\n tpu_cores: Optional[Union[List[int], str, int]] = None,\n ipus: Optional[int] = None,\n log_gpu_memory: Optional[str] = None, # TODO: Remove in 1.7\n progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7\n enable_progress_bar: bool = True,\n overfit_batches: Union[int, float] = 0.0,\n track_grad_norm: Union[int, float, str] = -1,\n check_val_every_n_epoch: int = 1,\n fast_dev_run: Union[int, bool] = False,\n accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,\n max_epochs: Optional[int] = None,\n min_epochs: Optional[int] = None,\n max_steps: int = -1,\n min_steps: Optional[int] = None,\n max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,\n limit_train_batches: Optional[Union[int, float]] = 
None,\n limit_val_batches: Optional[Union[int, float]] = None,\n limit_test_batches: Optional[Union[int, float]] = None,\n limit_predict_batches: Optional[Union[int, float]] = None,\n val_check_interval: Optional[Union[int, float]] = None,\n flush_logs_every_n_steps: Optional[int] = None,\n log_every_n_steps: int = 50,\n accelerator: Optional[Union[str, Accelerator]] = None,\n strategy: Optional[Union[str, Strategy]] = None,\n sync_batchnorm: bool = False,\n precision: Union[int, str] = 32,\n enable_model_summary: bool = True,\n weights_summary: Optional[str] = \"top\",\n weights_save_path: Optional[str] = None, # TODO: Remove in 1.8\n num_sanity_val_steps: int = 2,\n resume_from_checkpoint: Optional[Union[Path, str]] = None,\n profiler: Optional[Union[BaseProfiler, str]] = None,\n benchmark: Optional[bool] = None,\n deterministic: bool = False,\n reload_dataloaders_every_n_epochs: int = 0,\n auto_lr_find: Union[bool, str] = False,\n replace_sampler_ddp: bool = True,\n detect_anomaly: bool = False,\n auto_scale_batch_size: Union[str, bool] = False,\n prepare_data_per_node: Optional[bool] = None,\n plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,\n amp_backend: str = \"native\",\n amp_level: Optional[str] = None,\n move_metrics_to_cpu: bool = False,\n multiple_trainloader_mode: str = \"max_size_cycle\",\n stochastic_weight_avg: bool = False,\n terminate_on_nan: Optional[bool] = None,\n ) -> None:\n r\"\"\"\n Customize every aspect of training via flags.\n\n Args:\n\n accelerator: Supports passing different accelerator types (\"cpu\", \"gpu\", \"tpu\", \"ipu\", \"auto\")\n as well as custom accelerator instances.\n\n .. deprecated:: v1.5\n Passing training strategies (e.g., 'ddp') to ``accelerator`` has been deprecated in v1.5.0\n and will be removed in v1.7.0. Please use the ``strategy`` argument instead.\n\n accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.\n Default: ``None``.\n\n amp_backend: The mixed precision backend to use (\"native\" or \"apex\").\n Default: ``'native''``.\n\n amp_level: The optimization level to use (O1, O2, etc...). By default it will be set to \"O2\"\n if ``amp_backend`` is set to \"apex\".\n\n auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,\n trying to optimize initial learning for faster convergence. trainer.tune() method will\n set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.\n To use a different key set a string instead of True with the key name.\n Default: ``False``.\n\n auto_scale_batch_size: If set to True, will `initially` run a batch size\n finder trying to find the largest batch size that fits into memory.\n The result will be stored in self.batch_size in the LightningModule.\n Additionally, can be set to either `power` that estimates the batch size through\n a power search or `binsearch` that estimates the batch size through a binary search.\n Default: ``False``.\n\n auto_select_gpus: If enabled and ``gpus`` is an integer, pick available\n gpus automatically. This is especially useful when\n GPUs are configured to be in \"exclusive mode\", such\n that only one process at a time can access them.\n Default: ``False``.\n\n benchmark: Sets ``torch.backends.cudnn.benchmark``.\n Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`\n is ``False``. Overwrite to manually set a different value. 
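            For illustration, a typical combination of the tuning-related flags above
            (``MyModel`` stands in for any user ``LightningModule`` exposing ``self.lr``
            and ``self.batch_size``)::

                model = MyModel()
                trainer = Trainer(
                    accelerator="auto",
                    auto_lr_find=True,                      # tune() stores the suggested LR in model.lr
                    auto_scale_batch_size="binsearch",      # tune() stores the result in model.batch_size
                    accumulate_grad_batches={0: 1, 4: 2},   # accumulate 2 batches from epoch 4 onwards
                )
                trainer.tune(model)
                trainer.fit(model)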
Default: ``None``.\n\n callbacks: Add a callback or list of callbacks.\n Default: ``None``.\n\n checkpoint_callback: If ``True``, enable checkpointing.\n Default: ``None``.\n\n .. deprecated:: v1.5\n ``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.\n Please consider using ``enable_checkpointing`` instead.\n\n enable_checkpointing: If ``True``, enable checkpointing.\n It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in\n :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.\n Default: ``True``.\n\n check_val_every_n_epoch: Check val every n train epochs.\n Default: ``1``.\n\n\n default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.\n Default: ``os.getcwd()``.\n Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n\n detect_anomaly: Enable anomaly detection for the autograd engine.\n Default: ``False``.\n\n deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.\n Default: ``False``.\n\n devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,\n based on the accelerator type.\n\n fast_dev_run: Runs n if set to ``n`` (int) else 1 if set to ``True`` batch(es)\n of train, val and test to find any bugs (ie: a sort of unit test).\n Default: ``False``.\n\n flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).\n\n .. deprecated:: v1.5\n ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.\n Please configure flushing directly in the logger instead.\n\n gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node\n Default: ``None``.\n\n gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables\n gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.\n Default: ``None``.\n\n gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm=\"value\"``\n to clip by value, and ``gradient_clip_algorithm=\"norm\"`` to clip by norm. By default it will\n be set to ``\"norm\"``.\n\n limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses\n the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are\n provided and the `save_dir` property of that logger is not set, local files (checkpoints,\n profiler traces, etc.) are saved in ``default_root_dir`` rather than in the ``log_dir`` of any\n of the individual loggers.\n Default: ``True``.\n\n log_gpu_memory: None, 'min_max', 'all'. Might slow performance.\n\n .. 
deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please use the ``DeviceStatsMonitor`` callback directly instead.\n\n log_every_n_steps: How often to log within steps.\n Default: ``50``.\n\n prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.\n Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data\n\n .. deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please set ``prepare_data_per_node`` in ``LightningDataModule`` and/or\n ``LightningModule`` directly instead.\n\n process_position: Orders the progress bar when running multiple models on same machine.\n\n .. deprecated:: v1.5\n ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``\n directly to the Trainer's ``callbacks`` argument instead.\n\n progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.\n Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means\n a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).\n\n .. deprecated:: v1.5\n ``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``refresh_rate``\n directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,\n pass ``enable_progress_bar = False`` to the Trainer.\n\n enable_progress_bar: Whether to enable to progress bar by default.\n Default: ``False``.\n\n profiler: To profile individual steps during training and assist in identifying bottlenecks.\n Default: ``None``.\n\n overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).\n Default: ``0.0``.\n\n plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.\n Default: ``None``.\n\n precision: Double precision (64), full precision (32), half precision (16) or bfloat16 precision (bf16).\n Can be used on CPU, GPU, TPUs or IPUs.\n Default: ``32``.\n\n max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).\n If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.\n To enable infinite training, set ``max_epochs = -1``.\n\n min_epochs: Force training for at least these many epochs. Disabled by default (None).\n\n max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``\n and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set\n ``max_epochs`` to ``-1``.\n\n min_steps: Force training for at least these number of steps. Disabled by default (``None``).\n\n max_time: Stop training after this amount of time has passed. 
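            For illustration, how the run-length and debugging limits above combine
            (``model`` stands in for any ``LightningModule``)::

                trainer = Trainer(
                    max_epochs=10,              # stop after 10 epochs ...
                    max_steps=1000,             # ... or after 1000 optimizer steps, whichever comes first
                    limit_train_batches=0.25,   # float: fraction of the training set per epoch
                    limit_val_batches=20,       # int: number of validation batches
                    precision=16,
                    profiler="simple",          # or "advanced" / "pytorch" / "xla"
                )
                trainer.fit(model)

                # quick smoke test: a single batch of train, val and test
                Trainer(fast_dev_run=True).fit(model)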
Disabled by default (``None``).\n The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes seconds), as a\n :class:`datetime.timedelta`, or a dictionary with keys that will be passed to\n :class:`datetime.timedelta`.\n\n num_nodes: Number of GPU nodes for distributed training.\n Default: ``1``.\n\n num_processes: Number of processes for distributed training with ``accelerator=\"cpu\"``.\n Default: ``1``.\n\n num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.\n Set it to `-1` to run all batches in all validation dataloaders.\n Default: ``2``.\n\n reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.\n Default: ``0``.\n\n replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this\n will toggled automatically when DDP is used. By default it will add ``shuffle=True`` for\n train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,\n you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.\n\n resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is\n no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,\n training will start from the beginning of the next epoch.\n\n .. deprecated:: v1.5\n ``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.\n Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.\n\n strategy: Supports different training strategies with aliases\n as well custom training type plugins.\n Default: ``None``.\n\n sync_batchnorm: Synchronize batch norm layers between process groups/whole world.\n Default: ``False``.\n\n terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the\n end of each training batch, if any of the parameters or the loss are NaN or +/-inf.\n\n .. deprecated:: v1.5\n Trainer argument ``terminate_on_nan`` was deprecated in v1.5 and will be removed in 1.7.\n Please use ``detect_anomaly`` instead.\n\n detect_anomaly: Enable anomaly detection for the autograd engine.\n Default: ``False``.\n\n tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on (1)\n Default: ``None``.\n\n ipus: How many IPUs to train on.\n Default: ``None``.\n\n track_grad_norm: -1 no tracking. Otherwise tracks that p-norm. May be set to 'inf' infinity-norm. If using\n Automatic Mixed Precision (AMP), the gradients will be unscaled before logging them.\n Default: ``-1``.\n\n val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check\n after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training\n batches.\n Default: ``1.0``.\n\n enable_model_summary: Whether to enable model summarization by default.\n Default: ``True``.\n\n weights_summary: Prints a summary of the weights when training begins.\n\n .. deprecated:: v1.5\n ``weights_summary`` has been deprecated in v1.5 and will be removed in v1.7.\n To disable the summary, pass ``enable_model_summary = False`` to the Trainer.\n To customize the summary, pass :class:`~pytorch_lightning.callbacks.model_summary.ModelSummary`\n directly to the Trainer's ``callbacks`` argument.\n\n weights_save_path: Where to save weights if specified. Will override default_root_dir\n for checkpoints only. 
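            For illustration, the time-budget and validation-cadence options above
            (all values are placeholders)::

                trainer = Trainer(
                    max_time="00:12:00:00",               # DD:HH:MM:SS ...
                    # max_time=timedelta(hours=12),       # ... or a datetime.timedelta ...
                    # max_time={"hours": 12},             # ... or a dict of timedelta keyword arguments
                    val_check_interval=0.25,              # validate four times per training epoch
                    num_sanity_val_steps=2,               # -1 runs all validation batches before training
                    track_grad_norm=2,                    # log the gradient 2-norm ("inf" is also accepted)
                    reload_dataloaders_every_n_epochs=1,
                )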
Use this if for whatever reason you need the checkpoints\n stored in a different place than the logs written in `default_root_dir`.\n Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n Defaults to `default_root_dir`.\n\n .. deprecated:: v1.6\n ``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass\n ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`\n callback.\n\n move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.\n This can save some gpu memory, but can make training slower. Use with attention.\n Default: ``False``.\n\n multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.\n In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,\n and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets\n reload when reaching the minimum length of datasets.\n Default: ``\"max_size_cycle\"``.\n\n stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)\n <https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_.\n Default: ``False``.\n\n .. deprecated:: v1.5\n ``stochastic_weight_avg`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`\n directly to the Trainer's ``callbacks`` argument instead.\n \"\"\"\n super().__init__()\n Trainer._log_api_event(\"init\")\n log.detail(f\"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}\")\n self.state = TrainerState()\n\n gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n # init connectors\n self._data_connector = DataConnector(self, multiple_trainloader_mode)\n\n self._accelerator_connector = AcceleratorConnector(\n num_processes=num_processes,\n devices=devices,\n tpu_cores=tpu_cores,\n ipus=ipus,\n accelerator=accelerator,\n strategy=strategy,\n gpus=gpus,\n gpu_ids=gpu_ids,\n num_nodes=num_nodes,\n sync_batchnorm=sync_batchnorm,\n benchmark=benchmark,\n replace_sampler_ddp=replace_sampler_ddp,\n deterministic=deterministic,\n precision=precision,\n amp_type=amp_backend,\n amp_level=amp_level,\n plugins=plugins,\n )\n self._logger_connector = LoggerConnector(self, log_gpu_memory)\n self._callback_connector = CallbackConnector(self)\n self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)\n self._signal_connector = SignalConnector(self)\n self.tuner = Tuner(self)\n\n min_steps, max_steps, min_epochs, max_epochs, max_time = _parse_loop_limits(\n min_steps, max_steps, min_epochs, max_epochs, max_time\n )\n fit_loop = FitLoop(min_epochs=min_epochs, max_epochs=max_epochs)\n training_epoch_loop = TrainingEpochLoop(min_steps=min_steps, max_steps=max_steps)\n fit_loop.connect(epoch_loop=training_epoch_loop)\n\n # default .fit() loop\n self.fit_loop = fit_loop\n\n # default .validate() loop\n self.validate_loop = EvaluationLoop()\n\n # default .test() loop\n self.test_loop = EvaluationLoop()\n\n # default .predict() loop\n self.predict_loop = PredictionLoop()\n\n # set when a checkpoint is loaded via `Trainer.{fit,validate,test,predict}`.\n self._ckpt_path: Optional[str] = None\n\n # .validate(), predict() and .test() set these when they load a checkpoint. 
They will be removed in favor of\n # the unified read-only `Trainer.ckpt_path` attribute in v1.8\n self._validated_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n self._tested_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n self._predicted_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n\n # todo: remove in v1.7\n self._weights_summary: Optional[str] = None\n\n # init callbacks\n # Declare attributes to be set in _callback_connector on_trainer_init\n self._callback_connector.on_trainer_init(\n callbacks,\n checkpoint_callback,\n enable_checkpointing,\n enable_progress_bar,\n progress_bar_refresh_rate,\n process_position,\n default_root_dir,\n weights_save_path,\n enable_model_summary,\n weights_summary,\n stochastic_weight_avg,\n max_time,\n accumulate_grad_batches,\n )\n\n # hook\n self._call_callback_hooks(\"on_init_start\")\n\n # init data flags\n self.check_val_every_n_epoch: int\n self._data_connector.on_trainer_init(\n check_val_every_n_epoch,\n reload_dataloaders_every_n_epochs,\n prepare_data_per_node,\n )\n\n if terminate_on_nan is not None:\n rank_zero_deprecation(\n \"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7.\"\n \" Please use `Trainer(detect_anomaly=True)` instead.\"\n )\n if not isinstance(terminate_on_nan, bool):\n raise TypeError(f\"`terminate_on_nan` should be a bool, got {terminate_on_nan}.\")\n\n # gradient clipping\n if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):\n raise TypeError(f\"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.\")\n\n if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(\n gradient_clip_algorithm.lower()\n ):\n raise MisconfigurationException(\n f\"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. \"\n f\"Allowed algorithms: {GradClipAlgorithmType.supported_types()}.\"\n )\n\n # gradient norm tracking\n if track_grad_norm != -1 and not (\n (isinstance(track_grad_norm, (int, float)) or track_grad_norm == \"inf\") and float(track_grad_norm) > 0\n ):\n raise MisconfigurationException(\n f\"`track_grad_norm` must be a positive number or 'inf' (infinity norm). 
Got {track_grad_norm}.\"\n )\n\n self._terminate_on_nan = terminate_on_nan\n self.gradient_clip_val: Union[int, float] = gradient_clip_val\n self.gradient_clip_algorithm = (\n GradClipAlgorithmType(gradient_clip_algorithm.lower())\n if gradient_clip_algorithm is not None\n else gradient_clip_algorithm\n )\n self.track_grad_norm: float = float(track_grad_norm)\n\n self._detect_anomaly: bool = detect_anomaly\n self._setup_on_init(num_sanity_val_steps)\n\n # configure tuner\n self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)\n\n # configure profiler\n self.__init_profiler(profiler)\n\n # init logger flags\n self._loggers: List[LightningLoggerBase]\n self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)\n\n # init debugging flags\n self.val_check_interval: Union[int, float]\n self._init_debugging_flags(\n limit_train_batches,\n limit_val_batches,\n limit_test_batches,\n limit_predict_batches,\n val_check_interval,\n overfit_batches,\n fast_dev_run,\n )\n\n # Callback system\n self._call_callback_hooks(\"on_init_end\")\n\n def _init_debugging_flags(\n self,\n limit_train_batches: Optional[Union[int, float]],\n limit_val_batches: Optional[Union[int, float]],\n limit_test_batches: Optional[Union[int, float]],\n limit_predict_batches: Optional[Union[int, float]],\n val_check_interval: Optional[Union[int, float]],\n overfit_batches: Union[int, float],\n fast_dev_run: Union[int, bool],\n ) -> None:\n if isinstance(fast_dev_run, int) and (fast_dev_run < 0):\n raise MisconfigurationException(\n f\"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0.\"\n )\n\n self.fast_dev_run = fast_dev_run\n\n # set fast_dev_run=True when it is 1, used while logging\n if fast_dev_run == 1:\n self.fast_dev_run = True\n\n if fast_dev_run:\n num_batches = int(fast_dev_run)\n limit_train_batches = num_batches\n limit_val_batches = num_batches\n limit_test_batches = num_batches\n limit_predict_batches = num_batches\n self.fit_loop.max_steps = num_batches\n self.num_sanity_val_steps = 0\n self.fit_loop.max_epochs = 1\n val_check_interval = 1.0\n self.check_val_every_n_epoch = 1\n self.loggers = [DummyLogger()] if self.loggers else []\n\n rank_zero_info(\n \"Running in fast_dev_run mode: will run a full train,\"\n f\" val, test and prediction loop using {num_batches} batch(es).\"\n )\n\n self.limit_train_batches = _determine_batch_limits(limit_train_batches, \"limit_train_batches\")\n self.limit_val_batches = _determine_batch_limits(limit_val_batches, \"limit_val_batches\")\n self.limit_test_batches = _determine_batch_limits(limit_test_batches, \"limit_test_batches\")\n self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, \"limit_predict_batches\")\n self.val_check_interval = _determine_batch_limits(val_check_interval, \"val_check_interval\")\n self.overfit_batches = _determine_batch_limits(overfit_batches, \"overfit_batches\")\n self._determine_data_use_amount(self.overfit_batches)\n\n def _determine_data_use_amount(self, overfit_batches: float) -> None:\n \"\"\"Use less data for debugging purposes.\"\"\"\n if overfit_batches > 0:\n self.limit_train_batches = overfit_batches\n self.limit_val_batches = 0\n\n def _setup_on_init(self, num_sanity_val_steps: int) -> None:\n self._log_device_info()\n\n self.should_stop = False\n self.state = TrainerState()\n self.num_training_batches = float(\"inf\")\n self.train_dataloader = None\n\n if num_sanity_val_steps == -1:\n self.num_sanity_val_steps = 
float(\"inf\")\n else:\n self.num_sanity_val_steps = num_sanity_val_steps\n\n self.num_sanity_val_batches = []\n self.num_test_batches = []\n self.num_val_batches = []\n self.test_dataloaders = None\n self.val_dataloaders = None\n self._last_train_dl_reload_epoch = float(\"-inf\")\n self._last_val_dl_reload_epoch = float(\"-inf\")\n\n self.num_predict_batches = []\n\n def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:\n r\"\"\"\n Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)\n as all errors should funnel through them\n\n Args:\n trainer_fn: one of (fit, validate, test, predict)\n *args: positional arguments to be passed to the `trainer_fn`\n **kwargs: keyword arguments to be passed to `trainer_fn`\n \"\"\"\n try:\n if self.strategy.launcher is not None:\n return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)\n else:\n return trainer_fn(*args, **kwargs)\n # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7\n except KeyboardInterrupt as exception:\n rank_zero_warn(\"Detected KeyboardInterrupt, attempting graceful shutdown...\")\n # user could press Ctrl+c many times... only shutdown once\n if not self.interrupted:\n self.state.status = TrainerStatus.INTERRUPTED\n self._call_callback_hooks(\"on_keyboard_interrupt\")\n self._call_callback_hooks(\"on_exception\", exception)\n except BaseException as exception:\n self.state.status = TrainerStatus.INTERRUPTED\n if distributed_available() and self.world_size > 1:\n # try syncing remaining processes, kill otherwise\n self.strategy.reconciliate_processes(traceback.format_exc())\n self._call_callback_hooks(\"on_exception\", exception)\n self._teardown()\n # teardown might access the stage so we reset it after\n self.state.stage = None\n raise\n\n def fit(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n ckpt_path: Optional[str] = None,\n ) -> None:\n r\"\"\"\n Runs the full optimization routine.\n\n Args:\n model: Model to fit.\n\n train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.\n\n val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.\n\n ckpt_path: Path/URL of the checkpoint from which training is resumed. If there is\n no checkpoint file at the path, an exception is raised. 
If resuming from mid-epoch checkpoint,\n training will start from the beginning of the next epoch.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n \"\"\"\n self.strategy.model = model\n self._call_and_handle_interrupt(\n self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path\n )\n\n def _fit_impl(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n ckpt_path: Optional[str] = None,\n ) -> None:\n Trainer._log_api_event(\"fit\")\n log.detail(f\"{self.__class__.__name__}: trainer fit stage\")\n\n self.state.fn = TrainerFn.FITTING\n self.state.status = TrainerStatus.RUNNING\n self.training = True\n self._last_train_dl_reload_epoch = float(\"-inf\")\n self._last_val_dl_reload_epoch = float(\"-inf\")\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(train_dataloaders, LightningDataModule):\n datamodule = train_dataloaders\n train_dataloaders = None\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n raise MisconfigurationException(\n \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(\n model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n )\n\n # TODO: ckpt_path only in v2.0\n ckpt_path = ckpt_path or self.resume_from_checkpoint\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=True, model_connected=self.lightning_module is not None\n )\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.training = False\n return results\n\n def validate(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n r\"\"\"\n Perform one evaluation epoch over the validation set.\n\n Args:\n model: The model to validate.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the validation results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end`, etc.\n The length of the list corresponds to the number of validation dataloaders used.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, 
datamodule)\n\n def _validate_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"validate\")\n log.detail(f\"{self.__class__.__name__}: trainer validate stage\")\n\n self.state.fn = TrainerFn.VALIDATING\n self.state.status = TrainerStatus.RUNNING\n self.validating = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply val_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run\"\n )\n\n self.validate_loop.verbose = verbose\n\n # links data to the trainer\n self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._validated_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n # run validate\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.validating = False\n\n return results\n\n def test(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n r\"\"\"\n Perform one evaluation epoch over the test set.\n It's separated from fit to make sure you never run on your test set until you want to.\n\n Args:\n model: The model to test.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to test.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the test results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.test_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`, etc.\n The length of the list corresponds to the number of test dataloaders used.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)\n\n def _test_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: 
bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"test\")\n log.detail(f\"{self.__class__.__name__}: trainer test stage\")\n\n self.state.fn = TrainerFn.TESTING\n self.state.status = TrainerStatus.RUNNING\n self.testing = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply test_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.test(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run\"\n )\n\n self.test_loop.verbose = verbose\n\n # links data to the trainer\n self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._tested_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n # run test\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.testing = False\n\n return results\n\n def predict(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n r\"\"\"\n Run inference on your data.\n This will call the model forward function to compute predictions. Useful to perform distributed\n and batched predictions. 
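            For illustration, the evaluation and prediction entry points side by side
            (``model`` and the dataloaders are placeholders)::

                trainer = Trainer(accelerator="auto", devices=1)
                trainer.validate(model, dataloaders=val_dl)       # one pass over the validation set
                trainer.test(model, dataloaders=test_dl)          # uses the model's current weights
                predictions = trainer.predict(model, dataloaders=predict_dl)
                # `predictions` holds one entry per provided dataloader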
Logging is disabled in the predict hooks.\n\n Args:\n model: The model to predict with.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples.\n\n datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders.\n\n return_predictions: Whether to return predictions.\n ``True`` by default except when an accelerator that spawns processes is used (not supported).\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to predict.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n Returns:\n Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(\n self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path\n )\n\n def _predict_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"predict\")\n log.detail(f\"{self.__class__.__name__}: trainer predict stage\")\n\n self.state.fn = TrainerFn.PREDICTING\n self.state.status = TrainerStatus.RUNNING\n self.predicting = True\n\n self.predict_loop.return_predictions = return_predictions\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._predicted_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.predicting = False\n\n return results\n\n def tune(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,\n lr_find_kwargs: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Optional[Union[int, _LRFinder]]]:\n r\"\"\"\n Runs routines to tune hyperparameters before training.\n\n Args:\n model: Model to tune.\n\n train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n In the case of multiple 
dataloaders, please see this :ref:`section <multiple-dataloaders>`.\n\n val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n scale_batch_size_kwargs: Arguments for :func:`~pytorch_lightning.tuner.batch_size_scaling.scale_batch_size`\n\n lr_find_kwargs: Arguments for :func:`~pytorch_lightning.tuner.lr_finder.lr_find`\n \"\"\"\n Trainer._log_api_event(\"tune\")\n\n self.state.fn = TrainerFn.TUNING\n self.state.status = TrainerStatus.RUNNING\n self.tuning = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(train_dataloaders, LightningDataModule):\n datamodule = train_dataloaders\n train_dataloaders = None\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n raise MisconfigurationException(\n \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(\n model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n )\n\n with isolate_rng():\n result = self.tuner._tune(\n model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs\n )\n\n assert self.state.stopped\n self.tuning = False\n\n return result\n\n def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:\n # restore modules after setup\n self._checkpoint_connector.resume_start(checkpoint_path)\n self._checkpoint_connector.restore_model()\n self._checkpoint_connector.restore_datamodule()\n if self.state.fn == TrainerFn.FITTING:\n # restore callback states\n self._checkpoint_connector.restore_callbacks()\n\n def _run(\n self, model: \"pl.LightningModule\", ckpt_path: Optional[str] = None\n ) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # attach model to the training type plugin\n self.strategy.connect(model)\n\n self._callback_connector._attach_model_callbacks()\n self._callback_connector._attach_model_logging_functions()\n\n verify_loop_configurations(self)\n\n # hook\n log.detail(f\"{self.__class__.__name__}: preparing data\")\n self._data_connector.prepare_data()\n\n # ----------------------------\n # SET UP TRAINING\n # ----------------------------\n self._call_callback_hooks(\"on_before_accelerator_backend_setup\")\n log.detail(f\"{self.__class__.__name__}: setting up strategy environment\")\n self.strategy.setup_environment()\n self.__setup_profiler()\n\n self._call_setup_hook() # allow user to setup lightning_module in accelerator environment\n\n # check if we should delay restoring checkpoint till later\n if not self.strategy.restore_checkpoint_after_setup:\n log.detail(f\"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}\")\n self._restore_modules_and_callbacks(ckpt_path)\n\n log.detail(f\"{self.__class__.__name__}: configuring sharded model\")\n self._call_configure_sharded_model() # allow user to setup in model sharded environment\n\n # ----------------------------\n # INSPECT THE CORE LOOPS\n # ----------------------------\n fr\"\"\"\n Lightning internal flow looks like this:\n {Trainer.fit} or {Trainer.test} or {Trainer.predict} ||\n | ||\n spawn 
processes ||\n {self.strategy.setup_environment} ||\n | ||\n setup accelerator ||\n and strategy || LIGHTNING\n | ||\n {self._run_stage} || FLOW\n | ||\n {self._run_train} || DIRECTION\n or {self._run_evaluate} ||\n or {self._run_predict} ||\n | ||\n results \\/\n This is used to guide readers to the core loops: train, test, predict.\n {self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)\n \"\"\"\n\n # ----------------------------\n # TRAIN\n # ----------------------------\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n # strategy will configure model and move it to the device\n self.strategy.setup(self)\n\n # hook\n if self.state.fn == TrainerFn.FITTING:\n self._call_callback_hooks(\"on_fit_start\")\n self._call_lightning_module_hook(\"on_fit_start\")\n\n self._log_hyperparams()\n\n if self.strategy.restore_checkpoint_after_setup:\n log.detail(f\"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}\")\n self._restore_modules_and_callbacks(ckpt_path)\n\n # restore optimizers, etc.\n log.detail(f\"{self.__class__.__name__}: restoring training state\")\n self._checkpoint_connector.restore_training_state()\n\n self._checkpoint_connector.resume_end()\n\n results = self._run_stage()\n\n log.detail(f\"{self.__class__.__name__}: trainer tearing down\")\n self._teardown()\n\n # ----------------------------\n # POST-Training CLEAN UP\n # ----------------------------\n # hook\n if self.state.fn == TrainerFn.FITTING:\n self._call_callback_hooks(\"on_fit_end\")\n self._call_lightning_module_hook(\"on_fit_end\")\n\n log.detail(f\"{self.__class__.__name__}: calling teardown hooks\")\n self._call_teardown_hook()\n\n self.state.status = TrainerStatus.FINISHED\n self.state.stage = None\n\n return results\n\n def _log_hyperparams(self) -> None:\n if not self.loggers:\n return\n # log hyper-parameters\n hparams_initial = None\n\n # save exp to get started (this is where the first experiment logs are written)\n datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False\n\n if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:\n datamodule_hparams = self.datamodule.hparams_initial\n lightning_hparams = self.lightning_module.hparams_initial\n inconsistent_keys = []\n for key in lightning_hparams.keys() & datamodule_hparams.keys():\n lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]\n if type(lm_val) != type(dm_val):\n inconsistent_keys.append(key)\n elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):\n inconsistent_keys.append(key)\n elif lm_val != dm_val:\n inconsistent_keys.append(key)\n if inconsistent_keys:\n raise MisconfigurationException(\n f\"Error while merging hparams: the keys {inconsistent_keys} are present \"\n \"in both the LightningModule's and LightningDataModule's hparams \"\n \"but have different values.\"\n )\n hparams_initial = {**lightning_hparams, **datamodule_hparams}\n elif self.lightning_module._log_hyperparams:\n hparams_initial = self.lightning_module.hparams_initial\n elif datamodule_log_hyperparams:\n hparams_initial = self.datamodule.hparams_initial\n\n for logger in self.loggers:\n if hparams_initial is not None:\n logger.log_hyperparams(hparams_initial)\n logger.log_graph(self.lightning_module)\n logger.save()\n\n def _teardown(self):\n \"\"\"This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and\n 
Callback; those are handled by :meth:`_call_teardown_hook`.\"\"\"\n self.strategy.post_dispatch(self)\n self.strategy.teardown()\n loop = self._active_loop\n # loop should never be `None` here but it can because we don't know the trainer stage with `ddp_spawn`\n if loop is not None:\n loop.teardown()\n self._logger_connector.teardown()\n self._signal_connector.teardown()\n\n def run_stage(self) -> None:\n rank_zero_deprecation(\n \"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8. Use\"\n \" `Trainer.{fit,validate,test,predict}` instead.\"\n )\n return self._run_stage()\n\n def _run_stage(self):\n self.strategy.barrier(\"run-stage\")\n self.strategy.dispatch(self)\n\n if self.evaluating:\n return self._run_evaluate()\n if self.predicting:\n return self._run_predict()\n return self._run_train()\n\n def _pre_training_routine(self):\n # wait for all to join if on distributed\n self.strategy.barrier(\"setup_training\")\n\n # register signals\n self._signal_connector.register_signal_handlers()\n\n # --------------------------\n # Pre-train\n # --------------------------\n self._call_callback_hooks(\"on_pretrain_routine_start\")\n self._call_lightning_module_hook(\"on_pretrain_routine_start\")\n\n self._call_callback_hooks(\"on_pretrain_routine_end\")\n self._call_lightning_module_hook(\"on_pretrain_routine_end\")\n\n def _run_train(self) -> None:\n self._pre_training_routine()\n\n with isolate_rng():\n self._run_sanity_check()\n\n # enable train mode\n self.model.train()\n torch.set_grad_enabled(True)\n\n self.fit_loop.trainer = self\n with torch.autograd.set_detect_anomaly(self._detect_anomaly):\n self.fit_loop.run()\n\n def _run_evaluate(self) -> _EVALUATE_OUTPUT:\n assert self.evaluating\n\n # reload dataloaders\n self._evaluation_loop._reload_evaluation_dataloaders()\n\n # reset trainer on this loop and all child loops in case user connected a custom loop\n self._evaluation_loop.trainer = self\n\n with self.profiler.profile(f\"run_{self.state.stage}_evaluation\"), torch.no_grad():\n eval_loop_results = self._evaluation_loop.run()\n\n # remove the tensors from the eval results\n for result in eval_loop_results:\n if isinstance(result, dict):\n for k, v in result.items():\n if isinstance(v, torch.Tensor):\n result[k] = v.cpu().item()\n\n return eval_loop_results\n\n def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:\n self.reset_predict_dataloader(self.lightning_module)\n # reset trainer on this loop and all child loops in case user connected a custom loop\n self.predict_loop.trainer = self\n with torch.no_grad():\n return self.predict_loop.run()\n\n def _run_sanity_check(self) -> None:\n val_loop = self.fit_loop.epoch_loop.val_loop\n\n should_sanity_check = (\n self.enable_validation\n and self.num_sanity_val_steps > 0\n # do not sanity check if restarting because it would mess up the loaded state\n and not val_loop.restarting\n )\n\n # run tiny validation (if validation defined)\n # to make sure program won't crash during val\n if should_sanity_check:\n stage = self.state.stage\n self.sanity_checking = True\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n self._call_callback_hooks(\"on_sanity_check_start\")\n\n # reload dataloaders\n val_loop._reload_evaluation_dataloaders()\n self.num_sanity_val_batches = [\n min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches\n ]\n\n # run eval step\n with torch.no_grad():\n val_loop.run()\n\n 
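            # Worked example of the clamping above (illustrative numbers): with
            # `num_sanity_val_steps=2` and two validation dataloaders holding 100 and 1
            # batches, `num_val_batches == [100, 1]` and therefore
            # `num_sanity_val_batches == [min(2, 100), min(2, 1)] == [2, 1]`.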
self._call_callback_hooks(\"on_sanity_check_end\")\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n # reset the progress tracking state after sanity checking. we don't need to set the state before\n # because sanity check only runs when we are not restarting\n _reset_progress(val_loop)\n\n # restore the previous stage when the sanity check if finished\n self.state.stage = stage\n\n def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:\n # fault-tolerance takes precedence\n from pytorch_lightning.callbacks.fault_tolerance import _FaultToleranceCheckpoint\n\n ft_checkpoints = [cb for cb in self.callbacks if isinstance(cb, _FaultToleranceCheckpoint)]\n if ft_checkpoints:\n ft_ckpt_path = ft_checkpoints[0].ckpt_path\n fs = get_filesystem(ft_ckpt_path)\n if fs.exists(ft_ckpt_path):\n return ft_ckpt_path\n\n if model_provided and ckpt_path is None:\n # use passed model to function without loading weights\n return\n\n fn = self.state.fn.value\n\n if model_connected and ckpt_path is None:\n rank_zero_warn(\n f\"`.{fn}(ckpt_path=None)` was called without a model.\"\n \" The best model of the previous `fit` call will be used.\"\n f\" You can pass `{fn}(ckpt_path='best')` to use and best model\"\n \" checkpoint and avoid this warning or\"\n \" `ckpt_path=trainer.checkpoint_callback.last_model_path` to use the last model.\"\n )\n ckpt_path = \"best\"\n\n if ckpt_path == \"best\":\n if len(self.checkpoint_callbacks) > 1:\n rank_zero_warn(\n f'`.{fn}(ckpt_path=\"best\")` is called with Trainer configured with multiple `ModelCheckpoint`'\n \" callbacks. It will use the best checkpoint path from first checkpoint callback.\"\n )\n\n if not self.checkpoint_callback:\n raise MisconfigurationException(\n f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured.'\n )\n\n if not self.checkpoint_callback.best_model_path:\n if self.fast_dev_run:\n raise MisconfigurationException(\n f'You cannot execute `.{fn}(ckpt_path=\"best\")` with `fast_dev_run=True`.'\n f\" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`\"\n )\n raise MisconfigurationException(\n f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured to save the best model.'\n )\n # load best weights\n ckpt_path = self.checkpoint_callback.best_model_path\n\n if not ckpt_path:\n raise MisconfigurationException(\n f\"`.{fn}()` found no path for the best weights: {ckpt_path!r}. 
Please\"\n f\" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`\"\n )\n return ckpt_path\n\n def _call_setup_hook(self) -> None:\n fn = self.state.fn._setup_fn\n\n self.strategy.barrier(\"pre_setup\")\n\n if self.datamodule is not None:\n self.datamodule.setup(stage=fn)\n self._call_callback_hooks(\"setup\", stage=fn)\n self._call_lightning_module_hook(\"setup\", stage=fn)\n\n self.strategy.barrier(\"post_setup\")\n\n def _call_configure_sharded_model(self) -> None:\n with self.strategy.model_sharded_context():\n self._handle_meta_model()\n self._call_lightning_module_hook(\"configure_sharded_model\")\n self._call_callback_hooks(\"on_configure_sharded_model\")\n\n def _handle_meta_model(self) -> None:\n if not is_on_meta_device(self.lightning_module):\n return\n\n if isinstance(self.strategy, DDPSpawnStrategy):\n raise MisconfigurationException(\"LightningModule on meta device isn't supported with spawn.\")\n\n materialize_module(self.lightning_module)\n # the trainer reference is lost during materialization\n self.lightning_module.trainer = proxy(self)\n\n def _call_teardown_hook(self) -> None:\n fn = self.state.fn._setup_fn\n\n if self.datamodule is not None:\n self.datamodule.teardown(stage=fn)\n\n self._call_callback_hooks(\"teardown\", stage=fn)\n self._call_lightning_module_hook(\"teardown\", stage=fn)\n\n self.lightning_module._current_fx_name = None\n # these could have become stale if metrics are defined in `setup`\n self.lightning_module._metric_attributes = None\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu kill loggers.\n for logger in self.loggers:\n logger.finalize(\"success\")\n\n # summarize profile results\n self.profiler.describe()\n\n def call_hook(\n self, hook_name: str, *args: Any, pl_module: Optional[\"pl.LightningModule\"] = None, **kwargs: Any\n ) -> Any:\n r\"\"\"\n .. 
deprecated:: v1.6\n The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.\n \"\"\"\n rank_zero_deprecation(\"The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.\")\n pl_module = self.lightning_module or pl_module\n if pl_module:\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n # always profile hooks\n with self.profiler.profile(hook_name):\n\n # first call trainer hook\n callback_fx = getattr(self, hook_name, None)\n if callable(callback_fx):\n callback_fx(*args, **kwargs)\n\n # next call hook in lightningModule\n output = None\n model_fx = getattr(pl_module, hook_name, None)\n if callable(model_fx):\n output = model_fx(*args, **kwargs)\n\n # call the strategy hook\n if hook_name not in (\"setup\", \"teardown\", \"on_train_start\") and hasattr(self.strategy, hook_name):\n strategy_hook = getattr(self.strategy, hook_name)\n strategy_output = strategy_hook(*args, **kwargs)\n output = strategy_output if output is None else output\n\n if pl_module:\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n def _call_lightning_module_hook(\n self,\n hook_name: str,\n *args: Any,\n pl_module: Optional[\"pl.LightningModule\"] = None,\n **kwargs: Any,\n ) -> Any:\n pl_module = pl_module or self.lightning_module\n\n if pl_module is None:\n raise TypeError(\"No Lightning Module is available to call hooks on\")\n\n fn = getattr(pl_module, hook_name)\n if not callable(fn):\n return\n\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n with self.profiler.profile(f\"[LightningModule]{pl_module.__class__.__name__}.{hook_name}\"):\n output = fn(*args, **kwargs)\n\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n def _call_callback_hooks(\n self,\n hook_name: str,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n log.detail(f\"{self.__class__.__name__}: calling callback hook: {hook_name}\")\n # TODO: remove if block in v1.8\n if hook_name in (\"on_init_start\", \"on_init_end\"):\n # these `Callback` hooks are the only ones that do not take a lightning module.\n # we also don't profile bc profiler hasn't been set yet\n for callback in self.callbacks:\n fn = getattr(callback, hook_name)\n if callable(fn):\n fn(self, *args, **kwargs)\n return\n\n pl_module = self.lightning_module\n if pl_module:\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n # TODO: remove if block in v1.7\n if hook_name == \"on_train_batch_start\":\n with self.profiler.profile(hook_name):\n self._on_train_batch_start(*args, **kwargs)\n elif hook_name == \"on_train_batch_end\":\n with self.profiler.profile(hook_name):\n self._on_train_batch_end(*args, **kwargs)\n else:\n for callback in self.callbacks:\n fn = getattr(callback, hook_name)\n if callable(fn):\n with self.profiler.profile(f\"[Callback]{callback.state_key}.{hook_name}\"):\n fn(self, self.lightning_module, *args, **kwargs)\n\n if pl_module:\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n # TODO: Delete this in v1.7 (deprecations: #9816 and #11148)\n def _on_train_batch_start(self, batch, batch_idx, dataloader_idx=0):\n r\"\"\"Called when the training batch begins. 
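        For illustration (hypothetical user callbacks), the two signatures handled below are::

            class Old(Callback):
                def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): ...

            class New(Callback):
                def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): ...

        ``is_param_in_hook_signature(..., "dataloader_idx", explicit=True)`` is ``True`` only for ``Old``,
        which therefore still receives a trailing ``dataloader_idx`` of ``0``.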
This function is needed because of two different deprecations affecting\n the original function in TrainerCallbackHookMixin: #9816 and #11148.\n \"\"\"\n for callback in self.callbacks:\n if is_param_in_hook_signature(callback.on_train_batch_start, \"dataloader_idx\", explicit=True):\n callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx, 0)\n else:\n callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx)\n\n # TODO: Delete this in v1.7 (deprecations: #9816 and #11148)\n def _on_train_batch_end(self, outputs: STEP_OUTPUT, batch, batch_idx, dataloader_idx=0):\n r\"\"\"Called when the training batch ends. This function is needed because of two different deprecations affecting\n the original function in TrainerCallbackHookMixin: #9816 and #11148.\n \"\"\"\n for callback in self.callbacks:\n if is_param_in_hook_signature(callback.on_train_batch_end, \"dataloader_idx\", explicit=True):\n callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx, 0)\n else:\n callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx)\n\n def _call_callbacks_state_dict(self) -> Dict[str, dict]:\n \"\"\"Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by\n `Callback.state_key`.\"\"\"\n callback_state_dicts = {}\n for callback in self.callbacks:\n state_dict = callback.state_dict()\n if state_dict:\n callback_state_dicts[callback.state_key] = state_dict\n return callback_state_dicts\n\n def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.\n\n Will be removed in v1.8: If state is returned, we insert the callback state into\n ``checkpoint[\"callbacks\"][Callback.state_key]``. It overrides ``state_dict`` if already present.\n \"\"\"\n for callback in self.callbacks:\n # TODO: Add profiling for on_save_checkpoint hook\n state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint)\n if state:\n # TODO: Add deprecation warning if state is returned (see reference PR #11887)\n checkpoint[\"callbacks\"][callback.state_key] = state\n\n def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when loading a model checkpoint.\n\n Calls every callback's `on_load_checkpoint` hook. 
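        For illustration, a minimal stateful callback (hypothetical, not part of this module)
        whose state these helpers round-trip through ``checkpoint["callbacks"][state_key]``::

            class CounterCallback(Callback):
                def __init__(self):
                    self.batches_seen = 0

                def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
                    self.batches_seen += 1

                def state_dict(self):
                    # collected by `_call_callbacks_state_dict` when a checkpoint is saved
                    return {"batches_seen": self.batches_seen}

                def load_state_dict(self, state_dict):
                    # restored by `_call_callbacks_load_state_dict` when the checkpoint is loaded
                    self.batches_seen = state_dict["batches_seen"]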
We have a dedicated function for this rather than using\n `_call_callback_hooks` because we have special logic for getting callback_states.\n \"\"\"\n callback_states: Dict[Union[Type, str], Dict] = checkpoint.get(\"callbacks\")\n\n if callback_states is None:\n return\n\n is_legacy_ckpt = Version(checkpoint[\"pytorch-lightning_version\"]) < Version(\"1.5.0dev\")\n current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in self.callbacks}\n difference = callback_states.keys() - current_callbacks_keys\n if difference:\n rank_zero_warn(\n \"Be aware that when using `ckpt_path`,\"\n \" callbacks used to create the checkpoint need to be provided during `Trainer` instantiation.\"\n f\" Please add the following callbacks: {list(difference)}.\",\n )\n\n for callback in self.callbacks:\n state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))\n if state:\n state = deepcopy(state)\n # TODO: Add profiling for on_load_checkpoint hook\n callback.on_load_checkpoint(self, self.lightning_module, state)\n\n def _call_callbacks_load_state_dict(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when loading a model checkpoint, calls every callback's `load_state_dict`.\"\"\"\n callback_states: Dict[Union[Type, str], Dict] = checkpoint.get(\"callbacks\")\n\n if callback_states is None:\n return\n\n for callback in self.callbacks:\n state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))\n if state:\n state = deepcopy(state)\n callback.load_state_dict(state)\n\n def _call_strategy_hook(\n self,\n hook_name: str,\n *args: Any,\n **kwargs: Any,\n ) -> Any:\n pl_module = self.lightning_module\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n fn = getattr(self.strategy, hook_name)\n if not callable(fn):\n return\n\n with self.profiler.profile(f\"[Strategy]{self.strategy.__class__.__name__}.{hook_name}\"):\n output = fn(*args, **kwargs)\n\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n @staticmethod\n def _parse_devices(\n gpus: Optional[Union[List[int], str, int]],\n auto_select_gpus: bool,\n tpu_cores: Optional[Union[List[int], str, int]],\n ) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:\n return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n @staticmethod\n def _log_api_event(event: str) -> None:\n torch._C._log_api_usage_once(\"lightning.trainer.\" + event)\n\n def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:\n if isinstance(profiler, str):\n PROFILERS = {\n \"simple\": SimpleProfiler,\n \"advanced\": AdvancedProfiler,\n \"pytorch\": PyTorchProfiler,\n \"xla\": XLAProfiler,\n }\n profiler = profiler.lower()\n if profiler not in PROFILERS:\n raise MisconfigurationException(\n \"When passing string value for the `profiler` parameter of `Trainer`,\"\n f\" it can only be one of {list(PROFILERS.keys())}\"\n )\n profiler_class = PROFILERS[profiler]\n profiler = profiler_class()\n self.profiler: BaseProfiler = profiler or PassThroughProfiler()\n\n def __setup_profiler(self) -> None:\n local_rank = self.local_rank if self.world_size > 1 else None\n self.profiler._lightning_module = proxy(self.lightning_module)\n self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)\n\n def _log_device_info(self) -> None:\n rank_zero_info(\n f\"GPU available: {torch.cuda.is_available()}, used: 
{isinstance(self.accelerator, GPUAccelerator)}\"\n )\n\n num_tpu_cores = (\n self.tpu_cores if self.tpu_cores is not None and isinstance(self.accelerator, TPUAccelerator) else 0\n )\n rank_zero_info(f\"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores\")\n\n num_ipus = self.ipus if self.ipus is not None else 0\n rank_zero_info(f\"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs\")\n\n if torch.cuda.is_available() and not isinstance(self.accelerator, GPUAccelerator):\n rank_zero_warn(\n \"GPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='gpu', devices={GPUAccelerator.auto_device_count()})`.\",\n category=PossibleUserWarning,\n )\n\n if _TPU_AVAILABLE and not isinstance(self.accelerator, TPUAccelerator):\n rank_zero_warn(\n \"TPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='tpu', devices={TPUAccelerator.auto_device_count()})`.\"\n )\n\n if _IPU_AVAILABLE and not isinstance(self.accelerator, IPUAccelerator):\n rank_zero_warn(\n \"IPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='ipu', devices={IPUAccelerator.auto_device_count()})`.\"\n )\n\n \"\"\"\n Data loading methods\n \"\"\"\n\n def reset_train_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the train dataloader and initialises required variables (number of batches, when to validate,\n etc.).\n\n Args:\n model: The ``LightningModule`` if calling this outside of the trainer scope.\n \"\"\"\n source = self._data_connector._train_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"training_step\", pl_module)\n enable_training = self.limit_train_batches > 0\n if not (source.is_defined() and has_step and enable_training):\n return\n\n self.train_dataloader = self._data_connector._request_dataloader(RunningStage.TRAINING, model=model)\n\n if self.overfit_batches > 0:\n self.train_dataloader = self._data_connector._resolve_overfit_batches(self.train_dataloader)\n\n # automatically add samplers\n self.train_dataloader = apply_to_collection(\n self.train_dataloader,\n (DataLoader, CombinedLoader),\n self._data_connector._prepare_dataloader,\n mode=RunningStage.TRAINING,\n )\n loaders = (\n self.train_dataloader.loaders\n if isinstance(self.train_dataloader, CombinedLoader)\n else self.train_dataloader\n )\n\n # check the workers recursively\n apply_to_collection(loaders, DataLoader, self._data_connector._worker_check, \"train_dataloader\")\n\n # add worker_init_fn for correct seeding in worker processes\n apply_to_collection(loaders, DataLoader, _auto_add_worker_init_fn, rank=self.global_rank)\n\n # add collate_fn to collect metadata for fault tolerant training\n if _fault_tolerant_training():\n apply_to_collection(loaders, DataLoader, _add_capture_metadata_collate)\n\n # wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches\n if not isinstance(self.train_dataloader, CombinedLoader):\n self.train_dataloader = CombinedLoader(loaders, self._data_connector.multiple_trainloader_mode)\n\n module = model or self.lightning_module or self.datamodule\n self.num_training_batches = (\n len(self.train_dataloader)\n if has_len_all_ranks(self.train_dataloader, self.strategy, module)\n else float(\"inf\")\n )\n\n if isinstance(self.limit_train_batches, int):\n self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))\n elif 
self.num_training_batches != float(\"inf\"):\n self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)\n elif self.limit_train_batches != 1.0:\n raise MisconfigurationException(\n \"When using an IterableDataset for `limit_train_batches`,\"\n \" `Trainer(limit_train_batches)` must be `1.0` or an int. An int k specifies\"\n \" `num_training_batches` to use.\"\n )\n\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f\"`val_check_interval` ({self.val_check_interval}) must be less than or equal \"\n f\"to the number of the training batches ({self.num_training_batches}). \"\n \"If you want to disable validation set `limit_val_batches` to 0.0 instead.\"\n )\n else:\n if not has_len_all_ranks(self.train_dataloader, self.strategy, module):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float(\"inf\")\n else:\n raise MisconfigurationException(\n \"When using an IterableDataset for `train_dataloader`,\"\n \" `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies\"\n \" checking validation every k training batches.\"\n )\n else:\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n if self.loggers and self.num_training_batches < self.log_every_n_steps:\n rank_zero_warn(\n f\"The number of training samples ({self.num_training_batches}) is smaller than the logging interval\"\n f\" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if\"\n \" you want to see logs for the training epoch.\",\n category=PossibleUserWarning,\n )\n\n # store epoch of dataloader reset for reload_dataloaders_every_n_epochs\n self._last_train_dl_reload_epoch = self.current_epoch\n\n def reset_val_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._val_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"validation_step\", pl_module)\n enable_validation = self.limit_val_batches > 0\n if source.is_defined() and has_step and enable_validation:\n self.num_val_batches, self.val_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.VALIDATING, model=pl_module\n )\n\n # store epoch of dataloader reset for reload_dataloaders_every_n_epochs\n self._last_val_dl_reload_epoch = self.current_epoch\n\n def reset_test_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the test dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._test_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"test_step\", pl_module)\n enable_testing = self.limit_test_batches > 0\n if source.is_defined() and has_step and enable_testing:\n self.num_test_batches, self.test_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.TESTING, model=pl_module\n )\n\n def reset_predict_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the predict dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if 
called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._predict_dataloader_source\n pl_module = self.lightning_module or model\n enable_prediction = self.limit_predict_batches > 0\n if source.is_defined() and enable_prediction:\n self.num_predict_batches, self.predict_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.PREDICTING, model=pl_module\n )\n\n def reset_train_val_dataloaders(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n if self.train_dataloader is None:\n self.reset_train_dataloader(model=model)\n if self.val_dataloaders is None:\n self.reset_val_dataloader(model=model)\n\n \"\"\"\n Accelerator properties\n \"\"\"\n\n @property\n def accelerator(self) -> Accelerator:\n return self.strategy.accelerator\n\n @property\n def strategy(self) -> Strategy:\n return self._accelerator_connector.strategy\n\n @property\n def training_type_plugin(self) -> Strategy:\n rank_zero_deprecation(\n \"`Trainer.training_type_plugin` is deprecated in v1.6 and will be removed in v1.8. Use\"\n \" `Trainer.strategy` instead.\"\n )\n return self.strategy\n\n @property\n def precision_plugin(self) -> PrecisionPlugin:\n return self.strategy.precision_plugin\n\n @property\n def global_rank(self) -> int:\n return self.strategy.global_rank\n\n @property\n def local_rank(self) -> int:\n # some training types define a local rank\n return getattr(self.strategy, \"local_rank\", 0)\n\n @property\n def node_rank(self) -> int:\n # some training types define a node rank\n return getattr(self.strategy, \"node_rank\", 0)\n\n @property\n def world_size(self) -> int:\n # some training types define a world size\n return getattr(self.strategy, \"world_size\", 1)\n\n @property\n def should_rank_save_checkpoint(self) -> bool:\n rank_zero_deprecation(\n \"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.\", stacklevel=5\n )\n strategy = self.strategy\n return (\n isinstance(strategy, pl.strategies.TPUSpawnStrategy) and strategy.local_rank == 0 or strategy.is_global_zero\n )\n\n @property\n def num_nodes(self) -> int:\n return getattr(self.strategy, \"num_nodes\", 1)\n\n @property\n def device_ids(self) -> List[int]:\n \"\"\"List of device indexes per node.\"\"\"\n devices = getattr(self.strategy, \"parallel_devices\", [self.strategy.root_device])\n device_ids = []\n for idx, device in enumerate(devices):\n if isinstance(device, torch.device):\n device_ids.append(device.index or idx)\n elif isinstance(device, int):\n device_ids.append(device)\n return device_ids\n\n @property\n def num_devices(self) -> int:\n \"\"\"Number of devices the trainer uses per node.\"\"\"\n return len(self.device_ids)\n\n @property\n def num_processes(self) -> int:\n return self._accelerator_connector.num_processes\n\n @property\n def root_gpu(self) -> Optional[int]:\n rank_zero_deprecation(\n \"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. 
\"\n \"Please use `Trainer.strategy.root_device.index` instead.\"\n )\n return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None\n\n @property\n def tpu_cores(self) -> int:\n return self._accelerator_connector.tpu_cores\n\n @property\n def ipus(self) -> int:\n return self._accelerator_connector.num_ipus\n\n @property\n def num_gpus(self) -> int:\n rank_zero_deprecation(\n \"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` instead.\"\n )\n return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0\n\n @property\n def devices(self) -> int:\n rank_zero_deprecation(\n \"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead.\"\n )\n return self.num_devices\n\n @property\n def data_parallel_device_ids(self) -> Optional[List[int]]:\n return (\n self._accelerator_connector.parallel_device_ids if self._accelerator_connector.parallel_device_ids else None\n )\n\n @property\n def lightning_module(self) -> \"pl.LightningModule\":\n # TODO: this is actually an optional return\n return self.strategy.lightning_module\n\n @property\n def optimizers(self) -> List[Optimizer]:\n return self.strategy.optimizers\n\n @optimizers.setter\n def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:\n self.strategy.optimizers = new_optims\n\n @property\n def lightning_optimizers(self) -> Dict[int, LightningOptimizer]:\n rank_zero_deprecation(\n \"`Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8\", stacklevel=5\n )\n return self.strategy._lightning_optimizers\n\n @property\n def lr_scheduler_configs(self) -> List[LRSchedulerConfig]:\n return self.strategy.lr_scheduler_configs\n\n @property\n def lr_schedulers(self) -> List[Dict[str, Any]]:\n rank_zero_deprecation(\n \"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8.\"\n \" You can use `trainer.lr_scheduler_configs` instead which contains dataclasses instead of dictionaries.\",\n stacklevel=5,\n )\n from dataclasses import asdict\n\n return [asdict(config) for config in self.strategy.lr_scheduler_configs]\n\n @property\n def optimizer_frequencies(self) -> List[int]:\n return self.strategy.optimizer_frequencies\n\n @optimizer_frequencies.setter\n def optimizer_frequencies(self, new_freqs: List[int]) -> None:\n self.strategy.optimizer_frequencies = new_freqs\n\n @property\n def amp_backend(self) -> Optional[AMPType]:\n if isinstance(self.precision_plugin, ApexMixedPrecisionPlugin):\n return AMPType.APEX\n if isinstance(self.precision_plugin, NativeMixedPrecisionPlugin):\n return AMPType.NATIVE\n return None\n\n @property\n def precision(self) -> Union[str, int]:\n return self.strategy.precision_plugin.precision\n\n @property\n def scaler(self) -> Optional[Any]:\n return getattr(self.precision_plugin, \"scaler\", None)\n\n @property\n def gpus(self) -> Optional[Union[List[int], str, int]]:\n return self._accelerator_connector.gpus\n\n @property\n def model(self) -> torch.nn.Module:\n \"\"\"The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.\n\n To access the pure LightningModule, use\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead.\n \"\"\"\n return self.strategy.model\n\n @model.setter\n def model(self, model: torch.nn.Module) -> None:\n \"\"\"Setter for the model, pass-through to accelerator and plugin 
where the model reference is stored. Used\n by the Tuner to reset the state of Trainer and Accelerator.\n\n Args:\n model: The LightningModule, possibly wrapped into DataParallel or DistributedDataParallel, depending\n on the backend.\n \"\"\"\n self.strategy.model = model\n\n \"\"\"\n General properties\n \"\"\"\n\n @property\n def log_dir(self) -> Optional[str]:\n if len(self.loggers) == 1:\n if isinstance(self.logger, TensorBoardLogger):\n dirpath = self.logger.log_dir\n else:\n dirpath = self.logger.save_dir\n else:\n dirpath = self.default_root_dir\n\n dirpath = self.strategy.broadcast(dirpath)\n return dirpath\n\n @property\n def use_amp(self) -> bool:\n rank_zero_deprecation(\n \"`Trainer.use_amp` is deprecated in v1.6.0 and will be removed in v1.8.0.\"\n \" Please use `Trainer.amp_backend` instead.\"\n )\n return self.precision == 16\n\n @property\n def is_global_zero(self) -> bool:\n return self.strategy.is_global_zero\n\n @property\n def slurm_job_id(self) -> Optional[int]:\n rank_zero_deprecation(\"Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.\")\n return SLURMEnvironment.job_id()\n\n @property\n def distributed_sampler_kwargs(self) -> Optional[dict]:\n if isinstance(self.strategy, ParallelStrategy):\n return self.strategy.distributed_sampler_kwargs\n\n @property\n def data_parallel(self) -> bool:\n return isinstance(self.strategy, ParallelStrategy)\n\n @property\n def progress_bar_dict(self) -> dict:\n \"\"\"Read-only for progress bar metrics.\"\"\"\n rank_zero_deprecation(\n \"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7.\"\n \" Use `ProgressBarBase.get_metrics` instead.\"\n )\n ref_model = self.lightning_module\n ref_model = cast(pl.LightningModule, ref_model)\n if self.progress_bar_callback:\n return self.progress_bar_callback.get_metrics(self, ref_model)\n return self.progress_bar_metrics\n\n @property\n def enable_validation(self) -> bool:\n \"\"\"Check if we should run validation during training.\"\"\"\n return (\n self._data_connector._val_dataloader_source.is_defined()\n and is_overridden(\"validation_step\", self.lightning_module)\n and self.limit_val_batches > 0\n )\n\n @property\n def default_root_dir(self) -> str:\n \"\"\"The default location to save artifacts of loggers, checkpoints etc.\n\n It is used as a fallback if logger or checkpoint callback do not define specific save paths.\n \"\"\"\n if get_filesystem(self._default_root_dir).protocol == \"file\":\n return os.path.normpath(self._default_root_dir)\n return self._default_root_dir\n\n @property\n def weights_save_path(self) -> str:\n \"\"\"\n The default root location to save weights (checkpoints), e.g., when the\n :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.\n\n .. 
deprecated:: v1.6\n `Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.\n \"\"\"\n rank_zero_deprecation(\"`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.\")\n return self._weights_save_path_internal\n\n # TODO: Remove _weights_save_path_internal in v1.8\n @property\n def _weights_save_path_internal(self) -> str:\n \"\"\"This is an internal implementation of weights_save_path which allows weights_save_path to be used\n internally by the framework without emitting a deprecation warning.\n\n To be removed in v1.8.\n \"\"\"\n if get_filesystem(self._weights_save_path).protocol == \"file\":\n return os.path.normpath(self._weights_save_path)\n return self._weights_save_path\n\n @property\n def early_stopping_callback(self) -> Optional[EarlyStopping]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.early_stopping_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def early_stopping_callbacks(self) -> List[EarlyStopping]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in\n the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, EarlyStopping)]\n\n @property\n def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`\n found in the Trainer.callbacks list.\"\"\"\n return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]\n\n @property\n def checkpoint_callback(self) -> Optional[ModelCheckpoint]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.checkpoint_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def checkpoint_callbacks(self) -> List[ModelCheckpoint]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found\n in the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]\n\n @property\n def progress_bar_callback(self) -> Optional[ProgressBarBase]:\n \"\"\"An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the\n Trainer.callbacks list, or ``None`` if one doesn't exist.\"\"\"\n for c in self.callbacks:\n if isinstance(c, ProgressBarBase):\n return c\n return None\n\n @property\n def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:\n resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path\n if resume_from_checkpoint is not None:\n rank_zero_deprecation(\n \"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0.\"\n \" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.\",\n stacklevel=5,\n )\n\n return resume_from_checkpoint\n\n @property\n def ckpt_path(self) -> Optional[str]:\n \"\"\"Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`,\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`, or\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. 
``None`` otherwise.\"\"\"\n return self._ckpt_path\n\n @property\n def validated_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._validated_ckpt_path\n\n @validated_ckpt_path.setter\n def validated_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path`.\",\n stacklevel=5,\n )\n self._validated_ckpt_path = ckpt_path\n\n @property\n def tested_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._tested_ckpt_path\n\n @tested_ckpt_path.setter\n def tested_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n self._tested_ckpt_path = ckpt_path\n\n @property\n def predicted_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._predicted_ckpt_path\n\n @predicted_ckpt_path.setter\n def predicted_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. 
The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n self._predicted_ckpt_path = ckpt_path\n\n def save_checkpoint(\n self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None\n ) -> None:\n r\"\"\"\n Runs routine to create a checkpoint.\n\n Args:\n filepath: Path where checkpoint is saved.\n weights_only: If ``True``, will only save the model weights.\n storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin\n\n \"\"\"\n self._checkpoint_connector.save_checkpoint(filepath, weights_only=weights_only, storage_options=storage_options)\n\n \"\"\"\n Parsing properties\n \"\"\"\n\n @classmethod\n def default_attributes(cls) -> dict:\n init_signature = inspect.signature(cls)\n return {k: v.default for k, v in init_signature.parameters.items()}\n\n @classmethod\n def get_deprecated_arg_names(cls) -> List:\n \"\"\"Returns a list with deprecated Trainer arguments.\"\"\"\n depr_arg_names = []\n for name, val in cls.__dict__.items():\n if name.startswith(\"DEPRECATED\") and isinstance(val, (tuple, list)):\n depr_arg_names.extend(val)\n return depr_arg_names\n\n @classmethod\n def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:\n return from_argparse_args(cls, args, **kwargs)\n\n @classmethod\n def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:\n return parse_argparser(cls, arg_parser)\n\n @classmethod\n def match_env_arguments(cls) -> Namespace:\n return parse_env_variables(cls)\n\n @classmethod\n def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n return add_argparse_args(cls, parent_parser, **kwargs)\n\n \"\"\"\n State properties\n \"\"\"\n\n @property\n def interrupted(self) -> bool:\n return self.state.status == TrainerStatus.INTERRUPTED\n\n @property\n def training(self) -> bool:\n return self.state.stage == RunningStage.TRAINING\n\n @training.setter\n def training(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TRAINING\n elif self.training:\n self.state.stage = None\n\n @property\n def testing(self) -> bool:\n return self.state.stage == RunningStage.TESTING\n\n @testing.setter\n def testing(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TESTING\n elif self.testing:\n self.state.stage = None\n\n @property\n def predicting(self) -> bool:\n return self.state.stage == RunningStage.PREDICTING\n\n @predicting.setter\n def predicting(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.PREDICTING\n elif self.predicting:\n self.state.stage = None\n\n @property\n def tuning(self) -> bool:\n return self.state.stage == RunningStage.TUNING\n\n @tuning.setter\n def tuning(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TUNING\n elif self.tuning:\n self.state.stage = None\n\n @property\n def validating(self) -> bool:\n return self.state.stage == RunningStage.VALIDATING\n\n @validating.setter\n def validating(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.VALIDATING\n elif self.validating:\n self.state.stage = None\n\n @property\n def evaluating(self) -> bool:\n return self.state.stage and self.state.stage.evaluating\n\n @property\n def sanity_checking(self) -> bool:\n return self.state.stage == RunningStage.SANITY_CHECKING\n\n @sanity_checking.setter\n def sanity_checking(self, val: bool) -> None:\n if 
val:\n self.state.stage = RunningStage.SANITY_CHECKING\n elif self.sanity_checking:\n self.state.stage = None\n\n \"\"\"\n Loop properties\n \"\"\"\n\n @property\n def global_step(self) -> int:\n \"\"\"The number of optimizer steps taken (does not reset each epoch).\n\n This includes multiple optimizers and TBPTT steps (if enabled).\n \"\"\"\n return self.fit_loop.epoch_loop.global_step\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch, updated after the epoch end hooks are run.\"\"\"\n return self.fit_loop.epoch_progress.current.completed\n\n @property\n def max_epochs(self) -> int:\n return self.fit_loop.max_epochs\n\n @property\n def min_epochs(self) -> int:\n return self.fit_loop.min_epochs\n\n @property\n def max_steps(self) -> int:\n return self.fit_loop.max_steps\n\n @property\n def min_steps(self) -> Optional[int]:\n return self.fit_loop.min_steps\n\n @property\n def is_last_batch(self) -> bool:\n return self.fit_loop.epoch_loop.batch_progress.is_last_batch\n\n @property\n def fit_loop(self) -> FitLoop:\n return self._fit_loop\n\n @fit_loop.setter\n def fit_loop(self, loop: FitLoop):\n \"\"\"Attach a custom fit loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`.\n \"\"\"\n loop.trainer = self\n self._fit_loop = loop\n\n @property\n def validate_loop(self) -> EvaluationLoop:\n return self._validate_loop\n\n @validate_loop.setter\n def validate_loop(self, loop: EvaluationLoop):\n \"\"\"Attach a custom validation loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. Note that this loop is different from the one\n running during training inside the :meth:`pytorch_lightning.trainer.trainer.Trainer.fit` call.\n \"\"\"\n loop.trainer = self\n self._validate_loop = loop\n\n @property\n def test_loop(self) -> EvaluationLoop:\n return self._test_loop\n\n @test_loop.setter\n def test_loop(self, loop: EvaluationLoop):\n \"\"\"Attach a custom test loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.\n \"\"\"\n loop.trainer = self\n self._test_loop = loop\n\n @property\n def predict_loop(self) -> PredictionLoop:\n return self._predict_loop\n\n @predict_loop.setter\n def predict_loop(self, loop: PredictionLoop):\n \"\"\"Attach a custom prediction loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.\n \"\"\"\n loop.trainer = self\n self._predict_loop = loop\n\n @property\n def verbose_evaluate(self) -> bool:\n rank_zero_deprecation(\n \"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. The current value\"\n \" returned is the union of the validate and test loop values. You can choose which one to access with\"\n \" `trainer.{validate,test}_loop.verbose`.\",\n stacklevel=5,\n )\n return self.validate_loop.verbose or self.test_loop.verbose\n\n @verbose_evaluate.setter\n def verbose_evaluate(self, verbose: bool) -> None:\n rank_zero_deprecation(\n \"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. 
This will set\"\n \" the value for both trainer.{validate,test}_loop.verbose`.\",\n stacklevel=5,\n )\n self.validate_loop.verbose = verbose\n self.test_loop.verbose = verbose\n\n @property\n def _evaluation_loop(self) -> EvaluationLoop:\n if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):\n return self.fit_loop.epoch_loop.val_loop\n if self.state.fn == TrainerFn.VALIDATING:\n return self.validate_loop\n if self.state.fn == TrainerFn.TESTING:\n return self.test_loop\n raise RuntimeError(\"The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope\")\n\n @property\n def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:\n if self.training:\n return self.fit_loop\n if self.sanity_checking or self.evaluating:\n return self._evaluation_loop\n if self.predicting:\n return self.predict_loop\n\n \"\"\"\n Logging properties\n \"\"\"\n\n @property\n def logger(self) -> Optional[LightningLoggerBase]:\n if len(self.loggers) == 0:\n return None\n if len(self.loggers) == 1:\n return self.loggers[0]\n else:\n rank_zero_warn(\n \"Using trainer.logger when Trainer is configured to use multiple loggers.\"\n \" This behavior will change in v1.8 when LoggerCollection is removed, and\"\n \" trainer.logger will return the first logger in trainer.loggers\"\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return LoggerCollection(self.loggers)\n\n @logger.setter\n def logger(self, logger: Optional[LightningLoggerBase]) -> None:\n if not logger:\n self.loggers = []\n elif isinstance(logger, LoggerCollection):\n self.loggers = list(logger)\n else:\n self.loggers = [logger]\n\n @property\n def loggers(self) -> List[LightningLoggerBase]:\n return self._loggers\n\n @loggers.setter\n def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None:\n self._loggers = loggers if loggers else []\n\n @property\n def callback_metrics(self) -> dict:\n return self._logger_connector.callback_metrics\n\n @property\n def logged_metrics(self) -> dict:\n return self._logger_connector.logged_metrics\n\n @property\n def progress_bar_metrics(self) -> dict:\n return self._logger_connector.progress_bar_metrics\n\n @property\n def _results(self) -> Optional[_ResultCollection]:\n active_loop = self._active_loop\n if active_loop is not None:\n return active_loop._results\n\n def _exit_gracefully_on_signal(self) -> None:\n if not _fault_tolerant_training() or not self._should_terminate_gracefully():\n return\n raise ExitGracefullyException(0)\n\n def _should_terminate_gracefully(self) -> bool:\n value = torch.tensor(int(self._terminate_gracefully), device=self.strategy.root_device)\n return self.strategy.reduce(value, reduce_op=\"sum\") > 0\n\n @property\n def weights_summary(self) -> Optional[str]:\n rank_zero_deprecation(\"`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n return self._weights_summary\n\n @weights_summary.setter\n def weights_summary(self, val: Optional[str]) -> None:\n rank_zero_deprecation(\"Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n self._weights_summary = val\n\n \"\"\"\n Other\n \"\"\"\n\n @property\n def estimated_stepping_batches(self) -> Union[int, float]:\n r\"\"\"\n Estimated stepping batches for the complete training inferred from DataLoaders, gradient\n accumulation factor and distributed setup.\n\n Examples::\n\n def configure_optimizers(self):\n optimizer = ...\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=1e-3, 
total_steps=self.trainer.estimated_stepping_batches\n )\n return [optimizer], [scheduler]\n\n \"\"\"\n accumulation_scheduler = self.accumulation_scheduler\n\n if accumulation_scheduler.epochs != [0]:\n raise MisconfigurationException(\n \"Estimated stepping batches cannot be computed with different\"\n \" `accumulate_grad_batches` at different epochs.\"\n )\n\n # infinite training\n if self.max_epochs == -1 and self.max_steps == -1:\n return float(\"inf\")\n\n if self.train_dataloader is None:\n rank_zero_info(\"Loading `train_dataloader` to estimate number of stepping batches.\")\n self.reset_train_dataloader()\n\n total_batches = self.num_training_batches\n\n # iterable dataset\n if total_batches == float(\"inf\"):\n return self.max_steps\n\n self.accumulate_grad_batches = accumulation_scheduler.get_accumulate_grad_batches(self.current_epoch)\n effective_batch_size = self.accumulate_grad_batches\n max_estimated_steps = math.ceil(total_batches / effective_batch_size) * max(self.max_epochs, 1)\n\n max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps\n return max_estimated_steps\n\n @property\n def terminate_on_nan(self) -> bool:\n rank_zero_deprecation(\"`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.\")\n return self._terminate_on_nan\n\n @terminate_on_nan.setter\n def terminate_on_nan(self, val: bool) -> None:\n rank_zero_deprecation(\n f\"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7.\"\n f\" Please set `Trainer(detect_anomaly={val})` instead.\"\n )\n self._terminate_on_nan = val # : 212\n\n\ndef _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:\n if batches is None:\n # batches is optional to know if the user passed a value so that we can show the above info messages only to the\n # users that set a value explicitly\n return 1.0\n\n # differentiating based on the type can be error-prone for users. show a message describing the chosen behaviour\n if isinstance(batches, int) and batches == 1:\n if name == \"limit_train_batches\":\n message = \"1 batch per epoch will be used.\"\n elif name == \"val_check_interval\":\n message = \"validation will run after every batch.\"\n else:\n message = \"1 batch will be used.\"\n rank_zero_info(f\"`Trainer({name}=1)` was configured so {message}\")\n elif isinstance(batches, float) and batches == 1.0:\n if name == \"limit_train_batches\":\n message = \"100% of the batches per epoch will be used.\"\n elif name == \"val_check_interval\":\n message = \"validation will run at the end of the training epoch.\"\n else:\n message = \"100% of the batches will be used.\"\n rank_zero_info(f\"`Trainer({name}=1.0)` was configured so {message}.\")\n\n if 0 <= batches <= 1:\n return batches\n if batches > 1 and batches % 1.0 == 0:\n return int(batches)\n raise MisconfigurationException(\n f\"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int.\"\n )\n",
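The `_determine_batch_limits` helper that closes the trainer source above encodes a small but easy-to-miss convention: a float in [0.0, 1.0] is read as a fraction of the available batches, while an integer greater than 1 is read as an absolute batch count. Below is a minimal, self-contained sketch of that convention for a finite dataloader; the helper name `resolve_batch_limit` is illustrative only and is not part of the Lightning API.

from typing import Union

def resolve_batch_limit(limit: Union[int, float], total_batches: int) -> int:
    """Illustrative only: mirror the int-vs-float reading of `limit_*_batches`-style values."""
    if isinstance(limit, int):
        # an int k caps the run at k batches
        return min(total_batches, limit)
    if 0.0 <= limit <= 1.0:
        # a float in [0, 1] selects that fraction of the available batches
        return int(total_batches * limit)
    raise ValueError(f"{limit} must be in [0.0, 1.0] or an int")

# e.g. resolve_batch_limit(0.25, 1000) == 250, while resolve_batch_limit(10, 1000) == 10

With this reading, `Trainer(limit_train_batches=1)` and `Trainer(limit_train_batches=1.0)` behave very differently, which is why `_determine_batch_limits` above emits an informational message for both of those values.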
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test deprecated functionality which will be removed in v1.8.0.\"\"\"\nimport os\nimport time\nfrom unittest import mock\nfrom unittest.mock import Mock\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import optim\n\nfrom pytorch_lightning import Callback, Trainer\nfrom pytorch_lightning.loggers import CSVLogger, LightningLoggerBase, LoggerCollection\nfrom pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin\nfrom pytorch_lightning.plugins.training_type.ddp import DDPPlugin\nfrom pytorch_lightning.plugins.training_type.ddp2 import DDP2Plugin\nfrom pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin\nfrom pytorch_lightning.plugins.training_type.deepspeed import DeepSpeedPlugin\nfrom pytorch_lightning.plugins.training_type.dp import DataParallelPlugin\nfrom pytorch_lightning.plugins.training_type.fully_sharded import DDPFullyShardedPlugin\nfrom pytorch_lightning.plugins.training_type.ipu import IPUPlugin\nfrom pytorch_lightning.plugins.training_type.sharded import DDPShardedPlugin\nfrom pytorch_lightning.plugins.training_type.sharded_spawn import DDPSpawnShardedPlugin\nfrom pytorch_lightning.plugins.training_type.single_device import SingleDevicePlugin\nfrom pytorch_lightning.plugins.training_type.single_tpu import SingleTPUPlugin\nfrom pytorch_lightning.plugins.training_type.tpu_spawn import TPUSpawnPlugin\nfrom pytorch_lightning.profiler import AbstractProfiler, AdvancedProfiler, SimpleProfiler\nfrom pytorch_lightning.strategies import ParallelStrategy\nfrom pytorch_lightning.trainer.configuration_validator import _check_datamodule_checkpoint_hooks\nfrom pytorch_lightning.trainer.states import RunningStage\nfrom pytorch_lightning.utilities.apply_func import move_data_to_device\nfrom pytorch_lightning.utilities.enums import DeviceType, DistributedType\nfrom pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn\nfrom tests.deprecated_api import no_deprecated_call\nfrom tests.helpers.boring_model import BoringDataModule, BoringModel\nfrom tests.helpers.runif import RunIf\nfrom tests.helpers.torchtext_utils import get_dummy_torchtext_data_iterator\n\n\ndef test_v1_8_0_deprecated_distributed_type_enum():\n\n with pytest.deprecated_call(match=\"has been deprecated in v1.6 and will be removed in v1.8.\"):\n _ = DistributedType.DDP\n\n\ndef test_v1_8_0_deprecated_device_type_enum():\n\n with pytest.deprecated_call(match=\"has been deprecated in v1.6 and will be removed in v1.8.\"):\n _ = DeviceType.CPU\n\n\[email protected](not _TORCHTEXT_LEGACY, reason=\"torchtext.legacy is deprecated.\")\ndef test_v1_8_0_deprecated_torchtext_batch():\n\n with pytest.deprecated_call(match=\"is deprecated and Lightning will remove support for it in v1.8\"):\n data_iterator, _ = get_dummy_torchtext_data_iterator(num_samples=3, batch_size=3)\n batch = next(iter(data_iterator))\n 
_ = move_data_to_device(batch=batch, device=torch.device(\"cpu\"))\n\n\ndef test_v1_8_0_on_init_start_end(tmpdir):\n class TestCallback(Callback):\n def on_init_start(self, trainer):\n print(\"Starting to init trainer!\")\n\n def on_init_end(self, trainer):\n print(\"Trainer is init now\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n with pytest.deprecated_call(\n match=\"The `on_init_end` callback hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.validate(model)\n\n\ndef test_v1_8_0_deprecated_call_hook():\n trainer = Trainer(\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n enable_progress_bar=False,\n logger=False,\n )\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8.\"):\n trainer.call_hook(\"test_hook\")\n\n\ndef test_v1_8_0_deprecated_warning_positional_category():\n with pytest.deprecated_call(match=r\"use `category=FutureWarning.\"):\n rank_zero_warn(\"foo\", FutureWarning)\n\n\ndef test_v1_8_0_deprecated_on_hpc_hooks(tmpdir):\n class TestModelSave(BoringModel):\n def on_hpc_save(self):\n print(\"on_hpc_save override\")\n\n class TestModelLoad(BoringModel):\n def on_hpc_load(self):\n print(\"on_hpc_load override\")\n\n save_model = TestModelSave()\n load_model = TestModelLoad()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, fast_dev_run=True)\n\n with pytest.deprecated_call(\n match=r\"Method `LightningModule.on_hpc_save` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(save_model)\n with pytest.deprecated_call(\n match=r\"Method `LightningModule.on_hpc_load` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(load_model)\n\n\ndef test_v1_8_0_deprecated_run_stage():\n trainer = Trainer()\n trainer._run_stage = Mock()\n with pytest.deprecated_call(match=\"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8.\"):\n trainer.run_stage()\n\n\ndef test_v1_8_0_trainer_verbose_evaluate():\n trainer = Trainer()\n with pytest.deprecated_call(match=\"verbose_evaluate` property has been deprecated and will be removed in v1.8\"):\n assert trainer.verbose_evaluate\n\n with pytest.deprecated_call(match=\"verbose_evaluate` property has been deprecated and will be removed in v1.8\"):\n trainer.verbose_evaluate = False\n\n\[email protected](\"fn_prefix\", [\"validated\", \"tested\", \"predicted\"])\ndef test_v1_8_0_trainer_ckpt_path_attributes(fn_prefix: str):\n test_attr = f\"{fn_prefix}_ckpt_path\"\n trainer = Trainer()\n with pytest.deprecated_call(match=f\"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8\"):\n _ = getattr(trainer, test_attr)\n with pytest.deprecated_call(match=f\"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8\"):\n setattr(trainer, test_attr, \"v\")\n\n\ndef test_v1_8_0_deprecated_trainer_should_rank_save_checkpoint(tmpdir):\n trainer = Trainer()\n with pytest.deprecated_call(\n match=r\"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n _ = trainer.should_rank_save_checkpoint\n\n\ndef test_v1_8_0_deprecated_lr_scheduler():\n trainer = Trainer()\n with pytest.deprecated_call(match=r\"`Trainer.lr_schedulers` is deprecated in v1.6 
and will be removed in v1.8.\"):\n assert trainer.lr_schedulers == []\n\n\ndef test_v1_8_0_trainer_optimizers_mixin():\n trainer = Trainer()\n model = BoringModel()\n trainer.strategy.connect(model)\n trainer.lightning_module.trainer = trainer\n\n with pytest.deprecated_call(\n match=r\"`TrainerOptimizersMixin.init_optimizers` was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.init_optimizers(model)\n\n with pytest.deprecated_call(\n match=r\"`TrainerOptimizersMixin.convert_to_lightning_optimizers` was deprecated in v1.6 and will be removed in \"\n \"v1.8.\"\n ):\n trainer.convert_to_lightning_optimizers()\n\n\ndef test_v1_8_0_deprecate_trainer_callback_hook_mixin():\n methods_with_self = [\n \"on_before_accelerator_backend_setup\",\n \"on_configure_sharded_model\",\n \"on_init_start\",\n \"on_init_end\",\n \"on_fit_start\",\n \"on_fit_end\",\n \"on_sanity_check_start\",\n \"on_sanity_check_end\",\n \"on_train_epoch_start\",\n \"on_train_epoch_end\",\n \"on_validation_epoch_start\",\n \"on_validation_epoch_end\",\n \"on_test_epoch_start\",\n \"on_test_epoch_end\",\n \"on_predict_epoch_start\",\n \"on_epoch_start\",\n \"on_epoch_end\",\n \"on_train_start\",\n \"on_train_end\",\n \"on_pretrain_routine_start\",\n \"on_pretrain_routine_end\",\n \"on_batch_start\",\n \"on_batch_end\",\n \"on_validation_start\",\n \"on_validation_end\",\n \"on_test_start\",\n \"on_test_end\",\n \"on_predict_start\",\n \"on_predict_end\",\n \"on_after_backward\",\n ]\n methods_with_stage = [\n \"setup\",\n \"teardown\",\n ]\n methods_with_batch_batch_idx_dataloader_idx = [\n \"on_train_batch_start\",\n \"on_validation_batch_start\",\n \"on_test_batch_start\",\n \"on_predict_batch_start\",\n ]\n methods_with_outputs_batch_batch_idx_dataloader_idx = [\n \"on_train_batch_end\",\n \"on_validation_batch_end\",\n \"on_test_batch_end\",\n \"on_predict_batch_end\",\n ]\n methods_with_checkpoint = [\"on_save_checkpoint\", \"on_load_checkpoint\"]\n trainer = Trainer(\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n enable_progress_bar=False,\n logger=False,\n )\n model = BoringModel()\n # need to attach model to trainer for testing of `on_pretrain_routine_start`\n trainer.strategy.connect(model)\n for method_name in methods_with_self:\n fn = getattr(trainer, method_name, None)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn()\n for method_name in methods_with_stage:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(stage=\"test\")\n for method_name in methods_with_batch_batch_idx_dataloader_idx:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(batch={}, batch_idx=0, dataloader_idx=0)\n for method_name in methods_with_outputs_batch_batch_idx_dataloader_idx:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]), batch={}, batch_idx=0, dataloader_idx=0)\n for method_name in methods_with_checkpoint:\n fn = getattr(trainer, method_name)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n fn(checkpoint={})\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_predict_epoch_end(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))\n with 
pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_exception(exception=Exception)\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_before_backward(loss=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_before_optimizer_step(\n optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9), optimizer_idx=0\n )\n with pytest.deprecated_call(match=\"was deprecated in v1.6 and will be removed in v1.8\"):\n trainer.on_before_zero_grad(optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9))\n\n\ndef test_v1_8_0_deprecated_training_type_plugin_property():\n trainer = Trainer()\n with pytest.deprecated_call(match=\"in v1.6 and will be removed in v1.8\"):\n trainer.training_type_plugin\n\n\ndef test_v1_8_0_deprecate_trainer_data_loading_mixin():\n trainer = Trainer(max_epochs=1)\n model = BoringModel()\n dm = BoringDataModule()\n trainer.fit(model, datamodule=dm)\n\n with pytest.deprecated_call(\n match=r\"`TrainerDataLoadingMixin.prepare_dataloader` was deprecated in v1.6 and will be removed in v1.8.\",\n ):\n trainer.prepare_dataloader(dataloader=model.train_dataloader, shuffle=False)\n with pytest.deprecated_call(\n match=r\"`TrainerDataLoadingMixin.request_dataloader` was deprecated in v1.6 and will be removed in v1.8.\",\n ):\n trainer.request_dataloader(stage=RunningStage.TRAINING)\n\n\ndef test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys():\n from pytorch_lightning.callbacks.device_stats_monitor import prefix_metric_keys\n\n with pytest.deprecated_call(match=\"in v1.6 and will be removed in v1.8\"):\n prefix_metric_keys({\"foo\": 1.0}, \"bar\")\n\n\[email protected](\n \"cls\",\n [\n DDPPlugin,\n DDP2Plugin,\n DDPSpawnPlugin,\n pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)),\n DataParallelPlugin,\n DDPFullyShardedPlugin,\n pytest.param(IPUPlugin, marks=RunIf(ipu=True)),\n DDPShardedPlugin,\n DDPSpawnShardedPlugin,\n TPUSpawnPlugin,\n ],\n)\ndef test_v1_8_0_deprecated_training_type_plugin_classes(cls):\n old_name = cls.__name__\n new_name = old_name.replace(\"Plugin\", \"Strategy\")\n with pytest.deprecated_call(\n match=f\"{old_name}` is deprecated in v1.6 and will be removed in v1.8. 
Use .*{new_name}` instead.\"\n ):\n cls()\n\n\ndef test_v1_8_0_deprecated_single_device_plugin_class():\n with pytest.deprecated_call(\n match=(\n \"SingleDevicePlugin` is deprecated in v1.6 and will be removed in v1.8.\"\n \" Use `.*SingleDeviceStrategy` instead.\"\n )\n ):\n SingleDevicePlugin(\"cpu\")\n\n\n@RunIf(tpu=True)\ndef test_v1_8_0_deprecated_single_tpu_plugin_class():\n with pytest.deprecated_call(\n match=(\n \"SingleTPUPlugin` is deprecated in v1.6 and will be removed in v1.8.\" \" Use `.*SingleTPUStrategy` instead.\"\n )\n ):\n SingleTPUPlugin(0)\n\n\ndef test_v1_8_0_deprecated_lightning_optimizers():\n trainer = Trainer()\n with pytest.deprecated_call(\n match=\"Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8\"\n ):\n assert trainer.lightning_optimizers == {}\n\n\ndef test_v1_8_0_remove_on_batch_start_end(tmpdir):\n class TestCallback(Callback):\n def on_batch_start(self, *args, **kwargs):\n print(\"on_batch_start\")\n\n model = BoringModel()\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class TestCallback(Callback):\n def on_batch_end(self, *args, **kwargs):\n print(\"on_batch_end\")\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_on_configure_sharded_model(tmpdir):\n class TestCallback(Callback):\n def on_configure_sharded_model(self, trainer, model):\n print(\"Configuring sharded model\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir):\n class CustomModel(BoringModel):\n def on_epoch_start(self, *args, **kwargs):\n print(\"on_epoch_start\")\n\n model = CustomModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class CustomModel(BoringModel):\n def on_epoch_end(self, *args, **kwargs):\n print(\"on_epoch_end\")\n\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n\n model = CustomModel()\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir):\n class CustomModel(BoringModel):\n def on_pretrain_routine_start(self, *args, **kwargs):\n print(\"foo\")\n\n model = CustomModel()\n trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class CustomModel(BoringModel):\n def on_pretrain_routine_end(self, *args, **kwargs):\n print(\"foo\")\n\n 
trainer = Trainer(\n fast_dev_run=True,\n default_root_dir=tmpdir,\n )\n\n model = CustomModel()\n with pytest.deprecated_call(\n match=\"The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_rank_zero_imports():\n\n import warnings\n\n from pytorch_lightning.utilities.distributed import rank_zero_debug, rank_zero_info\n from pytorch_lightning.utilities.warnings import LightningDeprecationWarning, rank_zero_deprecation, rank_zero_warn\n\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.distributed.rank_zero_debug has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_debug(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.distributed.rank_zero_info has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_info(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.rank_zero_warn has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_warn(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.rank_zero_deprecation has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n rank_zero_deprecation(\"foo\")\n with pytest.deprecated_call(\n match=\"pytorch_lightning.utilities.warnings.LightningDeprecationWarning has been deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n warnings.warn(\"foo\", LightningDeprecationWarning, stacklevel=5)\n\n\ndef test_v1_8_0_on_before_accelerator_backend_setup(tmpdir):\n class TestCallback(Callback):\n def on_before_accelerator_backend_setup(self, *args, **kwargs):\n print(\"on_before_accelerator_backend called.\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n max_epochs=1,\n fast_dev_run=True,\n enable_progress_bar=False,\n logger=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6\"\n \" and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_logger_agg_parameters():\n class CustomLogger(LightningLoggerBase):\n @rank_zero_only\n def log_hyperparams(self, params):\n pass\n\n @rank_zero_only\n def log_metrics(self, metrics, step):\n pass\n\n @property\n def name(self):\n pass\n\n @property\n def version(self):\n pass\n\n with pytest.deprecated_call(\n match=\"The `agg_key_funcs` parameter for `LightningLoggerBase` was deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n CustomLogger(agg_key_funcs={\"mean\", np.mean})\n\n with pytest.deprecated_call(\n match=\"The `agg_default_func` parameter for `LightningLoggerBase` was deprecated in v1.6\"\n \" and will be removed in v1.8.\"\n ):\n CustomLogger(agg_default_func=np.mean)\n\n # Should have no deprecation warning\n logger = CustomLogger()\n\n with pytest.deprecated_call(\n match=\"`LightningLoggerBase.update_agg_funcs` was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n logger.update_agg_funcs()\n\n\ndef test_v1_8_0_deprecated_agg_and_log_metrics_override(tmpdir):\n class AggregationOverrideLogger(CSVLogger):\n @rank_zero_only\n def agg_and_log_metrics(self, metrics, step):\n self.log_metrics(metrics=metrics, step=step)\n\n logger = AggregationOverrideLogger(tmpdir)\n logger2 = CSVLogger(tmpdir)\n logger3 = CSVLogger(tmpdir)\n\n # Test single loggers\n with pytest.deprecated_call(\n 
match=\"`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed\"\n \" in v1.8. `Trainer` will directly call `LightningLoggerBase.log_metrics` so custom\"\n \" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`.\"\n ):\n Trainer(logger=logger)\n # Should have no deprecation warning\n Trainer(logger=logger2)\n\n # Test multiple loggers\n with pytest.deprecated_call(\n match=\"`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed\"\n \" in v1.8. `Trainer` will directly call `LightningLoggerBase.log_metrics` so custom\"\n \" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`.\"\n ):\n Trainer(logger=[logger, logger3])\n # Should have no deprecation warning\n Trainer(logger=[logger2, logger3])\n\n\ndef test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir):\n class TestCallback(Callback):\n def on_pretrain_routine_start(self, trainer, pl_module):\n print(\"on_pretrain_routine_start called.\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n enable_progress_bar=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n class TestCallback(Callback):\n def on_pretrain_routine_end(self, trainer, pl_module):\n print(\"on_pretrain_routine_end called.\")\n\n model = BoringModel()\n\n trainer = Trainer(\n callbacks=[TestCallback()],\n fast_dev_run=True,\n enable_progress_bar=False,\n default_root_dir=tmpdir,\n )\n with pytest.deprecated_call(\n match=\"The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_weights_save_path(tmpdir):\n with pytest.deprecated_call(match=r\"Setting `Trainer\\(weights_save_path=\\)` has been deprecated in v1.6\"):\n trainer = Trainer(weights_save_path=tmpdir)\n with pytest.deprecated_call(match=r\"`Trainer.weights_save_path` has been deprecated in v1.6\"):\n _ = trainer.weights_save_path\n\n\ndef test_deprecated_epoch_outputs_format(tmpdir):\n class DeprecationModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.truncated_bptt_steps = 1\n\n def training_step(self, batch, batch_idx, optimizer_idx, hiddens):\n output = super().training_step(batch, batch_idx)\n output[\"hiddens\"] = hiddens\n return output\n\n def tbptt_split_batch(self, batch, split_size):\n return [batch, batch]\n\n def training_epoch_end(self, outputs):\n ...\n\n def on_train_batch_end(self, outputs, batch, batch_idx) -> None:\n ...\n\n def configure_optimizers(self):\n return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = DeprecationModel()\n batch_match = r\"on_train_batch_end.*will change in version v1.8 to \\(tbptt_steps, n_optimizers\\)\"\n with pytest.deprecated_call(match=batch_match):\n trainer.fit(model)\n\n class DeprecationModel2(DeprecationModel):\n def on_train_batch_end(self, *args, new_format=True):\n ...\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = DeprecationModel()\n epoch_match = r\"training_epoch_end.*will change in version v1.8 to \\(n_batches, tbptt_steps, n_optimizers\\)\"\n with pytest.deprecated_call(match=epoch_match):\n trainer.fit(model)\n\n class NoDeprecationModel(DeprecationModel2):\n def training_epoch_end(self, outputs, 
new_format=True):\n ...\n\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n model = NoDeprecationModel()\n with no_deprecated_call(match=\"will change in version v1.8.*new_format=True\"):\n trainer.fit(model)\n\n\[email protected](reruns=3)\[email protected]([\"action\", \"expected\"], [(\"a\", [3, 1]), (\"b\", [2]), (\"c\", [1])])\ndef test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):\n \"\"\"Ensure the reported durations are reasonably accurate.\"\"\"\n\n def _sleep_generator(durations):\n \"\"\"the profile_iterable method needs an iterable in which we can ensure that we're properly timing how long\n it takes to call __next__\"\"\"\n for duration in durations:\n time.sleep(duration)\n yield duration\n\n def _get_python_cprofile_total_duration(profile):\n return sum(x.inlinetime for x in profile.getstats())\n\n simple_profiler = SimpleProfiler()\n iterable = _sleep_generator(expected)\n\n with pytest.deprecated_call(\n match=\"`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n for _ in simple_profiler.profile_iterable(iterable, action):\n pass\n\n # we exclude the last item in the recorded durations since that's when StopIteration is raised\n np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)\n\n advanced_profiler = AdvancedProfiler(dirpath=tmpdir, filename=\"profiler\")\n\n iterable = _sleep_generator(expected)\n\n with pytest.deprecated_call(\n match=\"`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8.\"\n ):\n for _ in advanced_profiler.profile_iterable(iterable, action):\n pass\n\n recorded_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])\n expected_total_duration = np.sum(expected)\n np.testing.assert_allclose(recorded_total_duration, expected_total_duration, rtol=0.2)\n\n\ndef test_v1_8_0_logger_collection(tmpdir):\n logger1 = CSVLogger(tmpdir)\n logger2 = CSVLogger(tmpdir)\n\n trainer1 = Trainer(logger=logger1)\n trainer2 = Trainer(logger=[logger1, logger2])\n\n # Should have no deprecation warning\n trainer1.logger\n trainer1.loggers\n trainer2.loggers\n trainer2.logger\n\n with pytest.deprecated_call(match=\"`LoggerCollection` is deprecated in v1.6\"):\n LoggerCollection([logger1, logger2])\n\n\ndef test_v1_8_0_precision_plugin_checkpoint_hooks(tmpdir):\n class PrecisionPluginSaveHook(PrecisionPlugin):\n def on_save_checkpoint(self, checkpoint):\n print(\"override on_save_checkpoint\")\n\n class PrecisionPluginLoadHook(PrecisionPlugin):\n def on_load_checkpoint(self, checkpoint):\n print(\"override on_load_checkpoint\")\n\n model = BoringModel()\n\n precplugin_save = PrecisionPluginSaveHook()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_save])\n with pytest.deprecated_call(\n match=\"`PrecisionPlugin.on_save_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `state_dict` instead.\"\n ):\n trainer.fit(model)\n\n precplugin_load = PrecisionPluginLoadHook()\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_load])\n with pytest.deprecated_call(\n match=\"`PrecisionPlugin.on_load_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. 
Use `load_state_dict` instead.\"\n ):\n trainer.fit(model)\n\n\ndef test_v1_8_0_abstract_profiler():\n assert \"`AbstractProfiler` was deprecated in v1.6\" in AbstractProfiler.__doc__\n\n\ndef test_v1_8_0_datamodule_checkpointhooks():\n class CustomBoringDataModuleSave(BoringDataModule):\n def on_save_checkpoint(self, checkpoint):\n print(\"override on_save_checkpoint\")\n\n class CustomBoringDataModuleLoad(BoringDataModule):\n def on_load_checkpoint(self, checkpoint):\n print(\"override on_load_checkpoint\")\n\n trainer = Mock()\n\n trainer.datamodule = CustomBoringDataModuleSave()\n with pytest.deprecated_call(\n match=\"`LightningDataModule.on_save_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `state_dict` instead.\"\n ):\n _check_datamodule_checkpoint_hooks(trainer)\n\n trainer.datamodule = CustomBoringDataModuleLoad()\n with pytest.deprecated_call(\n match=\"`LightningDataModule.on_load_checkpoint` was deprecated in\"\n \" v1.6 and will be removed in v1.8. Use `load_state_dict` instead.\"\n ):\n _check_datamodule_checkpoint_hooks(trainer)\n\n\ndef test_v1_8_0_trainer_use_amp(tmpdir):\n trainer = Trainer()\n\n with pytest.deprecated_call(match=\"`Trainer.use_amp` is deprecated in v1.6.0\"):\n _ = trainer.use_amp\n\n\ndef test_v1_8_0_lightning_module_use_amp():\n model = BoringModel()\n with pytest.deprecated_call(match=\"`LightningModule.use_amp` was deprecated in v1.6\"):\n _ = model.use_amp\n with pytest.deprecated_call(match=\"`LightningModule.use_amp` was deprecated in v1.6\"):\n model.use_amp = False\n\n\[email protected](os.environ, {\"PL_TORCH_DISTRIBUTED_BACKEND\": \"foo\"})\ndef test_v1_8_0_torch_distributed_backend_env():\n from pytorch_lightning.utilities.distributed import _get_process_group_backend_from_env\n\n with pytest.deprecated_call(\n match=\"Environment variable `PL_TORCH_DISTRIBUTED_BACKEND`\"\n \" was deprecated in v1.6 and will be removed in v1.8.\"\n ):\n _get_process_group_backend_from_env()\n\n\ndef test_parallel_strategy_torch_distributed_backend():\n class CustomParallel(ParallelStrategy):\n @property\n def root_device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def model_to_device(self):\n pass\n\n @property\n def is_global_zero(self):\n return True\n\n def broadcast(self, obj):\n return obj\n\n def reduce(self, tensor):\n return tensor\n\n def barrier(self):\n return\n\n def all_gather(self, tensor):\n return tensor\n\n strategy = CustomParallel()\n with pytest.deprecated_call(\n match=\"ParallelStrategy.torch_distributed_backend was deprecated\" \" in v1.6 and will be removed in v1.8.\"\n ):\n strategy.torch_distributed_backend\n\n\ndef test_trainer_config_device_ids():\n trainer = Trainer(devices=2)\n with pytest.deprecated_call(\n match=\"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead.\"\n ):\n trainer.devices == 2\n\n\[email protected](\n [\"gpus\", \"expected_root_gpu\", \"strategy\"],\n [\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"O gpus, expect gpu root device to be None.\"),\n pytest.param(1, 0, \"ddp\", id=\"1 gpu, expect gpu root device to be 0.\"),\n pytest.param(-1, 0, \"ddp\", id=\"-1 - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(\"-1\", 0, \"ddp\", id=\"'-1' - use all gpus, expect gpu root device to be 0.\"),\n pytest.param(3, 0, \"ddp\", id=\"3 gpus, expect gpu root device to be 0.(backend:ddp)\"),\n 
],\n)\ndef test_root_gpu_property(monkeypatch, gpus, expected_root_gpu, strategy):\n monkeypatch.setattr(torch.cuda, \"is_available\", lambda: True)\n monkeypatch.setattr(torch.cuda, \"device_count\", lambda: 16)\n with pytest.deprecated_call(\n match=\"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. \"\n \"Please use `Trainer.strategy.root_device.index` instead.\"\n ):\n assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu\n\n\[email protected](\n [\"gpus\", \"expected_root_gpu\", \"strategy\"],\n [\n pytest.param(None, None, None, id=\"None is None\"),\n pytest.param(None, None, \"ddp\", id=\"None is None\"),\n pytest.param(0, None, \"ddp\", id=\"None is None\"),\n ],\n)\ndef test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strategy):\n monkeypatch.setattr(torch.cuda, \"device_count\", lambda: 0)\n with pytest.deprecated_call(\n match=\"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. \"\n \"Please use `Trainer.strategy.root_device.index` instead.\"\n ):\n assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu\n\n\[email protected](\n [\"gpus\", \"expected_num_gpus\", \"strategy\"],\n [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(0, 0, None, id=\"Oth gpu, expect 1 gpu to use.\"),\n pytest.param(1, 1, None, id=\"1st gpu, expect 1 gpu to use.\"),\n pytest.param(-1, 16, \"ddp\", id=\"-1 - use all gpus\"),\n pytest.param(\"-1\", 16, \"ddp\", id=\"'-1' - use all gpus\"),\n pytest.param(3, 3, \"ddp\", id=\"3rd gpu - 1 gpu to use (backend:ddp)\"),\n ],\n)\ndef test_trainer_gpu_parse(monkeypatch, gpus, expected_num_gpus, strategy):\n monkeypatch.setattr(torch.cuda, \"is_available\", lambda: True)\n monkeypatch.setattr(torch.cuda, \"device_count\", lambda: 16)\n with pytest.deprecated_call(\n match=\"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` instead.\"\n ):\n assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus\n\n\[email protected](\n [\"gpus\", \"expected_num_gpus\", \"strategy\"],\n [\n pytest.param(None, 0, None, id=\"None - expect 0 gpu to use.\"),\n pytest.param(None, 0, \"ddp\", id=\"None - expect 0 gpu to use.\"),\n ],\n)\ndef test_trainer_num_gpu_0(monkeypatch, gpus, expected_num_gpus, strategy):\n monkeypatch.setattr(torch.cuda, \"device_count\", lambda: 0)\n with pytest.deprecated_call(\n match=\"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` instead.\"\n ):\n assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus\n"
] | [
[
"torch.autograd.set_detect_anomaly",
"torch._C._log_api_usage_once",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available"
],
[
"torch.device",
"torch.tensor",
"numpy.sum",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
saranyakrish14/glow | [
"3562fba6a77d2bb4aacf98a5bff5a737a93f6adc",
"3562fba6a77d2bb4aacf98a5bff5a737a93f6adc"
] | [
"torch_glow/tests/nodes/add_test.py",
"torch_glow/tests/nodes/sigmoid_test.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleAddModule(torch.nn.Module):\n def __init__(self, inplace=False):\n super(SimpleAddModule, self).__init__()\n self.inplace = inplace\n\n def forward(self, a, b):\n if b.size() == torch.Size([]):\n return (a * a).add(b.item())\n if self.inplace:\n c = a.add_(b)\n return c.add_(c)\n else:\n c = a.add(b)\n return c.add(c)\n\n\nclass TestAdd(utils.TorchGlowTestCase):\n @utils.deterministic_expand(\n [\n lambda: (\"basic\", SimpleAddModule(), torch.randn(4), torch.randn(4)),\n lambda: (\"inplace\", SimpleAddModule(True), torch.randn(4), torch.randn(4)),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(8, 3, 4, 2),\n torch.randn(4, 2),\n ),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(8, 3, 4, 2),\n torch.randn(1, 2),\n ),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(4, 2),\n torch.randn(8, 3, 4, 2),\n ),\n lambda: (\"float\", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),\n lambda: (\n \"float_and_int\",\n SimpleAddModule(),\n torch.randn(4),\n torch.tensor(42),\n True,\n ),\n lambda: (\n \"int32\",\n SimpleAddModule(),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),\n ),\n lambda: (\n \"int64\",\n SimpleAddModule(),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),\n ),\n ]\n )\n def test_add(self, _, module, a, b, skip_to_glow=False):\n utils.run_comparison_tests(\n module,\n (a, b),\n fusible_ops={\"aten::add_\"} if module.inplace else {\"aten::add\"},\n )\n",
"# isort:skip_file\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleSigmoidModel(torch.nn.Module):\n def __init__(self, inplace=False):\n super(SimpleSigmoidModel, self).__init__()\n self.inplace = inplace\n\n def forward(self, tensor):\n if self.inplace:\n other = tensor + tensor\n return other.sigmoid_()\n else:\n other = tensor + tensor\n return other.sigmoid()\n\n\nclass TestSigmoid(utils.TorchGlowTestCase):\n @utils.deterministic_expand(\n [\n lambda: (\"basic\", SimpleSigmoidModel(), torch.randn(6)),\n lambda: (\"inplace\", SimpleSigmoidModel(inplace=True), torch.randn(6)),\n ]\n )\n def test_sigmoid(self, _, module, tensor):\n utils.compare_tracing_methods(module, tensor, fusible_ops={\"aten::sigmoid\"})\n"
] | [
[
"torch.torch.randint",
"torch.randn",
"torch.Size",
"torch.tensor"
],
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kuangliu/pytorch-ssd | [
"02ed1cbe6962e791895ab1c455dc5ddfb87291b9"
] | [
"encoder.py"
] | [
"'''Encode target locations and labels.'''\nimport torch\n\nimport math\nimport itertools\n\nclass DataEncoder:\n def __init__(self):\n '''Compute default box sizes with scale and aspect transform.'''\n scale = 300.\n steps = [s / scale for s in (8, 16, 32, 64, 100, 300)]\n sizes = [s / scale for s in (30, 60, 111, 162, 213, 264, 315)]\n aspect_ratios = ((2,), (2,3), (2,3), (2,3), (2,), (2,))\n feature_map_sizes = (38, 19, 10, 5, 3, 1)\n\n num_layers = len(feature_map_sizes)\n\n boxes = []\n for i in range(num_layers):\n fmsize = feature_map_sizes[i]\n for h,w in itertools.product(range(fmsize), repeat=2):\n cx = (w + 0.5)*steps[i]\n cy = (h + 0.5)*steps[i]\n\n s = sizes[i]\n boxes.append((cx, cy, s, s))\n\n s = math.sqrt(sizes[i] * sizes[i+1])\n boxes.append((cx, cy, s, s))\n\n s = sizes[i]\n for ar in aspect_ratios[i]:\n boxes.append((cx, cy, s * math.sqrt(ar), s / math.sqrt(ar)))\n boxes.append((cx, cy, s / math.sqrt(ar), s * math.sqrt(ar)))\n\n self.default_boxes = torch.Tensor(boxes)\n\n def iou(self, box1, box2):\n '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].\n\n Args:\n box1: (tensor) bounding boxes, sized [N,4].\n box2: (tensor) bounding boxes, sized [M,4].\n\n Return:\n (tensor) iou, sized [N,M].\n '''\n N = box1.size(0)\n M = box2.size(0)\n\n lt = torch.max(\n box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n rb = torch.min(\n box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n wh = rb - lt # [N,M,2]\n wh[wh<0] = 0 # clip at 0\n inter = wh[:,:,0] * wh[:,:,1] # [N,M]\n\n area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]\n area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]\n area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]\n area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]\n\n iou = inter / (area1 + area2 - inter)\n return iou\n\n def encode(self, boxes, classes, threshold=0.5):\n '''Transform target bounding boxes and class labels to SSD boxes and classes.\n\n Match each object box to all the default boxes, pick the ones with the\n Jaccard-Index > 0.5:\n Jaccard(A,B) = AB / (A+B-AB)\n\n Args:\n boxes: (tensor) object bounding boxes (xmin,ymin,xmax,ymax) of a image, sized [#obj, 4].\n classes: (tensor) object class labels of a image, sized [#obj,].\n threshold: (float) Jaccard index threshold\n\n Returns:\n boxes: (tensor) bounding boxes, sized [#obj, 8732, 4].\n classes: (tensor) class labels, sized [8732,]\n '''\n default_boxes = self.default_boxes\n num_default_boxes = default_boxes.size(0)\n num_objs = boxes.size(0)\n\n iou = self.iou( # [#obj,8732]\n boxes,\n torch.cat([default_boxes[:,:2] - default_boxes[:,2:]/2,\n default_boxes[:,:2] + default_boxes[:,2:]/2], 1)\n )\n\n iou, max_idx = iou.max(0) # [1,8732]\n max_idx.squeeze_(0) # [8732,]\n iou.squeeze_(0) # [8732,]\n\n boxes = boxes[max_idx] # [8732,4]\n variances = [0.1, 0.2]\n cxcy = (boxes[:,:2] + boxes[:,2:])/2 - default_boxes[:,:2] # [8732,2]\n cxcy /= variances[0] * default_boxes[:,2:]\n wh = (boxes[:,2:] - boxes[:,:2]) / default_boxes[:,2:] # [8732,2]\n wh = torch.log(wh) / variances[1]\n loc = torch.cat([cxcy, wh], 1) # [8732,4]\n\n conf = 1 + classes[max_idx] # [8732,], background class = 0\n conf[iou<threshold] = 0 # background\n return loc, conf\n\n def nms(self, bboxes, scores, threshold=0.5, mode='union'):\n '''Non 
maximum suppression.\n\n Args:\n bboxes: (tensor) bounding boxes, sized [N,4].\n scores: (tensor) bbox scores, sized [N,].\n threshold: (float) overlap threshold.\n mode: (str) 'union' or 'min'.\n\n Returns:\n keep: (tensor) selected indices.\n\n Ref:\n https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py\n '''\n x1 = bboxes[:,0]\n y1 = bboxes[:,1]\n x2 = bboxes[:,2]\n y2 = bboxes[:,3]\n\n areas = (x2-x1) * (y2-y1)\n _, order = scores.sort(0, descending=True)\n\n keep = []\n while order.numel() > 0:\n i = order[0]\n keep.append(i)\n\n if order.numel() == 1:\n break\n\n xx1 = x1[order[1:]].clamp(min=x1[i])\n yy1 = y1[order[1:]].clamp(min=y1[i])\n xx2 = x2[order[1:]].clamp(max=x2[i])\n yy2 = y2[order[1:]].clamp(max=y2[i])\n\n w = (xx2-xx1).clamp(min=0)\n h = (yy2-yy1).clamp(min=0)\n inter = w*h\n\n if mode == 'union':\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == 'min':\n ovr = inter / areas[order[1:]].clamp(max=areas[i])\n else:\n raise TypeError('Unknown nms mode: %s.' % mode)\n\n ids = (ovr<=threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n order = order[ids+1]\n return torch.LongTensor(keep)\n\n def decode(self, loc, conf):\n '''Transform predicted loc/conf back to real bbox locations and class labels.\n\n Args:\n loc: (tensor) predicted loc, sized [8732,4].\n conf: (tensor) predicted conf, sized [8732,21].\n\n Returns:\n boxes: (tensor) bbox locations, sized [#obj, 4].\n labels: (tensor) class labels, sized [#obj,1].\n '''\n variances = [0.1, 0.2]\n wh = torch.exp(loc[:,2:]*variances[1]) * self.default_boxes[:,2:]\n cxcy = loc[:,:2] * variances[0] * self.default_boxes[:,2:] + self.default_boxes[:,:2]\n boxes = torch.cat([cxcy-wh/2, cxcy+wh/2], 1) # [8732,4]\n\n max_conf, labels = conf.max(1) # [8732,1]\n ids = labels.squeeze(1).nonzero().squeeze(1) # [#boxes,]\n\n keep = self.nms(boxes[ids], max_conf[ids].squeeze(1))\n return boxes[ids][keep], labels[ids][keep], max_conf[ids][keep]\n"
] | [
[
"torch.LongTensor",
"torch.Tensor",
"torch.cat",
"torch.exp",
"torch.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
johnmgregoire/JCAPGeneratePrintCode | [
"afc1dbe6125d0024a46889011ab653ed24016fe4"
] | [
"platemapgenerator_calccompsforsingleplate.py"
] | [
"import time, copy, pickle\nimport os, os.path\nimport sys\nimport numpy, pylab\n\nsys.path.append('C:/Users/Gregoire/Documents/PythonCode/JCAP')\nfrom readplatemap import *\n\nmodelpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate333_1map_full.txt'\nnewpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate20intervwbin.txt'\n\nwritelines=[]\nf=open(modelpath, mode='r')\nls=f.readlines()[:2]\nwritelines+=[l.strip() for l in ls]\nf.close()\n\ndlist=readsingleplatemaptxt(modelpath, returnfiducials=False)\ndlistsrc=readplatemaptxt(codes=[0, 1, 2, 3])\n\nsmpsrc=numpy.array([d['Sample'] for d in dlistsrc])\ncodesrc=numpy.array([d['code'] for d in dlistsrc])\n\nintervs=20\ncomps=[[1.0*b/intervs, 1.0*c/intervs, 1.0*(intervs-a-b-c)/intervs, 1.0*a/intervs] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1]\n\ndef genbinarycomps(intervs, elind1, elind2, ndim=4):\n aa=numpy.linspace(0.,1.,intervs+1)\n c=numpy.zeros((len(aa), ndim), dtype='float64')\n c[:, elind1]=aa\n c[:, elind2]=1.-aa\n return c\n\ncomps2=comps\ncodes=[0]*len(comps)\nbinintervs=5\nfor i, j in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:\n comps2+=list(genbinarycomps(binintervs, i, j))+[numpy.zeros(4, dtype='float64')] #add 6 compositions in binary line and then zeros\n codes+=[4]*6+[1]\ncomps2+=[numpy.zeros(4, dtype='float64')]*6 #6 more zeros to round out the 1819 code0 samples in a standard platemap\ncodes+=[1]*6\ncomps2=[numpy.array(c) for c in comps2]\n\ncomps2pop=copy.copy(comps2)\ncodespop=copy.copy(codes)\n\nfor d in dlist:\n if d['code']==0:\n c=comps2pop.pop(0)\n cd=codespop.pop(0)\n for k, v in zip(['A', 'B', 'C', 'D'], c):\n d[k]=v\n d['code']=cd\n\nk_f=[\\\n('Sample','%04d'),\\\n('x','%.2f'),\\\n('y','%.2f'),\\\n('dx','%.2f'),\\\n('dx','%.2f'),\\\n('A','%.3f'),\\\n('B','%.3f'),\\\n('C','%.3f'),\\\n('D','%.3f'),\\\n('E','%.3f'),\\\n('F','%.3f'),\\\n('G','%.3f'),\\\n('H','%.3f'),\\\n('code','%d'),\\\n]\n\nwritelines+=[', '.join([f %d[k] for k, f in k_f]) for d in dlist]\n\nf=open(newpath, mode='w')\nf.write('\\n'.join(writelines))\nf.close()\n\nsys.path.append('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')\nfrom myquaternaryutility import QuaternaryPlot\n\nfor d in dlist:\n c=numpy.array([d[el] for el in ['A', 'B', 'C', 'D']])\n if c.sum()>0:\n c/=c.sum()\n d['compositions']=c\n\ncarr=numpy.array([d['compositions'] for d in dlist])\nstpq=QuaternaryPlot(111)\nstpq.scatter(carr)\npylab.show()\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hfurkanbozkurt/syne-tune | [
"05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f",
"05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f",
"05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f"
] | [
"syne_tune/optimizer/schedulers/searchers/bayesopt/utils/test_objects.py",
"syne_tune/optimizer/schedulers/searchers/bayesopt/models/meanstd_acqfunc_impl.py",
"syne_tune/optimizer/schedulers/searchers/bayesopt/utils/comparison_gpy.py"
] | [
"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n# Could eventually remove this code: Is this needed in unit tests?\n\n\"\"\"\nObject definitions that are used for testing.\n\"\"\"\n\nfrom typing import Iterator, Tuple, Dict, List, Optional, Union\nimport numpy as np\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common \\\n import Hyperparameter, Configuration, dictionarize_objective\nfrom syne_tune.config_space import Categorical, loguniform, randint, \\\n choice, uniform\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges \\\n import HyperparameterRanges\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \\\n import make_hyperparameter_ranges\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state \\\n import TuningJobState\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import \\\n TrialEvaluations, PendingEvaluation\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants \\\n import MCMCConfig, OptimizationConfig\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gp_regression \\\n import GaussianProcessRegression\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gpr_mcmc \\\n import GPRegressionMCMC\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.kernel \\\n import Matern52, KernelFunction\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping \\\n import WarpedKernel, Warping\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.base_classes \\\n import CandidateGenerator\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.common \\\n import ExclusionList\n\n\ndef build_kernel(state: TuningJobState,\n do_warping: bool = False) -> KernelFunction:\n dims, warping_ranges = dimensionality_and_warping_ranges(state.hp_ranges)\n kernel = Matern52(dims, ARD=True)\n if do_warping:\n return WarpedKernel(\n kernel=kernel, warping=Warping(dims, warping_ranges))\n else:\n return kernel\n\n\ndef default_gpmodel(\n state: TuningJobState, random_seed: int,\n optimization_config: OptimizationConfig) -> GaussianProcessRegression:\n return GaussianProcessRegression(\n kernel=build_kernel(state),\n optimization_config=optimization_config,\n random_seed=random_seed\n )\n\n\ndef default_gpmodel_mcmc(\n state: TuningJobState, random_seed: int,\n mcmc_config: MCMCConfig) -> GPRegressionMCMC:\n return GPRegressionMCMC(\n build_kernel=lambda: build_kernel(state),\n mcmc_config=mcmc_config,\n random_seed=random_seed\n )\n\n\ndef dimensionality_and_warping_ranges(hp_ranges: HyperparameterRanges) -> \\\n Tuple[int, Dict[int, Tuple[float, float]]]:\n lower_config = dict()\n upper_config = dict()\n for name, hp_range in hp_ranges.config_space.items():\n if not isinstance(hp_range, Categorical):\n lower_config[name] = hp_range.lower\n upper_config[name] = hp_range.upper\n else:\n lower_config[name] = 
hp_range.categories[0]\n upper_config[name] = hp_range.categories[0]\n lower_internal = hp_ranges.to_ndarray(lower_config)\n upper_internal = hp_ranges.to_ndarray(upper_config)\n dims = 0\n warping_ranges = dict()\n for name in hp_ranges.internal_keys:\n hp_range = hp_ranges.config_space[name]\n if not isinstance(hp_range, Categorical):\n _lower = lower_internal[dims]\n _upper = upper_internal[dims]\n if _upper > _lower: # exclude cases where max equal to min\n warping_ranges[dims] = (_lower, _upper)\n else:\n assert _lower == _upper\n dims += 1\n else:\n # For binary, we use a single dimension, not 2\n sz = len(hp_range.categories)\n if sz == 2:\n sz = 1\n dims += sz\n return dims, warping_ranges\n\n\nclass RepeatedCandidateGenerator(CandidateGenerator):\n \"\"\"Generates candidates from a fixed set. Used to test the deduplication logic.\"\"\"\n def __init__(self, n_unique_candidates: int):\n self.config_space = {\n 'a': uniform(0, n_unique_candidates),\n 'b': randint(0, n_unique_candidates),\n 'c': choice([f\"value_{i}\" for i in range(n_unique_candidates)])}\n self.hp_ranges = make_hyperparameter_ranges(self.config_space)\n self.all_unique_candidates = [\n {'a': 1.0*j, 'b': j, 'c': f\"value_{j}\"}\n for j in range(n_unique_candidates)]\n\n def generate_candidates(self) -> Iterator[Configuration]:\n i = 0\n while True:\n i += 1\n yield self.all_unique_candidates[i % len(self.all_unique_candidates)]\n\n\n# Example black box function, with adjustable location of global minimum.\n# Potentially could catch issues with optimizer, e.g. if the optimizer\n# ignoring somehow candidates on the edge of search space.\n# A simple quadratic function is used.\nclass Quadratic3d:\n def __init__(self, local_minima, active_metric, metric_names):\n # local_minima: point where local_minima is located\n self.local_minima = np.array(local_minima).astype('float')\n self.local_minima[0] = np.log10(self.local_minima[0])\n self.active_metric = active_metric\n self.metric_names = metric_names\n\n @property\n def search_space(self):\n config_space = {\n 'x': loguniform(1.0, 100.0),\n 'y': randint(0, 2),\n 'z': choice(['0.0', '1.0', '2.0'])}\n return make_hyperparameter_ranges(config_space)\n\n @property\n def f_min(self):\n return 0.0\n\n def __call__(self, candidate):\n p = np.array([float(hp) for hp in candidate])\n p[0] = np.log10(p[0])\n return dictionarize_objective(np.sum((self.local_minima - p) ** 2))\n\n\ndef tuples_to_configs(config_tpls: List[Tuple[Hyperparameter, ...]],\n hp_ranges: HyperparameterRanges) -> List[Configuration]:\n \"\"\"\n Many unit tests write configs as tuples.\n\n \"\"\"\n return [hp_ranges.tuple_to_config(x) for x in config_tpls]\n\n\ndef create_exclusion_set(\n candidates_tpl, hp_ranges: HyperparameterRanges,\n is_dict: bool = False) -> ExclusionList:\n \"\"\"\n Creates exclusion list from set of tuples.\n\n \"\"\"\n if not is_dict:\n candidates_tpl = tuples_to_configs(candidates_tpl, hp_ranges)\n config_for_trial = {\n str(trial_id): config for trial_id, config in enumerate(candidates_tpl)}\n state = TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=config_for_trial,\n trials_evaluations=[],\n failed_trials=[str(x) for x in range(len(candidates_tpl))])\n return ExclusionList(state)\n\n\nTupleOrDict = Union[tuple, dict]\n\n\ndef create_tuning_job_state(\n hp_ranges: HyperparameterRanges, cand_tuples: List[TupleOrDict],\n metrics: List[Dict],\n pending_tuples: Optional[List[TupleOrDict]] = None,\n failed_tuples: Optional[List[TupleOrDict]] = None) -> TuningJobState:\n \"\"\"\n 
Builds `TuningJobState` from basics, where configs are given as tuples or\n as dicts.\n\n NOTE: We assume that all configs in the different lists are different!\n\n \"\"\"\n if cand_tuples and isinstance(cand_tuples[0], tuple):\n configs = tuples_to_configs(cand_tuples, hp_ranges)\n else:\n configs = cand_tuples\n trials_evaluations = [TrialEvaluations(trial_id=str(trial_id), metrics=y)\n for trial_id, y in enumerate(metrics)]\n pending_evaluations = None\n if pending_tuples is not None:\n sz = len(configs)\n extra = len(pending_tuples)\n if pending_tuples and isinstance(pending_tuples[0], tuple):\n extra_configs = tuples_to_configs(pending_tuples, hp_ranges)\n else:\n extra_configs = pending_tuples\n configs.extend(extra_configs)\n pending_evaluations = [PendingEvaluation(trial_id=str(trial_id))\n for trial_id in range(sz, sz + extra)]\n failed_trials = None\n if failed_tuples is not None:\n sz = len(configs)\n extra = len(failed_tuples)\n if failed_tuples and isinstance(failed_tuples[0], tuple):\n extra_configs = tuples_to_configs(failed_tuples, hp_ranges)\n else:\n extra_configs = failed_tuples\n configs.extend(extra_configs)\n failed_trials = [str(x) for x in range(sz, sz + extra)]\n\n config_for_trial = {\n str(trial_id): config for trial_id, config in enumerate(configs)}\n return TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=config_for_trial,\n trials_evaluations=trials_evaluations,\n failed_trials=failed_trials,\n pending_evaluations=pending_evaluations)\n",
"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\nimport numpy as np\nfrom typing import Dict, Optional, Set, List, Tuple\nimport logging\nfrom scipy.stats import norm\nimport itertools\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.models.meanstd_acqfunc \\\n import MeanStdAcquisitionFunction, HeadWithGradient, \\\n SamplePredictionsPerOutput, CurrentBestProvider\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.models.model_base \\\n import BaseSurrogateModel\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.base_classes \\\n import SurrogateOutputModel, SurrogateModel\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.utils.density \\\n import get_quantiles\n\nlogger = logging.getLogger(__name__)\n\n\nMIN_COST = 1e-12 # For numerical stability when dividing EI / cost\nMIN_STD_CONSTRAINT = 1e-12 # For numerical stability when computing the constraint probability in CEI\n\n\ndef _extract_active_and_secondary_metric(model_output_names, active_metric):\n \"\"\"\n Returns the active metric and the secondary metric (such as the cost or constraint metric) from model_output_names.\n \"\"\"\n\n assert len(model_output_names) == 2, f\"The model should consist of exactly 2 outputs, \" \\\n f\"while the current outputs are {model_output_names}\"\n assert active_metric in model_output_names, f\"{active_metric} is not a valid metric. \" \\\n f\"The metric name must match one of the following metrics \" \\\n f\"in the model output: {model_output_names}\"\n if model_output_names[0] == active_metric:\n secondary_metric = model_output_names[1]\n else:\n secondary_metric = model_output_names[0]\n logger.debug(\n f\"There are two metrics in the output: {model_output_names}. \"\n f\"The metric to optimize was set to '{active_metric}'. 
\"\n f\"The secondary metric is assumed to be '{secondary_metric}'\")\n return active_metric, secondary_metric\n\n\ndef _postprocess_gradient(grad: np.ndarray, nf: int) -> np.ndarray:\n if nf > 1:\n assert grad.size == nf # Sanity check\n return grad / nf\n else:\n return np.mean(grad, keepdims=True)\n\n\nclass EIAcquisitionFunction(MeanStdAcquisitionFunction):\n \"\"\"\n Minus expected improvement acquisition function\n (minus because the convention is to always minimize acquisition functions)\n\n \"\"\"\n def __init__(\n self, model: SurrogateOutputModel, active_metric: str = None,\n jitter: float = 0.01):\n assert isinstance(model, SurrogateModel)\n super().__init__(model, active_metric)\n self.jitter = jitter\n\n def _head_needs_current_best(self) -> bool:\n return True\n\n def _compute_head(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> np.ndarray:\n assert current_best is not None\n means, stds = self._extract_mean_and_std(output_to_predictions)\n\n # phi, Phi is PDF and CDF of Gaussian\n phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds)\n return np.mean((-stds) * (u * Phi + phi), axis=1)\n\n def _compute_head_and_gradient(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> HeadWithGradient:\n assert current_best is not None\n mean, std = self._extract_mean_and_std(output_to_predictions)\n nf_mean = mean.size\n assert current_best.size == nf_mean\n\n # phi, Phi is PDF and CDF of Gaussian\n phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std)\n f_acqu = std * (u * Phi + phi)\n dh_dmean = _postprocess_gradient(Phi, nf=nf_mean)\n dh_dstd = _postprocess_gradient(-phi, nf=1)\n return HeadWithGradient(\n hval=-np.mean(f_acqu),\n gradient={self.active_metric: dict(mean=dh_dmean, std=dh_dstd)})\n\n\nclass LCBAcquisitionFunction(MeanStdAcquisitionFunction):\n \"\"\"\n Lower confidence bound (LCB) acquisition function:\n\n h(mean, std) = mean - kappa * std\n\n \"\"\"\n def __init__(\n self, model: SurrogateOutputModel, kappa: float,\n active_metric: str = None):\n super().__init__(model, active_metric)\n assert isinstance(model, SurrogateModel)\n assert kappa > 0, 'kappa must be positive'\n self.kappa = kappa\n\n def _head_needs_current_best(self) -> bool:\n return False\n\n def _compute_head(self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> np.ndarray:\n means, stds = self._extract_mean_and_std(output_to_predictions)\n return np.mean(means - stds * self.kappa, axis=1)\n\n def _compute_head_and_gradient(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> HeadWithGradient:\n mean, std = self._extract_mean_and_std(output_to_predictions)\n nf_mean = mean.size\n\n dh_dmean = np.ones_like(mean) / nf_mean\n dh_dstd = (-self.kappa) * np.ones_like(std)\n return HeadWithGradient(\n hval=np.mean(mean - std * self.kappa),\n gradient={self.active_metric: dict(mean=dh_dmean, std=dh_dstd)})\n\n\nclass EIpuAcquisitionFunction(MeanStdAcquisitionFunction):\n \"\"\"\n Minus cost-aware expected improvement acquisition function.\n\n This is defined as\n\n EIpu(x) = EI(x) / power(cost(x), exponent_cost),\n\n where EI(x) is expected improvement, cost(x) is the predictive mean of\n a cost model, and `exponent_cost` is an exponent in (0, 1].\n\n `exponent_cost` scales the influence of the cost term on the acquisition\n function. 
See also:\n\n Lee etal.\n Cost-aware Bayesian Optimization\n https://arxiv.org/abs/2003.10870\n\n Note: two metrics are expected in the model output: the main objective and the cost.\n The main objective needs to be indicated as active_metric when initializing EIpuAcquisitionFunction.\n The cost is automatically assumed to be the other metric.\n\n \"\"\"\n def __init__(\n self, model: SurrogateOutputModel, active_metric: str = None,\n exponent_cost: float = 1.0, jitter: float = 0.01):\n super().__init__(model, active_metric)\n assert 0 < exponent_cost <= 1, \\\n f\"exponent_cost = {exponent_cost} must lie in (0, 1]\"\n self.jitter = jitter\n self.exponent_cost = exponent_cost\n self.active_metric, self.cost_metric = _extract_active_and_secondary_metric(\n self.model_output_names, active_metric)\n\n def _head_needs_current_best(self) -> bool:\n return True\n\n def _output_to_keys_predict(self) -> Dict[str, Set[str]]:\n \"\"\"\n The cost model may be deterministic, as the acquisition function\n only needs the mean.\n \"\"\"\n return {\n self.model_output_names[0]: {'mean', 'std'},\n self.model_output_names[1]: {'mean'}}\n\n def _compute_head(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> np.ndarray:\n \"\"\"\n Returns minus the cost-aware expected improvement.\n \"\"\"\n assert current_best is not None\n means, stds = self._extract_mean_and_std(output_to_predictions)\n pred_costs = self._extract_positive_cost(output_to_predictions)\n\n # phi, Phi is PDF and CDF of Gaussian\n phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds)\n f_acqu = stds * (u * Phi + phi) * np.power(pred_costs,\n -self.exponent_cost)\n return -np.mean(f_acqu, axis=1)\n\n def _compute_head_and_gradient(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> HeadWithGradient:\n \"\"\"\n Returns minus cost-aware expected improvement and, for each output model, the gradients\n with respect to the mean and standard deviation of that model.\n \"\"\"\n assert current_best is not None\n mean, std = self._extract_mean_and_std(output_to_predictions)\n pred_cost = self._extract_positive_cost(output_to_predictions)\n nf_active = mean.size\n nf_cost = pred_cost.size\n\n # phi, Phi is PDF and CDF of Gaussian\n phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std)\n inv_cost_power = np.power(pred_cost, -self.exponent_cost)\n f_acqu = std * (u * Phi + phi) * inv_cost_power\n\n dh_dmean_active = _postprocess_gradient(\n Phi * inv_cost_power, nf=nf_active)\n dh_dstd_active = _postprocess_gradient(-phi * inv_cost_power, nf=1)\n # Flip the sign twice: once because of the derivative of 1 / x, and\n # once because the head is actually - f_ei\n dh_dmean_cost = _postprocess_gradient(\n self.exponent_cost * f_acqu / pred_cost, nf=nf_cost)\n\n gradient = {\n self.active_metric: dict(mean=dh_dmean_active, std=dh_dstd_active),\n self.cost_metric: dict(mean=dh_dmean_cost)}\n return HeadWithGradient(hval=-np.mean(f_acqu), gradient=gradient)\n\n def _extract_positive_cost(self, output_to_predictions):\n pred_cost = output_to_predictions[self.cost_metric]['mean']\n if np.any(pred_cost) < 0.0:\n logger.warning(f'The model for {self.cost_metric} predicted some negative cost. 
'\n f'Capping the minimum cost at {MIN_COST}.')\n pred_cost = np.maximum(pred_cost, MIN_COST) # ensure that the predicted cost/run-time is positive\n return pred_cost\n\n\nclass ConstraintCurrentBestProvider(CurrentBestProvider):\n \"\"\"\n Here, `current_best` depends on two models, for active and\n constraint metric.\n\n \"\"\"\n def __init__(self, current_best_list: List[np.ndarray],\n num_samples_active: int):\n list_size = len(current_best_list)\n assert list_size > 0 and list_size % num_samples_active == 0\n self._active_and_constraint_current_best = [\n v.reshape((1, -1)) for v in current_best_list]\n self._num_samples_active = num_samples_active\n\n def __call__(self, positions: Tuple[int, ...]) -> Optional[np.ndarray]:\n flat_pos = positions[1] * self._num_samples_active + positions[0]\n return self._active_and_constraint_current_best[flat_pos]\n\n\nclass CEIAcquisitionFunction(MeanStdAcquisitionFunction):\n \"\"\"\n Minus constrained expected improvement acquisition function.\n (minus because the convention is to always minimize the acquisition function)\n\n This is defined as CEI(x) = EI(x) * P(c(x) <= 0), where EI is the standard expected improvement with respect\n to the current *feasible best*, and P(c(x) <= 0) is the probability that the hyperparameter\n configuration x satisfies the constraint modeled by c(x).\n\n If there are no feasible hyperparameters yet, the current feasible best is undefined. Thus, CEI is\n reduced to the P(c(x) <= 0) term until a feasible configuration is found.\n\n Two metrics are expected in the model output: the main objective and the constraint metric.\n The main objective needs to be indicated as active_metric when initializing CEIAcquisitionFunction.\n The constraint is automatically assumed to be the other metric.\n\n References on CEI:\n Gardner et al., Bayesian Optimization with Inequality Constraints. In ICML, 2014.\n Gelbart et al., Bayesian Optimization with Unknown Constraints. 
In UAI, 2014.\n\n \"\"\"\n def __init__(\n self, model: SurrogateOutputModel, active_metric: str = None,\n jitter: float = 0.01):\n super().__init__(model, active_metric)\n self.jitter = jitter\n self._feasible_best_list = None\n self.active_metric, self.constraint_metric = _extract_active_and_secondary_metric(\n self.model_output_names, active_metric)\n\n def _head_needs_current_best(self) -> bool:\n return True\n\n def _compute_head(self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> np.ndarray:\n \"\"\"\n Returns minus the constrained expected improvement (- CEI).\n \"\"\"\n assert current_best is not None\n means, stds = self._extract_mean_and_std(output_to_predictions)\n means_constr, stds_constr = self._extract_mean_and_std(\n output_to_predictions, metric=self.constraint_metric)\n\n # Compute the probability of satisfying the constraint P(c(x) <= 0)\n constr_probs = norm.cdf(- means_constr / (stds_constr + MIN_STD_CONSTRAINT))\n # If for some fantasies there are not feasible candidates, there is also no current_best (i.e., a nan).\n # The acquisition function is replaced by only the P(c(x) <= 0) term when no feasible best exist.\n feas_idx = ~np.isnan(current_best).reshape((1, -1))\n\n # phi, Phi is PDF and CDF of Gaussian\n phi, Phi, u = get_quantiles(self.jitter, current_best, means, stds)\n f_ei = stds * (u * Phi + phi)\n # CEI(x) = EI(x) * P(c(x) <= 0) if feasible best exists, CEI(x) = P(c(x) <= 0) otherwise\n f_acqu = np.where(feas_idx, f_ei * constr_probs, constr_probs)\n return -np.mean(f_acqu, axis=1)\n\n def _compute_head_and_gradient(\n self, output_to_predictions: SamplePredictionsPerOutput,\n current_best: Optional[np.ndarray]) -> HeadWithGradient:\n \"\"\"\n Returns minus cost-aware expected improvement (- CEI) and, for each output model, the gradients\n with respect to the mean and standard deviation of that model.\n \"\"\"\n assert current_best is not None\n mean, std = self._extract_mean_and_std(output_to_predictions)\n mean_constr, std_constr = self._extract_mean_and_std(\n output_to_predictions, metric=self.constraint_metric)\n nf_mean = mean.size\n nf_constr = mean_constr.size\n\n # Compute the probability of satisfying the constraint P(c(x) <= 0)\n std_constr = std_constr + MIN_STD_CONSTRAINT\n z = - mean_constr / std_constr\n constr_prob = norm.cdf(z)\n # Useful variables for computing the head gradients\n mean_over_squared_std_constr = mean_constr / std_constr ** 2\n inverse_std_constr = 1. 
/ std_constr\n phi_constr = norm.pdf(z)\n\n # If for some fantasies there are not feasible candidates, there is also no current_best (i.e., a nan).\n # The acquisition function is replaced by only the P(c(x) <= 0) term when no feasible best exist.\n feas_idx = ~np.isnan(current_best)\n phi, Phi, u = get_quantiles(self.jitter, current_best, mean, std) # phi, Phi is PDF and CDF of Gaussian\n f_ei = std * (u * Phi + phi)\n f_acqu = np.where(feas_idx, f_ei * constr_prob, constr_prob) # CEI(x) = EI(x) * P(c(x) <= 0) if feasible best\n # exists, CEI(x) = P(c(x) <= 0) otherwise\n dh_dmean_constraint_feas = f_ei * inverse_std_constr * phi_constr\n dh_dstd_constraint_feas = - f_ei * mean_over_squared_std_constr * phi_constr\n dh_dmean_active_feas = Phi * constr_prob\n dh_dstd_active_feas = - phi * constr_prob\n dh_dmean_constraint_infeas = inverse_std_constr * phi_constr\n dh_dstd_constraint_infeas = - mean_over_squared_std_constr * phi_constr\n dh_dmean_active_infeas = np.zeros_like(phi_constr)\n dh_dstd_active_infeas = np.zeros_like(phi_constr)\n dh_dmean_active = _postprocess_gradient(\n np.where(feas_idx, dh_dmean_active_feas, dh_dmean_active_infeas),\n nf=nf_mean)\n dh_dstd_active = _postprocess_gradient(\n np.where(feas_idx, dh_dstd_active_feas, dh_dstd_active_infeas),\n nf=1)\n dh_dmean_constraint = _postprocess_gradient(\n np.where(feas_idx, dh_dmean_constraint_feas,\n dh_dmean_constraint_infeas), nf=nf_constr)\n dh_dstd_constraint = _postprocess_gradient(\n np.where(feas_idx, dh_dstd_constraint_feas,\n dh_dstd_constraint_infeas), nf=1)\n gradient = {\n self.active_metric: dict(mean=dh_dmean_active,\n std=dh_dstd_active),\n self.constraint_metric: dict(mean=dh_dmean_constraint,\n std=dh_dstd_constraint)}\n return HeadWithGradient(hval=-np.mean(f_acqu), gradient=gradient)\n\n def _get_current_bests_internal(\n self, model: SurrogateOutputModel) -> CurrentBestProvider:\n active_model = model[self.active_metric]\n assert isinstance(active_model, BaseSurrogateModel)\n all_means_active = active_model.predict_mean_current_candidates()\n num_samples_active = len(all_means_active)\n constraint_model = model[self.constraint_metric]\n assert isinstance(constraint_model, BaseSurrogateModel)\n all_means_constraint = constraint_model.predict_mean_current_candidates()\n common_shape = all_means_active[0].shape\n assert all(x.shape == common_shape for x in all_means_constraint), \\\n \"Shape mismatch between models for predict_mean_current_candidates\"\n current_best_list = []\n for means_constraint, means_active in itertools.product(\n all_means_constraint, all_means_active):\n # Remove all infeasible candidates (i.e., where means_constraint\n # is >= 0)\n means_active[means_constraint >= 0] = np.nan\n # Compute the current *feasible* best (separately for every fantasy)\n min_across_observations = np.nanmin(means_active, axis=0)\n current_best_list.append(min_across_observations)\n return ConstraintCurrentBestProvider(\n current_best_list, num_samples_active)\n",
"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\nfrom typing import Optional, List, Dict\nimport numpy as np\nimport scipy.linalg as spl\nimport copy\n\nfrom syne_tune.config_space import uniform\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common \\\n import TrialEvaluations, Configuration, dictionarize_objective, \\\n INTERNAL_METRIC_NAME\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \\\n import make_hyperparameter_ranges\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state \\\n import TuningJobState\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.models.gp_model \\\n import get_internal_candidate_evaluations\n\n\nclass ThreeHumpCamel(object):\n @property\n def search_space(self):\n return [{'min': -5.0, 'max': 5.0},\n {'min': -5.0, 'max': 5.0}]\n\n def evaluate(self, x1, x2):\n return 2 * x1 ** 2 - 1.05 * x1 ** 4 + x1 ** 6 / 6 + x1 * x2 + x2 ** 2\n\n\ndef branin_function(x1, x2, r=6):\n return (x2 - (5.1 / (4 * np.pi ** 2)) * x1 ** 2 + (5 / np.pi) * x1 - r) ** 2 + \\\n 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10\n\n\nclass Branin(object):\n @property\n def search_space(self):\n return [{'min': -5.0, 'max': 10.0},\n {'min': 0.0, 'max': 15.0}]\n\n def evaluate(self, x1, x2):\n return branin_function(x1, x2)\n\n\nclass BraninWithR(Branin):\n def __init__(self, r):\n self.r = r\n\n def evaluate(self, x1, x2):\n return branin_function(x1, x2, r=self.r)\n\n\nclass Ackley(object):\n @property\n def search_space(self):\n const = 32.768\n return [{'min': -const, 'max': const},\n {'min': -const, 'max': const}]\n\n def evaluate(self, x1, x2):\n a = 20\n b = 0.2\n c = 2 * np.pi\n ssq = (x1 ** 2) + (x2 ** 2)\n scos = np.cos(c * x1) + np.cos(c * x2)\n return -a * np.exp(-b * np.sqrt(0.5 * ssq)) - np.exp(0.5 * scos) + \\\n (a + np.exp(1))\n\n\nclass SimpleQuadratic(object):\n @property\n def search_space(self):\n return [{'min': 0.0, 'max': 1.0},\n {'min': 0.0, 'max': 1.0}]\n\n def evaluate(self, x1, x2):\n return 2 * (x1 - 0.5)**2 + (x2 - 0.5)**2\n\n\ndef _decode_input(x, lim):\n mn, mx = lim['min'], lim['max']\n return x * (mx - mn) + mn\n\n\ndef evaluate_blackbox(bb_func, inputs: np.ndarray) -> np.ndarray:\n num_dims = inputs.shape[1]\n input_list = []\n for x, lim in zip(np.split(inputs, num_dims, axis=1), bb_func.search_space):\n input_list.append(_decode_input(x, lim))\n return bb_func.evaluate(*input_list)\n\n\n# NOTE: Inputs will always be in [0, 1] (so come in encoded form). They are\n# only scaled to their native ranges (linearly) when evaluations of the\n# blackbox are done. 
This avoids silly errors.\ndef sample_data(\n bb_cls, num_train: int, num_grid: int,\n expand_datadct: bool = True) -> dict:\n bb_func = bb_cls()\n ss_limits = bb_func.search_space\n num_dims = len(ss_limits)\n # Sample training inputs\n train_inputs = np.random.uniform(\n low=0.0, high=1.0, size=(num_train, num_dims))\n # Training targets (currently, no noise is added)\n train_targets = evaluate_blackbox(bb_func, train_inputs).reshape((-1,))\n # Inputs for prediction (regular grid)\n grids = [np.linspace(0.0, 1.0, num_grid)] * num_dims\n grids2 = tuple(np.meshgrid(*grids))\n test_inputs = np.hstack([x.reshape(-1, 1) for x in grids2])\n # Also evaluate true function on grid\n true_targets = evaluate_blackbox(bb_func, test_inputs).reshape((-1,))\n data = {\n 'ss_limits': ss_limits,\n 'train_inputs': train_inputs,\n 'train_targets': train_targets,\n 'test_inputs': test_inputs,\n 'grid_shape': grids2[0].shape,\n 'true_targets': true_targets}\n if expand_datadct:\n # Make sure that ours and GPy below receive exactly the same inputs\n data = expand_data(data)\n return data\n\n\ndef expand_data(data: dict) -> dict:\n \"\"\"\n Appends derived entries to data dict, which have non-elementary types.\n \"\"\"\n if 'state' not in data:\n data = copy.copy(data)\n state = data_to_state(data)\n data_internal = get_internal_candidate_evaluations(\n state, active_metric=INTERNAL_METRIC_NAME, normalize_targets=True,\n num_fantasy_samples=20)\n data['state'] = state\n data['train_inputs'] = data_internal.features\n data['train_targets_normalized'] = data_internal.targets\n return data\n\n\n# Recall that inputs in data are encoded, so we have to decode them to their\n# native ranges for `trials_evaluations`\ndef data_to_state(data: dict) -> TuningJobState:\n configs, cs = decode_inputs(data['train_inputs'], data['ss_limits'])\n config_for_trial = {\n str(trial_id): config for trial_id, config in enumerate(configs)}\n trials_evaluations = [\n TrialEvaluations(trial_id=str(trial_id),\n metrics=dictionarize_objective(y))\n for trial_id, y in enumerate(data['train_targets'])]\n return TuningJobState(\n hp_ranges=make_hyperparameter_ranges(cs),\n config_for_trial=config_for_trial,\n trials_evaluations=trials_evaluations)\n\n\ndef decode_inputs(inputs: np.ndarray, ss_limits) -> \\\n (List[Configuration], Dict):\n cs_names = [f\"x{i}\" for i in range(len(ss_limits))]\n cs = {\n name: uniform(lower=lims['min'], upper=lims['max'])\n for name, lims in zip(cs_names, ss_limits)}\n x_mult = []\n x_add = []\n for lim in ss_limits:\n mn, mx = lim['min'], lim['max']\n x_mult.append(mx - mn)\n x_add.append(mn)\n x_mult = np.array(x_mult)\n x_add = np.array(x_add)\n configs = []\n for x in inputs:\n x_decoded = x * x_mult + x_add\n config_dct = dict(zip(cs_names, x_decoded))\n configs.append(config_dct)\n return configs, cs\n\n\ndef assert_equal_candidates(candidates1, candidates2, hp_ranges, decimal=5):\n inputs1 = hp_ranges.to_ndarray_matrix(candidates1)\n inputs2 = hp_ranges.to_ndarray_matrix(candidates2)\n np.testing.assert_almost_equal(inputs1, inputs2, decimal=decimal)\n\n\ndef assert_equal_randomstate(randomstate1, randomstate2):\n assert str(randomstate1.get_state()) == str(randomstate2.get_state())\n\n\ndef compare_gpy_predict_posterior_marginals(\n test_intermediates: dict, noise_variance_gpy: Optional[float] = None):\n \"\"\"\n Compares all intermediates of cholesky_computations and\n predict_posterior_marginals to using GPy and NumPy.\n\n Currently, this is restricted:\n - Kernel must be Matern52 with ARD\n - Mean 
function must be constant 0\n\n :param test_intermediates: Intermediates computed using our code\n :param noise_variance_gpy: Overrides noise_variance in test_intermediates.\n Use this if jitter was added during the posterior state computation.\n\n \"\"\"\n import GPy\n # Create GPy kernel and model\n num_data = test_intermediates['features'].shape[0]\n num_dims = test_intermediates['features'].shape[1]\n lengthscales = [\n 1.0 / test_intermediates['inv_bw{}'.format(i)]\n for i in range(num_dims)]\n kernel = GPy.kern.Matern52(\n num_dims,\n variance=test_intermediates['covariance_scale'],\n lengthscale=lengthscales,\n ARD=True)\n if noise_variance_gpy is None:\n noise_variance_gpy = test_intermediates['noise_variance']\n model = GPy.models.GPRegression(\n test_intermediates['features'],\n test_intermediates['targets'].reshape((-1, 1)),\n kernel=kernel, noise_var=noise_variance_gpy)\n # Compare intermediates step by step (cholesky_computations)\n kernel_mat_gpy = kernel.K(test_intermediates['features'], X2=None)\n np.testing.assert_almost_equal(\n test_intermediates['kernel_mat'], kernel_mat_gpy, decimal=5)\n sys_mat_gpy = kernel_mat_gpy + np.diag(np.ones(num_data)) * \\\n noise_variance_gpy\n np.testing.assert_almost_equal(\n test_intermediates['sys_mat'], sys_mat_gpy, decimal=5)\n chol_fact_gpy = spl.cholesky(sys_mat_gpy, lower=True)\n # Use test_intermediates['sys_mat'] instead:\n #chol_fact_gpy = spl.cholesky(test_intermediates['sys_mat'], lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['chol_fact'], chol_fact_gpy, decimal=4)\n # Mean function must be constant 0\n centered_y = test_intermediates['targets'].reshape((-1, 1))\n np.testing.assert_almost_equal(\n test_intermediates['centered_y'], centered_y, decimal=9)\n pred_mat_gpy = spl.solve_triangular(chol_fact_gpy, centered_y, lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['pred_mat'], pred_mat_gpy, decimal=3)\n # Compare intermediates step by step (predict_posterior_marginals)\n k_tr_te_gpy = kernel.K(test_intermediates['features'],\n X2=test_intermediates['test_features'])\n np.testing.assert_almost_equal(\n test_intermediates['k_tr_te'], k_tr_te_gpy, decimal=5)\n linv_k_tr_te_gpy = spl.solve_triangular(chol_fact_gpy, k_tr_te_gpy, lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['linv_k_tr_te'], linv_k_tr_te_gpy, decimal=4)\n pred_means_gpy = np.dot(linv_k_tr_te_gpy.T, pred_mat_gpy)\n np.testing.assert_almost_equal(\n test_intermediates['pred_means'], pred_means_gpy, decimal=4)\n k_tr_diag_gpy = kernel.Kdiag(\n test_intermediates['test_features']).reshape((-1,))\n tvec_gpy = np.sum(np.square(linv_k_tr_te_gpy), axis=0).reshape((-1,))\n pred_vars_gpy = k_tr_diag_gpy - tvec_gpy\n np.testing.assert_almost_equal(\n test_intermediates['pred_vars'], pred_vars_gpy, decimal=4)\n # Also test against GPy predict\n pred_means_gpy2, pred_vars_gpy2 = model.predict(\n test_intermediates['test_features'], include_likelihood=False)\n pred_vars_gpy2 = pred_vars_gpy2.reshape((-1,))\n np.testing.assert_almost_equal(pred_means_gpy, pred_means_gpy2, decimal=3)\n np.testing.assert_almost_equal(pred_vars_gpy, pred_vars_gpy2, decimal=3)\n"
] | [
[
"numpy.array",
"numpy.log10",
"numpy.sum"
],
[
"numpy.maximum",
"scipy.stats.norm.cdf",
"scipy.stats.norm.pdf",
"numpy.power",
"numpy.ones_like",
"numpy.isnan",
"numpy.nanmin",
"numpy.mean",
"numpy.zeros_like",
"numpy.any",
"numpy.where"
],
[
"numpy.square",
"numpy.dot",
"numpy.split",
"numpy.sqrt",
"numpy.meshgrid",
"numpy.linspace",
"numpy.cos",
"numpy.ones",
"numpy.testing.assert_almost_equal",
"scipy.linalg.cholesky",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"scipy.linalg.solve_triangular"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
uhrwecker/GRDonuts | [
"3087aeb5c169251bdb711b425dcc3040ff962da7",
"3087aeb5c169251bdb711b425dcc3040ff962da7"
] | [
"util/utility.py",
"vis/simple.py"
] | [
"import numpy as np\n\nclass UtilInverse():\n def __init__(self, verbose=True):\n self.verbose = verbose\n\n def find_nearest_ind(self, array, value):\n index = []\n for ind in range(len(array)-1):\n if array[ind] < value and array[ind+1] > value:\n index.append(ind)\n if array[ind] > value and array[ind+1] < value:\n index.append(ind)\n return index\n\n def sort_array_by_column(self, array, order=['f0']):\n bits = 'i8'+',i8'*(len(array[0])-1)\n array.view(bits).sort(order=order, axis=0)\n return array\n\n \n\nclass UtilStability():\n def __init__(self, verbose=True):\n self.verbose = verbose\n\n def retrieve_extrema(self, w, r):\n self.check_for_stable_point(w, self.verbose)\n \n min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]\n max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]\n\n w_min = w[min_mask]\n r_min = r[min_mask]\n w_max = w[max_mask]\n r_max = r[max_mask]\n\n try:\n\n if w_min[0] == w[0]:\n w_min = np.delete(w_min, 0)\n r_min = np.delete(r_min, 0)\n\n if w_max[-1] == w[-1]:\n w_max = np.delete(w_max, -1)\n r_max = np.delete(r_max, -1)\n\n if self.verbose:\n print('Simple extremum analysis: ')\n print('- W has maximum/a at w='+str(w_max.tolist()))\n print('- W has minimum/a at w='+str(w_min.tolist()))\n\n return w_min.tolist(), w_max.tolist(), r_min.tolist(), r_max.tolist()\n except:\n return [0], [0], [0], [0]\n\n def check_for_stable_point(self, w, exit_if_not_stable=False):\n '''\n Checks if array has at least one minimum and\n its maximum is only local\n '''\n min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]\n max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]\n\n\n w_min = w[min_mask]\n w_max = w[max_mask]\n\n## if w_max[0] == w[0] or w_max[0] == w[1]:\n## '''\n## The potentianl comes from +inf, so its not a stable point.\n## '''\n## raise ValueError()\n\n if len(w_min) < 2 and len(w_max) < 2:\n '''\n The function is monotonically. There is no stable point.\n '''\n self._error_monotonically(exit_if_not_stable)\n\n elif len(w_min) < 1 or len(w_max) < 1:\n '''\n The function has either a local maximum OR local minimum, but not\n both, thus is not stable\n '''\n self._error_only_one_extremum(exit_if_not_stable)\n\n elif w_max[0] > w_max[1]:\n '''\n The potential is not closed, there is no Roche limit.\n Matter will extend into infitiy.\n '''\n self._error_no_roche_limit(exit_if_not_stable)\n\n elif self.verbose and len(w_min) > 1 and len(w_max) > 1:\n print('Potential is possibly stable')\n\n return 0\n\n def closure_rating_function(self, w, r):\n wmin, wmax, rmin, rmax = self.retrieve_extrema(w, r)\n\n int_l = np.where(r == rmax[0])[0][0]\n int_r = np.where(w > wmax[0])[0][0]\n\n area_func = abs(w[int_l:int_r] - wmax[-1])\n\n area = np.trapz(area_func)\n\n return area\n \n\n def _error_monotonically(self, flag):\n if flag:\n raise ValueError('Potential not closed, potential is monotonically.')\n else:\n if self.verbose:\n print('WARNING: Potential not closed, potential is monotonically.')\n \n def _error_only_one_extremum(self, flag):\n if flag:\n raise ValueError('Potential not closed, only has one extremum.')\n else:\n if self.verbose:\n print('WARNING: Potential not closed, only has one extremum.')\n\n\n def _error_no_roche_limit(self, flag):\n if flag:\n raise ValueError('Potential is not closed, matter extends into infinity.')\n else:\n if self.verbose:\n print('WARNING: Potential not close, no Roche limit.')\n",
"import numpy as np\nimport matplotlib.pyplot as pl\n\nclass Plotter():\n def __init__(self, figsize=(10, 8), save=None):\n self.figure = pl.figure(figsize=figsize)\n self.save = save\n\n def plot(self, potential, *args, label='', xmargin=0, ymargin=0.003):\n raise NotImplementedError()\n\n def _adjust_plot(self, r, xm, ym, wmin, wmax, ax):\n ax.set_xlim(r[0]-xm, r[-1]+xm)\n ax.set_ylim(wmin-ym, wmax+ym)\n ax.set_xlabel('R/M')\n ax.set_ylabel('W')\n ax.legend()\n\nclass SimplePlotter(Plotter):\n def __init__(self, figsize=(10, 8), save=None):\n super().__init__(figsize=figsize, save=save)\n\n def plot(self, potential, label='', xmargin=0, ymargin=0.003):\n w = potential.get_w()\n r = potential.get_r()\n wmin, wmax, _, _ = potential.util.retrieve_extrema(w, r)\n pl.plot(r, w, label=label)\n self._adjust_plot(r, xmargin, ymargin, wmin[0], wmax[0])\n if self.save:\n pl.savefig(self.save)\n else:\n pl.show()\n\n\nclass ScharPlotter(Plotter):\n def __init__(self, figsize=(10, 8), save=None, show=True,\n max_show=False):\n super().__init__(figsize=figsize, save=save)\n\n def plot(self, pot_list, label_list, xmargin=0, ymargin=0.003):\n yminmin = 1000\n ymaxmax = -1000\n for item, label in zip(pot_list, label_list):\n w = item.get_w()\n r = item.get_r()\n wmin, wmax, _, _ = item.util.retrieve_extrema(w, r)\n pl.plot(r, w, label=label)\n if wmin[0] < yminmin:\n yminmin = wmin[0]\n if wmax[0] > ymaxmax:\n ymaxmax = wmax[0]\n self._adjust_plot(r, xmargin, ymargin, wmin[0], wmax[0])\n if self.save:\n pl.savefig(self.save)\n else:\n pl.show()\n \n\nclass BeautyPlotter(Plotter):\n def __init__(self, figsize=(10, 8), save=None, show=True):\n super().__init__(figsize=figsize, save=save)\n\n def plot(self, potential, label='', xmargin=0, ymargin=0.003,\n show=True, fill=True, max_show=True, ax=None):\n if not ax:\n ax = pl.gca()\n r = potential.get_r()\n w = potential.get_w()\n wmin, wmax, rmin, rmax = potential.util.retrieve_extrema(w, r)\n try:\n int_l = np.where(r == rmax[0])[0][0]\n except:\n int_l = np.where(r == rmax[0])[0]\n try:\n int_r = np.where(w > wmax[0])[0][0]\n except:\n raise ValueError('Increase r_range.')\n \n if max_show:\n ax.axhline(wmax, alpha=0.5, c='red', ls='--')\n if fill:\n if int_l < int_r:\n ax.fill_between(r[int_l:int_r], [wmax[0] for n in r[int_l:int_r]],\n w[int_l:int_r], color='green', alpha=0.4)\n else:\n ax.fill_between(r[int_r:int_l], [wmax[0] for n in r[int_r:int_l]],\n w[int_r:int_l], color='green', alpha=0.4)\n ax.plot(r, w, label=label)\n self._adjust_plot(r, xmargin, ymargin, wmin[0], wmax[0], ax)\n\n if self.save:\n pl.savefig(self.save)\n if show:\n return ax\n\n \n\n \n"
] | [
[
"numpy.delete",
"numpy.where",
"numpy.trapz"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
blokhinnv/dgl | [
"bcf92f6c21afd4ad48a86d2ee543386099190791"
] | [
"python/dgl/distributed/dist_graph.py"
] | [
"\"\"\"Define distributed graph.\"\"\"\n\nfrom collections.abc import MutableMapping\nfrom collections import namedtuple\n\nimport os\nimport numpy as np\n\nfrom ..heterograph import DGLHeteroGraph\nfrom ..convert import heterograph as dgl_heterograph\nfrom ..convert import graph as dgl_graph\nfrom ..transform import compact_graphs\nfrom .. import heterograph_index\nfrom .. import backend as F\nfrom ..base import NID, EID, NTYPE, ETYPE, ALL, is_all\nfrom .kvstore import KVServer, get_kvstore\nfrom .._ffi.ndarray import empty_shared_mem\nfrom ..frame import infer_scheme\nfrom .partition import load_partition, load_partition_book\nfrom .graph_partition_book import PartitionPolicy, get_shared_mem_partition_book\nfrom .graph_partition_book import HeteroDataName, parse_hetero_data_name\nfrom .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy\nfrom .shared_mem_utils import _to_shared_mem, _get_ndata_path, _get_edata_path, DTYPE_DICT\nfrom . import rpc\nfrom . import role\nfrom .server_state import ServerState\nfrom .rpc_server import start_server\nfrom .graph_services import find_edges as dist_find_edges\nfrom .graph_services import out_degrees as dist_out_degrees\nfrom .graph_services import in_degrees as dist_in_degrees\nfrom .dist_tensor import DistTensor\n\nINIT_GRAPH = 800001\n\nclass InitGraphRequest(rpc.Request):\n \"\"\" Init graph on the backup servers.\n\n When the backup server starts, they don't load the graph structure.\n This request tells the backup servers that they can map to the graph structure\n with shared memory.\n \"\"\"\n def __init__(self, graph_name):\n self._graph_name = graph_name\n\n def __getstate__(self):\n return self._graph_name\n\n def __setstate__(self, state):\n self._graph_name = state\n\n def process_request(self, server_state):\n if server_state.graph is None:\n server_state.graph = _get_graph_from_shared_mem(self._graph_name)\n return InitGraphResponse(self._graph_name)\n\nclass InitGraphResponse(rpc.Response):\n \"\"\" Ack the init graph request\n \"\"\"\n def __init__(self, graph_name):\n self._graph_name = graph_name\n\n def __getstate__(self):\n return self._graph_name\n\n def __setstate__(self, state):\n self._graph_name = state\n\ndef _copy_graph_to_shared_mem(g, graph_name, graph_format):\n new_g = g.shared_memory(graph_name, formats=graph_format)\n # We should share the node/edge data to the client explicitly instead of putting them\n # in the KVStore because some of the node/edge data may be duplicated.\n new_g.ndata['inner_node'] = _to_shared_mem(g.ndata['inner_node'],\n _get_ndata_path(graph_name, 'inner_node'))\n new_g.ndata[NID] = _to_shared_mem(g.ndata[NID], _get_ndata_path(graph_name, NID))\n\n new_g.edata['inner_edge'] = _to_shared_mem(g.edata['inner_edge'],\n _get_edata_path(graph_name, 'inner_edge'))\n new_g.edata[EID] = _to_shared_mem(g.edata[EID], _get_edata_path(graph_name, EID))\n new_g.edata[ETYPE] = _to_shared_mem(g.edata[ETYPE], _get_edata_path(graph_name, ETYPE))\n return new_g\n\nFIELD_DICT = {'inner_node': F.int32, # A flag indicates whether the node is inside a partition.\n 'inner_edge': F.int32, # A flag indicates whether the edge is inside a partition.\n NID: F.int64,\n EID: F.int64,\n NTYPE: F.int32,\n ETYPE: F.int32}\n\ndef _get_shared_mem_ndata(g, graph_name, name):\n ''' Get shared-memory node data from DistGraph server.\n\n This is called by the DistGraph client to access the node data in the DistGraph server\n with shared memory.\n '''\n shape = (g.number_of_nodes(),)\n dtype = FIELD_DICT[name]\n dtype 
= DTYPE_DICT[dtype]\n data = empty_shared_mem(_get_ndata_path(graph_name, name), False, shape, dtype)\n dlpack = data.to_dlpack()\n return F.zerocopy_from_dlpack(dlpack)\n\ndef _get_shared_mem_edata(g, graph_name, name):\n ''' Get shared-memory edge data from DistGraph server.\n\n This is called by the DistGraph client to access the edge data in the DistGraph server\n with shared memory.\n '''\n shape = (g.number_of_edges(),)\n dtype = FIELD_DICT[name]\n dtype = DTYPE_DICT[dtype]\n data = empty_shared_mem(_get_edata_path(graph_name, name), False, shape, dtype)\n dlpack = data.to_dlpack()\n return F.zerocopy_from_dlpack(dlpack)\n\ndef _get_graph_from_shared_mem(graph_name):\n ''' Get the graph from the DistGraph server.\n\n The DistGraph server puts the graph structure of the local partition in the shared memory.\n The client can access the graph structure and some metadata on nodes and edges directly\n through shared memory to reduce the overhead of data access.\n '''\n g, ntypes, etypes = heterograph_index.create_heterograph_from_shared_memory(graph_name)\n if g is None:\n return None\n g = DGLHeteroGraph(g, ntypes, etypes)\n\n g.ndata['inner_node'] = _get_shared_mem_ndata(g, graph_name, 'inner_node')\n g.ndata[NID] = _get_shared_mem_ndata(g, graph_name, NID)\n\n g.edata['inner_edge'] = _get_shared_mem_edata(g, graph_name, 'inner_edge')\n g.edata[EID] = _get_shared_mem_edata(g, graph_name, EID)\n g.edata[ETYPE] = _get_shared_mem_edata(g, graph_name, ETYPE)\n return g\n\nNodeSpace = namedtuple('NodeSpace', ['data'])\nEdgeSpace = namedtuple('EdgeSpace', ['data'])\n\nclass HeteroNodeView(object):\n \"\"\"A NodeView class to act as G.nodes for a DistGraph.\"\"\"\n __slots__ = ['_graph']\n\n def __init__(self, graph):\n self._graph = graph\n\n def __getitem__(self, key):\n assert isinstance(key, str)\n return NodeSpace(data=NodeDataView(self._graph, key))\n\nclass HeteroEdgeView(object):\n \"\"\"A NodeView class to act as G.nodes for a DistGraph.\"\"\"\n __slots__ = ['_graph']\n\n def __init__(self, graph):\n self._graph = graph\n\n def __getitem__(self, key):\n assert isinstance(key, str)\n return EdgeSpace(data=EdgeDataView(self._graph, key))\n\nclass NodeDataView(MutableMapping):\n \"\"\"The data view class when dist_graph.ndata[...].data is called.\n \"\"\"\n __slots__ = ['_graph', '_data']\n\n def __init__(self, g, ntype=None):\n self._graph = g\n # When this is created, the server may already load node data. We need to\n # initialize the node data in advance.\n names = g._get_ndata_names(ntype)\n if ntype is None:\n self._data = g._ndata_store\n else:\n if ntype in g._ndata_store:\n self._data = g._ndata_store[ntype]\n else:\n self._data = {}\n g._ndata_store[ntype] = self._data\n for name in names:\n assert name.is_node()\n policy = PartitionPolicy(name.policy_str, g.get_partition_book())\n dtype, shape, _ = g._client.get_data_meta(str(name))\n # We create a wrapper on the existing tensor in the kvstore.\n self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),\n part_policy=policy)\n\n def _get_names(self):\n return list(self._data.keys())\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __setitem__(self, key, val):\n self._data[key] = val\n\n def __delitem__(self, key):\n del self._data[key]\n\n def __len__(self):\n # The number of node data may change. Let's count it every time we need them.\n # It's not called frequently. 
It should be fine.\n return len(self._data)\n\n def __iter__(self):\n return iter(self._data)\n\n def __repr__(self):\n reprs = {}\n for name in self._data:\n dtype = F.dtype(self._data[name])\n shape = F.shape(self._data[name])\n reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))\n return repr(reprs)\n\nclass EdgeDataView(MutableMapping):\n \"\"\"The data view class when G.edges[...].data is called.\n \"\"\"\n __slots__ = ['_graph', '_data']\n\n def __init__(self, g, etype=None):\n self._graph = g\n # When this is created, the server may already load edge data. We need to\n # initialize the edge data in advance.\n names = g._get_edata_names(etype)\n if etype is None:\n self._data = g._edata_store\n else:\n if etype in g._edata_store:\n self._data = g._edata_store[etype]\n else:\n self._data = {}\n g._edata_store[etype] = self._data\n for name in names:\n assert name.is_edge()\n policy = PartitionPolicy(name.policy_str, g.get_partition_book())\n dtype, shape, _ = g._client.get_data_meta(str(name))\n # We create a wrapper on the existing tensor in the kvstore.\n self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),\n part_policy=policy)\n\n def _get_names(self):\n return list(self._data.keys())\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __setitem__(self, key, val):\n self._data[key] = val\n\n def __delitem__(self, key):\n del self._data[key]\n\n def __len__(self):\n # The number of edge data may change. Let's count it every time we need them.\n # It's not called frequently. It should be fine.\n return len(self._data)\n\n def __iter__(self):\n return iter(self._data)\n\n def __repr__(self):\n reprs = {}\n for name in self._data:\n dtype = F.dtype(self._data[name])\n shape = F.shape(self._data[name])\n reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))\n return repr(reprs)\n\n\nclass DistGraphServer(KVServer):\n ''' The DistGraph server.\n\n This DistGraph server loads the graph data and sets up a service so that trainers and\n samplers can read data of a graph partition (graph structure, node data and edge data)\n from remote machines. A server is responsible for one graph partition.\n\n Currently, each machine runs only one main server with a set of backup servers to handle\n clients' requests. The main server and the backup servers all handle the requests for the same\n graph partition. They all share the partition data (graph structure and node/edge data) with\n shared memory.\n\n By default, the partition data is shared with the DistGraph clients that run on\n the same machine. However, a user can disable shared memory option. 
This is useful for the case\n that a user wants to run the server and the client on different machines.\n\n Parameters\n ----------\n server_id : int\n The server ID (start from 0).\n ip_config : str\n Path of IP configuration file.\n num_servers : int\n Server count on each machine.\n num_clients : int\n Total number of client nodes.\n part_config : string\n The path of the config file generated by the partition tool.\n disable_shared_mem : bool\n Disable shared memory.\n graph_format : str or list of str\n The graph formats.\n '''\n def __init__(self, server_id, ip_config, num_servers,\n num_clients, part_config, disable_shared_mem=False,\n graph_format=('csc', 'coo')):\n super(DistGraphServer, self).__init__(server_id=server_id,\n ip_config=ip_config,\n num_servers=num_servers,\n num_clients=num_clients)\n self.ip_config = ip_config\n self.num_servers = num_servers\n # Load graph partition data.\n if self.is_backup_server():\n # The backup server doesn't load the graph partition. It'll initialized afterwards.\n self.gpb, graph_name, ntypes, etypes = load_partition_book(part_config, self.part_id)\n self.client_g = None\n else:\n self.client_g, node_feats, edge_feats, self.gpb, graph_name, \\\n ntypes, etypes = load_partition(part_config, self.part_id)\n print('load ' + graph_name)\n # Create the graph formats specified the users.\n self.client_g = self.client_g.formats(graph_format)\n self.client_g.create_formats_()\n if not disable_shared_mem:\n self.client_g = _copy_graph_to_shared_mem(self.client_g, graph_name, graph_format)\n\n if not disable_shared_mem:\n self.gpb.shared_memory(graph_name)\n assert self.gpb.partid == self.part_id\n for ntype in ntypes:\n node_name = HeteroDataName(True, ntype, None)\n self.add_part_policy(PartitionPolicy(node_name.policy_str, self.gpb))\n for etype in etypes:\n edge_name = HeteroDataName(False, etype, None)\n self.add_part_policy(PartitionPolicy(edge_name.policy_str, self.gpb))\n\n if not self.is_backup_server():\n for name in node_feats:\n # The feature name has the following format: node_type + \"/\" + feature_name to avoid\n # feature name collision for different node types.\n ntype, feat_name = name.split('/')\n data_name = HeteroDataName(True, ntype, feat_name)\n self.init_data(name=str(data_name), policy_str=data_name.policy_str,\n data_tensor=node_feats[name])\n for name in edge_feats:\n # The feature name has the following format: edge_type + \"/\" + feature_name to avoid\n # feature name collision for different edge types.\n etype, feat_name = name.split('/')\n data_name = HeteroDataName(False, etype, feat_name)\n self.init_data(name=str(data_name), policy_str=data_name.policy_str,\n data_tensor=edge_feats[name])\n\n def start(self):\n \"\"\" Start graph store server.\n \"\"\"\n # start server\n server_state = ServerState(kv_store=self, local_g=self.client_g, partition_book=self.gpb)\n print('start graph service on server {} for part {}'.format(self.server_id, self.part_id))\n start_server(server_id=self.server_id,\n ip_config=self.ip_config,\n num_servers=self.num_servers,\n num_clients=self.num_clients, server_state=server_state)\n\nclass DistGraph:\n '''The class for accessing a distributed graph.\n\n This class provides a subset of DGLGraph APIs for accessing partitioned graph data in\n distributed GNN training and inference. 
Thus, its main use case is to work with\n distributed sampling APIs to generate mini-batches and perform forward and\n backward computation on the mini-batches.\n\n The class can run in two modes: the standalone mode and the distributed mode.\n\n * When a user runs the training script normally, ``DistGraph`` will be in the standalone mode.\n In this mode, the input data must be constructed by\n :py:meth:`~dgl.distributed.partition.partition_graph` with only one partition. This mode is\n used for testing and debugging purpose. In this mode, users have to provide ``part_config``\n so that ``DistGraph`` can load the input graph.\n * When a user runs the training script with the distributed launch script, ``DistGraph`` will\n be set into the distributed mode. This is used for actual distributed training. All data of\n partitions are loaded by the ``DistGraph`` servers, which are created by DGL's launch script.\n ``DistGraph`` connects with the servers to access the partitioned graph data.\n\n Currently, the ``DistGraph`` servers and clients run on the same set of machines\n in the distributed mode. ``DistGraph`` uses shared-memory to access the partition data\n in the local machine. This gives the best performance for distributed training\n\n Users may want to run ``DistGraph`` servers and clients on separate sets of machines.\n In this case, a user may want to disable shared memory by passing\n ``disable_shared_mem=False`` when creating ``DistGraphServer``. When shared memory is disabled,\n a user has to pass a partition book.\n\n Parameters\n ----------\n graph_name : str\n The name of the graph. This name has to be the same as the one used for\n partitioning a graph in :py:meth:`dgl.distributed.partition.partition_graph`.\n gpb : GraphPartitionBook, optional\n The partition book object. Normally, users do not need to provide the partition book.\n This argument is necessary only when users want to run server process and trainer\n processes on different machines.\n part_config : str, optional\n The path of partition configuration file generated by\n :py:meth:`dgl.distributed.partition.partition_graph`. It's used in the standalone mode.\n\n Examples\n --------\n The example shows the creation of ``DistGraph`` in the standalone mode.\n\n >>> dgl.distributed.partition_graph(g, 'graph_name', 1, num_hops=1, part_method='metis',\n ... out_path='output/', reshuffle=True)\n >>> g = dgl.distributed.DistGraph('graph_name', part_config='output/graph_name.json')\n\n The example shows the creation of ``DistGraph`` in the distributed mode.\n\n >>> g = dgl.distributed.DistGraph('graph-name')\n\n The code below shows the mini-batch training using ``DistGraph``.\n\n >>> def sample(seeds):\n ... seeds = th.LongTensor(np.asarray(seeds))\n ... frontier = dgl.distributed.sample_neighbors(g, seeds, 10)\n ... return dgl.to_block(frontier, seeds)\n >>> dataloader = dgl.distributed.DistDataLoader(dataset=nodes, batch_size=1000,\n ... collate_fn=sample, shuffle=True)\n >>> for block in dataloader:\n ... feat = g.ndata['features'][block.srcdata[dgl.NID]]\n ... labels = g.ndata['labels'][block.dstdata[dgl.NID]]\n ... pred = model(block, feat)\n\n Note\n ----\n DGL's distributed training by default runs server processes and trainer processes on the same\n set of machines. If users need to run them on different sets of machines, it requires\n manually setting up servers and trainers. 
The setup is not fully tested yet.\n '''\n def __init__(self, graph_name, gpb=None, part_config=None):\n self.graph_name = graph_name\n self._gpb_input = gpb\n if os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone':\n assert part_config is not None, \\\n 'When running in the standalone model, the partition config file is required'\n self._client = get_kvstore()\n assert self._client is not None, \\\n 'Distributed module is not initialized. Please call dgl.distributed.initialize.'\n # Load graph partition data.\n g, node_feats, edge_feats, self._gpb, _, _, _ = load_partition(part_config, 0)\n assert self._gpb.num_partitions() == 1, \\\n 'The standalone mode can only work with the graph data with one partition'\n if self._gpb is None:\n self._gpb = gpb\n self._g = g\n for name in node_feats:\n # The feature name has the following format: node_type + \"/\" + feature_name.\n ntype, feat_name = name.split('/')\n self._client.add_data(str(HeteroDataName(True, ntype, feat_name)),\n node_feats[name],\n NodePartitionPolicy(self._gpb, ntype=ntype))\n for name in edge_feats:\n # The feature name has the following format: edge_type + \"/\" + feature_name.\n etype, feat_name = name.split('/')\n self._client.add_data(str(HeteroDataName(False, etype, feat_name)),\n edge_feats[name],\n EdgePartitionPolicy(self._gpb, etype=etype))\n self._client.map_shared_data(self._gpb)\n rpc.set_num_client(1)\n else:\n self._init()\n # Tell the backup servers to load the graph structure from shared memory.\n for server_id in range(self._client.num_servers):\n rpc.send_request(server_id, InitGraphRequest(graph_name))\n for server_id in range(self._client.num_servers):\n rpc.recv_response()\n self._client.barrier()\n\n self._ndata_store = {}\n self._edata_store = {}\n self._ndata = NodeDataView(self)\n self._edata = EdgeDataView(self)\n\n self._num_nodes = 0\n self._num_edges = 0\n for part_md in self._gpb.metadata():\n self._num_nodes += int(part_md['num_nodes'])\n self._num_edges += int(part_md['num_edges'])\n\n # When we store node/edge types in a list, they are stored in the order of type IDs.\n self._ntype_map = {ntype:i for i, ntype in enumerate(self.ntypes)}\n self._etype_map = {etype:i for i, etype in enumerate(self.etypes)}\n\n # Get canonical edge types.\n # TODO(zhengda) this requires the server to store the graph with coo format.\n eid = []\n for etype in self.etypes:\n type_eid = F.zeros((1,), F.int64, F.cpu())\n eid.append(self._gpb.map_to_homo_eid(type_eid, etype))\n eid = F.cat(eid, 0)\n src, dst = dist_find_edges(self, eid)\n src_tids, _ = self._gpb.map_to_per_ntype(src)\n dst_tids, _ = self._gpb.map_to_per_ntype(dst)\n self._canonical_etypes = []\n etype_ids = F.arange(0, len(self.etypes))\n for src_tid, etype_id, dst_tid in zip(src_tids, etype_ids, dst_tids):\n src_tid = F.as_scalar(src_tid)\n etype_id = F.as_scalar(etype_id)\n dst_tid = F.as_scalar(dst_tid)\n self._canonical_etypes.append((self.ntypes[src_tid], self.etypes[etype_id],\n self.ntypes[dst_tid]))\n self._etype2canonical = {}\n for src_type, etype, dst_type in self._canonical_etypes:\n if etype in self._etype2canonical:\n self._etype2canonical[etype] = ()\n else:\n self._etype2canonical[etype] = (src_type, etype, dst_type)\n\n def _init(self):\n self._client = get_kvstore()\n assert self._client is not None, \\\n 'Distributed module is not initialized. 
Please call dgl.distributed.initialize.'\n self._g = _get_graph_from_shared_mem(self.graph_name)\n self._gpb = get_shared_mem_partition_book(self.graph_name, self._g)\n if self._gpb is None:\n self._gpb = self._gpb_input\n self._client.map_shared_data(self._gpb)\n\n def __getstate__(self):\n return self.graph_name, self._gpb, self._canonical_etypes\n\n def __setstate__(self, state):\n self.graph_name, self._gpb_input, self._canonical_etypes = state\n self._init()\n\n self._etype2canonical = {}\n for src_type, etype, dst_type in self._canonical_etypes:\n if etype in self._etype2canonical:\n self._etype2canonical[etype] = ()\n else:\n self._etype2canonical[etype] = (src_type, etype, dst_type)\n self._ndata_store = {}\n self._edata_store = {}\n self._ndata = NodeDataView(self)\n self._edata = EdgeDataView(self)\n self._num_nodes = 0\n self._num_edges = 0\n for part_md in self._gpb.metadata():\n self._num_nodes += int(part_md['num_nodes'])\n self._num_edges += int(part_md['num_edges'])\n\n @property\n def local_partition(self):\n ''' Return the local partition on the client\n\n DistGraph provides a global view of the distributed graph. Internally,\n it may contains a partition of the graph if it is co-located with\n the server. When servers and clients run on separate sets of machines,\n this returns None.\n\n Returns\n -------\n DGLGraph\n The local partition\n '''\n return self._g\n\n @property\n def nodes(self):\n '''Return a node view\n '''\n return HeteroNodeView(self)\n\n @property\n def edges(self):\n '''Return an edge view\n '''\n return HeteroEdgeView(self)\n\n @property\n def ndata(self):\n \"\"\"Return the data view of all the nodes.\n\n Returns\n -------\n NodeDataView\n The data view in the distributed graph storage.\n \"\"\"\n assert len(self.ntypes) == 1, \"ndata only works for a graph with one node type.\"\n return self._ndata\n\n @property\n def edata(self):\n \"\"\"Return the data view of all the edges.\n\n Returns\n -------\n EdgeDataView\n The data view in the distributed graph storage.\n \"\"\"\n assert len(self.etypes) == 1, \"edata only works for a graph with one edge type.\"\n return self._edata\n\n @property\n def idtype(self):\n \"\"\"The dtype of graph index\n\n Returns\n -------\n backend dtype object\n th.int32/th.int64 or tf.int32/tf.int64 etc.\n\n See Also\n --------\n long\n int\n \"\"\"\n # TODO(da?): describe when self._g is None and idtype shouldn't be called.\n return F.int64\n\n @property\n def device(self):\n \"\"\"Get the device context of this graph.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> g = dgl.bipartite(([0, 1, 1, 2], [0, 0, 2, 1]), 'user', 'plays', 'game')\n >>> print(g.device)\n device(type='cpu')\n >>> g = g.to('cuda:0')\n >>> print(g.device)\n device(type='cuda', index=0)\n\n Returns\n -------\n Device context object\n \"\"\"\n # TODO(da?): describe when self._g is None and device shouldn't be called.\n return F.cpu()\n\n @property\n def ntypes(self):\n \"\"\"Return the list of node types of this graph.\n\n Returns\n -------\n list of str\n\n Examples\n --------\n\n >>> g = DistGraph(\"test\")\n >>> g.ntypes\n ['_U']\n \"\"\"\n return self._gpb.ntypes\n\n @property\n def etypes(self):\n \"\"\"Return the list of edge types of this graph.\n\n Returns\n -------\n list of str\n\n Examples\n --------\n\n >>> g = DistGraph(\"test\")\n >>> g.etypes\n ['_E']\n \"\"\"\n # Currently, we only support a graph with one edge type.\n return self._gpb.etypes\n\n @property\n def canonical_etypes(self):\n \"\"\"Return all the 
canonical edge types in the graph.\n\n A canonical edge type is a string triplet ``(str, str, str)``\n for source node type, edge type and destination node type.\n\n Returns\n -------\n list[(str, str, str)]\n All the canonical edge type triplets in a list.\n\n Notes\n -----\n DGL internally assigns an integer ID for each edge type. The returned\n edge type names are sorted according to their IDs.\n\n See Also\n --------\n etypes\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n >>> g = DistGraph(\"test\")\n >>> g.canonical_etypes\n [('user', 'follows', 'user'),\n ('user', 'follows', 'game'),\n ('user', 'plays', 'game')]\n \"\"\"\n return self._canonical_etypes\n\n def to_canonical_etype(self, etype):\n \"\"\"Convert an edge type to the corresponding canonical edge type in the graph.\n\n A canonical edge type is a string triplet ``(str, str, str)``\n for source node type, edge type and destination node type.\n\n The function expects the given edge type name can uniquely identify a canonical edge\n type. DGL will raise error if this is not the case.\n\n Parameters\n ----------\n etype : str or (str, str, str)\n If :attr:`etype` is an edge type (str), it returns the corresponding canonical edge\n type in the graph. If :attr:`etype` is already a canonical edge type,\n it directly returns the input unchanged.\n\n Returns\n -------\n (str, str, str)\n The canonical edge type corresponding to the edge type.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n >>> g = DistGraph(\"test\")\n >>> g.canonical_etypes\n [('user', 'follows', 'user'),\n ('user', 'follows', 'game'),\n ('user', 'plays', 'game')]\n\n >>> g.to_canonical_etype('plays')\n ('user', 'plays', 'game')\n >>> g.to_canonical_etype(('user', 'plays', 'game'))\n ('user', 'plays', 'game')\n\n See Also\n --------\n canonical_etypes\n \"\"\"\n if etype is None:\n if len(self.etypes) != 1:\n raise DGLError('Edge type name must be specified if there are more than one '\n 'edge types.')\n etype = self.etypes[0]\n if isinstance(etype, tuple):\n return etype\n else:\n ret = self._etype2canonical.get(etype, None)\n if ret is None:\n raise DGLError('Edge type \"{}\" does not exist.'.format(etype))\n if len(ret) != 3:\n raise DGLError('Edge type \"{}\" is ambiguous. Please use canonical edge type '\n 'in the form of (srctype, etype, dsttype)'.format(etype))\n return ret\n\n def get_ntype_id(self, ntype):\n \"\"\"Return the ID of the given node type.\n\n ntype can also be None. If so, there should be only one node type in the\n graph.\n\n Parameters\n ----------\n ntype : str\n Node type\n\n Returns\n -------\n int\n \"\"\"\n if ntype is None:\n if len(self._ntype_map) != 1:\n raise DGLError('Node type name must be specified if there are more than one '\n 'node types.')\n return 0\n return self._ntype_map[ntype]\n\n def get_etype_id(self, etype):\n \"\"\"Return the id of the given edge type.\n\n etype can also be None. 
If so, there should be only one edge type in the\n graph.\n\n Parameters\n ----------\n etype : str or tuple of str\n Edge type\n\n Returns\n -------\n int\n \"\"\"\n if etype is None:\n if len(self._etype_map) != 1:\n raise DGLError('Edge type name must be specified if there are more than one '\n 'edge types.')\n return 0\n return self._etype_map[etype]\n\n def number_of_nodes(self, ntype=None):\n \"\"\"Alias of :func:`num_nodes`\"\"\"\n return self.num_nodes(ntype)\n\n def number_of_edges(self, etype=None):\n \"\"\"Alias of :func:`num_edges`\"\"\"\n return self.num_edges(etype)\n\n def num_nodes(self, ntype=None):\n \"\"\"Return the total number of nodes in the distributed graph.\n\n Parameters\n ----------\n ntype : str, optional\n The node type name. If given, it returns the number of nodes of the\n type. If not given (default), it returns the total number of nodes of all types.\n\n Returns\n -------\n int\n The number of nodes\n\n Examples\n --------\n >>> g = dgl.distributed.DistGraph('ogb-product')\n >>> print(g.num_nodes())\n 2449029\n \"\"\"\n if ntype is None:\n if len(self.ntypes) == 1:\n return self._gpb._num_nodes(self.ntypes[0])\n else:\n return sum([self._gpb._num_nodes(ntype) for ntype in self.ntypes])\n return self._gpb._num_nodes(ntype)\n\n def num_edges(self, etype=None):\n \"\"\"Return the total number of edges in the distributed graph.\n\n Parameters\n ----------\n etype : str or (str, str, str), optional\n The type name of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n If not provided, return the total number of edges regardless of the types\n in the graph.\n\n Returns\n -------\n int\n The number of edges\n\n Examples\n --------\n >>> g = dgl.distributed.DistGraph('ogb-product')\n >>> print(g.num_edges())\n 123718280\n \"\"\"\n if etype is None:\n if len(self.etypes) == 1:\n return self._gpb._num_edges(self.etypes[0])\n else:\n return sum([self._gpb._num_edges(etype) for etype in self.etypes])\n return self._gpb._num_edges(etype)\n\n def out_degrees(self, u=ALL):\n \"\"\"Return the out-degree(s) of the given nodes.\n\n It computes the out-degree(s).\n It does not support heterogeneous graphs yet.\n\n Parameters\n ----------\n u : node IDs\n The node IDs. The allowed formats are:\n\n * ``int``: A single node.\n * Int Tensor: Each element is a node ID. The tensor must have the same device type\n and ID data type as the graph's.\n * iterable[int]: Each element is a node ID.\n\n If not given, return the in-degrees of all the nodes.\n\n Returns\n -------\n int or Tensor\n The out-degree(s) of the node(s) in a Tensor. The i-th element is the out-degree\n of the i-th input node. If :attr:`v` is an ``int``, return an ``int`` too.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n Query for all nodes.\n\n >>> g.out_degrees()\n tensor([2, 2, 0, 0])\n\n Query for nodes 1 and 2.\n\n >>> g.out_degrees(torch.tensor([1, 2]))\n tensor([2, 0])\n\n See Also\n --------\n in_degrees\n \"\"\"\n if is_all(u):\n u = F.arange(0, self.number_of_nodes())\n return dist_out_degrees(self, u)\n\n def in_degrees(self, v=ALL):\n \"\"\"Return the in-degree(s) of the given nodes.\n\n It computes the in-degree(s).\n It does not support heterogeneous graphs yet.\n\n Parameters\n ----------\n v : node IDs\n The node IDs. 
The allowed formats are:\n\n * ``int``: A single node.\n * Int Tensor: Each element is a node ID. The tensor must have the same device type\n and ID data type as the graph's.\n * iterable[int]: Each element is a node ID.\n\n If not given, return the in-degrees of all the nodes.\n\n Returns\n -------\n int or Tensor\n The in-degree(s) of the node(s) in a Tensor. The i-th element is the in-degree\n of the i-th input node. If :attr:`v` is an ``int``, return an ``int`` too.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n Query for all nodes.\n\n >>> g.in_degrees()\n tensor([0, 2, 1, 1])\n\n Query for nodes 1 and 2.\n\n >>> g.in_degrees(torch.tensor([1, 2]))\n tensor([2, 1])\n\n See Also\n --------\n out_degrees\n \"\"\"\n if is_all(v):\n v = F.arange(0, self.number_of_nodes())\n return dist_in_degrees(self, v)\n\n def node_attr_schemes(self):\n \"\"\"Return the node feature schemes.\n\n Each feature scheme is a named tuple that stores the shape and data type\n of the node feature.\n\n Returns\n -------\n dict of str to schemes\n The schemes of node feature columns.\n\n Examples\n --------\n The following uses PyTorch backend.\n\n >>> g.node_attr_schemes()\n {'h': Scheme(shape=(4,), dtype=torch.float32)}\n\n See Also\n --------\n edge_attr_schemes\n \"\"\"\n schemes = {}\n for key in self.ndata:\n schemes[key] = infer_scheme(self.ndata[key])\n return schemes\n\n def edge_attr_schemes(self):\n \"\"\"Return the edge feature schemes.\n\n Each feature scheme is a named tuple that stores the shape and data type\n of the edge feature.\n\n Returns\n -------\n dict of str to schemes\n The schemes of edge feature columns.\n\n Examples\n --------\n The following uses PyTorch backend.\n\n >>> g.edge_attr_schemes()\n {'h': Scheme(shape=(4,), dtype=torch.float32)}\n\n See Also\n --------\n node_attr_schemes\n \"\"\"\n schemes = {}\n for key in self.edata:\n schemes[key] = infer_scheme(self.edata[key])\n return schemes\n\n def rank(self):\n ''' The rank of the current DistGraph.\n\n This returns a unique number to identify the DistGraph object among all of\n the client processes.\n\n Returns\n -------\n int\n The rank of the current DistGraph.\n '''\n return role.get_global_rank()\n\n def find_edges(self, edges, etype=None):\n \"\"\" Given an edge ID array, return the source\n and destination node ID array ``s`` and ``d``. ``s[i]`` and ``d[i]``\n are source and destination node ID for edge ``eid[i]``.\n\n Parameters\n ----------\n edges : Int Tensor\n Each element is an ID. The tensor must have the same device type\n and ID data type as the graph's.\n\n etype : str or (str, str, str), optional\n The type names of the edges. 
The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n\n Returns\n -------\n tensor\n The source node ID array.\n tensor\n The destination node ID array.\n \"\"\"\n if etype is None:\n assert len(self.etypes) == 1, 'find_edges requires etype for heterogeneous graphs.'\n\n gpb = self.get_partition_book()\n if len(gpb.etypes) > 1:\n # if etype is a canonical edge type (str, str, str), extract the edge type\n if len(etype) == 3:\n etype = etype[1]\n edges = gpb.map_to_homo_eid(edges, etype)\n src, dst = dist_find_edges(self, edges)\n if len(gpb.ntypes) > 1:\n _, src = gpb.map_to_per_ntype(src)\n _, dst = gpb.map_to_per_ntype(dst)\n return src, dst\n\n def edge_subgraph(self, edges, relabel_nodes=True, store_ids=True):\n \"\"\"Return a subgraph induced on the given edges.\n\n An edge-induced subgraph is equivalent to creating a new graph using the given\n edges. In addition to extracting the subgraph, DGL also copies the features\n of the extracted nodes and edges to the resulting graph. The copy is *lazy*\n and incurs data movement only when needed.\n\n If the graph is heterogeneous, DGL extracts a subgraph per relation and composes\n them as the resulting graph. Thus, the resulting graph has the same set of relations\n as the input one.\n\n Parameters\n ----------\n edges : Int Tensor or dict[(str, str, str), Int Tensor]\n The edges to form the subgraph. Each element is an edge ID. The tensor must have\n the same device type and ID data type as the graph's.\n\n If the graph is homogeneous, one can directly pass an Int Tensor.\n Otherwise, the argument must be a dictionary with keys being edge types\n and values being the edge IDs in the above formats.\n relabel_nodes : bool, optional\n If True, it will remove the isolated nodes and relabel the incident nodes in the\n extracted subgraph.\n store_ids : bool, optional\n If True, it will store the raw IDs of the extracted edges in the ``edata`` of the\n resulting graph under name ``dgl.EID``; if ``relabel_nodes`` is ``True``, it will\n also store the raw IDs of the incident nodes in the ``ndata`` of the resulting\n graph under name ``dgl.NID``.\n\n Returns\n -------\n G : DGLGraph\n The subgraph.\n \"\"\"\n if isinstance(edges, dict):\n # TODO(zhengda) we need to directly generate subgraph of all relations with\n # one invocation.\n if isinstance(edges, tuple):\n subg = {etype: self.find_edges(edges[etype], etype[1]) for etype in edges}\n else:\n subg = {}\n for etype in edges:\n assert len(self._etype2canonical[etype]) == 3, \\\n 'the etype in input edges is ambiguous'\n subg[self._etype2canonical[etype]] = self.find_edges(edges[etype], etype)\n num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}\n subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)\n for etype in edges:\n subg.edges[etype].data[EID] = edges[etype]\n else:\n assert len(self.etypes) == 1\n subg = self.find_edges(edges)\n subg = dgl_graph(subg, num_nodes=self.number_of_nodes())\n subg.edata[EID] = edges\n\n if relabel_nodes:\n subg = compact_graphs(subg)\n assert store_ids, 'edge_subgraph always stores original node/edge IDs.'\n return subg\n\n def get_partition_book(self):\n \"\"\"Get the partition information.\n\n Returns\n -------\n GraphPartitionBook\n Object that stores all graph partition information.\n \"\"\"\n return 
self._gpb\n\n def get_node_partition_policy(self, ntype):\n \"\"\"Get the partition policy for a node type.\n\n When creating a new distributed tensor, we need to provide a partition policy\n that indicates how to distribute data of the distributed tensor in a cluster\n of machines. When we load a distributed graph in the cluster, we have pre-defined\n partition policies for each node type and each edge type. By providing\n the node type, we can reference to the pre-defined partition policy for the node type.\n\n Parameters\n ----------\n ntype : str\n The node type\n\n Returns\n -------\n PartitionPolicy\n The partition policy for the node type.\n \"\"\"\n return NodePartitionPolicy(self.get_partition_book(), ntype)\n\n def get_edge_partition_policy(self, etype):\n \"\"\"Get the partition policy for an edge type.\n\n When creating a new distributed tensor, we need to provide a partition policy\n that indicates how to distribute data of the distributed tensor in a cluster\n of machines. When we load a distributed graph in the cluster, we have pre-defined\n partition policies for each node type and each edge type. By providing\n the edge type, we can reference to the pre-defined partition policy for the edge type.\n\n Parameters\n ----------\n etype : str\n The edge type\n\n Returns\n -------\n PartitionPolicy\n The partition policy for the edge type.\n \"\"\"\n return EdgePartitionPolicy(self.get_partition_book(), etype)\n\n def barrier(self):\n '''Barrier for all client nodes.\n\n This API blocks the current process untill all the clients invoke this API.\n Please use this API with caution.\n '''\n self._client.barrier()\n\n def _get_ndata_names(self, ntype=None):\n ''' Get the names of all node data.\n '''\n names = self._client.gdata_name_list()\n ndata_names = []\n for name in names:\n name = parse_hetero_data_name(name)\n right_type = (name.get_type() == ntype) if ntype is not None else True\n if name.is_node() and right_type:\n ndata_names.append(name)\n return ndata_names\n\n def _get_edata_names(self, etype=None):\n ''' Get the names of all edge data.\n '''\n names = self._client.gdata_name_list()\n edata_names = []\n for name in names:\n name = parse_hetero_data_name(name)\n right_type = (name.get_type() == etype) if etype is not None else True\n if name.is_edge() and right_type:\n edata_names.append(name)\n return edata_names\n\ndef _get_overlap(mask_arr, ids):\n \"\"\" Select the IDs given a boolean mask array.\n\n The boolean mask array indicates all of the IDs to be selected. We want to\n find the overlap between the IDs selected by the boolean mask array and\n the ID array.\n\n Parameters\n ----------\n mask_arr : 1D tensor\n A boolean mask array.\n ids : 1D tensor\n A vector with IDs.\n\n Returns\n -------\n 1D tensor\n The selected IDs.\n \"\"\"\n if isinstance(mask_arr, DistTensor):\n masks = mask_arr[ids]\n return F.boolean_mask(ids, masks)\n else:\n masks = F.gather_row(F.tensor(mask_arr), ids)\n return F.boolean_mask(ids, masks)\n\ndef _split_local(partition_book, rank, elements, local_eles):\n ''' Split the input element list with respect to data locality.\n '''\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n if rank is None:\n rank = role.get_trainer_rank()\n assert rank < num_clients, \\\n 'The input rank ({}) is incorrect. 
#Trainers: {}'.format(rank, num_clients)\n # all ranks of the clients in the same machine are in a contiguous range.\n client_id_in_part = rank % num_client_per_part\n local_eles = _get_overlap(elements, local_eles)\n\n # get a subset for the local client.\n size = len(local_eles) // num_client_per_part\n # if this isn't the last client in the partition.\n if client_id_in_part + 1 < num_client_per_part:\n return local_eles[(size * client_id_in_part):(size * (client_id_in_part + 1))]\n else:\n return local_eles[(size * client_id_in_part):]\n\ndef _even_offset(n, k):\n ''' Split an array of length n into k segments and the difference of thier length is\n at most 1. Return the offset of each segment.\n '''\n eles_per_part = n // k\n offset = np.array([0] + [eles_per_part] * k, dtype=int)\n offset[1 : n - eles_per_part * k + 1] += 1\n return np.cumsum(offset)\n\ndef _split_even_to_part(partition_book, elements):\n ''' Split the input element list evenly.\n '''\n # here we divide the element list as evenly as possible. If we use range partitioning,\n # the split results also respect the data locality. Range partitioning is the default\n # strategy.\n # TODO(zhengda) we need another way to divide the list for other partitioning strategy.\n if isinstance(elements, DistTensor):\n nonzero_count = elements.count_nonzero()\n else:\n elements = F.tensor(elements)\n nonzero_count = F.count_nonzero(elements)\n # compute the offset of each split and ensure that the difference of each partition size\n # is 1.\n offsets = _even_offset(nonzero_count, partition_book.num_partitions())\n assert offsets[-1] == nonzero_count\n\n # Get the elements that belong to the partition.\n partid = partition_book.partid\n left, right = offsets[partid], offsets[partid + 1]\n\n x = y = 0\n num_elements = len(elements)\n block_size = num_elements // partition_book.num_partitions()\n part_eles = None\n # compute the nonzero tensor of each partition instead of whole tensor to save memory\n for idx in range(0, num_elements, block_size):\n nonzero_block = F.nonzero_1d(elements[idx:min(idx+block_size, num_elements)])\n x = y\n y += len(nonzero_block)\n if y > left and x < right:\n start = max(x, left) - x\n end = min(y, right) - x\n tmp = nonzero_block[start:end] + idx\n if part_eles is None:\n part_eles = tmp\n else:\n part_eles = F.cat((part_eles, tmp), 0)\n elif x >= right:\n break\n\n return part_eles\n\ndef _split_random_within_part(partition_book, rank, part_eles):\n # If there are more than one client in a partition, we need to randomly select a subset of\n # elements in the partition for a client. We have to make sure that the set of elements\n # for different clients are disjoint.\n\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n if num_client_per_part == 1:\n return part_eles\n if rank is None:\n rank = role.get_trainer_rank()\n assert rank < num_clients, \\\n 'The input rank ({}) is incorrect. 
#Trainers: {}'.format(rank, num_clients)\n client_id_in_part = rank % num_client_per_part\n offset = _even_offset(len(part_eles), num_client_per_part)\n\n # We set the random seed for each partition, so that each process (client) in a partition\n # permute the elements in a partition in the same way, so each process gets a disjoint subset\n # of elements.\n np.random.seed(partition_book.partid)\n rand_idx = np.random.permutation(len(part_eles))\n rand_idx = rand_idx[offset[client_id_in_part] : offset[client_id_in_part + 1]]\n idx, _ = F.sort_1d(F.tensor(rand_idx))\n return F.gather_row(part_eles, idx)\n\ndef _split_by_trainer_id(partition_book, part_eles, trainer_id,\n num_client_per_part, client_id_in_part):\n # TODO(zhengda): MXNet cannot deal with empty tensors, which makes the implementation\n # much more difficult. Let's just use numpy for the computation for now. We just\n # perform operations on vectors. It shouldn't be too difficult.\n trainer_id = F.asnumpy(trainer_id)\n part_eles = F.asnumpy(part_eles)\n part_id = trainer_id // num_client_per_part\n trainer_id = trainer_id % num_client_per_part\n local_eles = part_eles[np.nonzero(part_id[part_eles] == partition_book.partid)[0]]\n # these are the Ids of the local elements in the partition. The Ids are global Ids.\n remote_eles = part_eles[np.nonzero(part_id[part_eles] != partition_book.partid)[0]]\n # these are the Ids of the remote nodes in the partition. The Ids are global Ids.\n local_eles_idx = np.concatenate(\n [np.nonzero(trainer_id[local_eles] == i)[0] for i in range(num_client_per_part)],\n # trainer_id[local_eles] is the trainer ids of local nodes in the partition and we\n # pick out the indices where the node belongs to each trainer i respectively, and\n # concatenate them.\n axis=0\n )\n # `local_eles_idx` is used to sort `local_eles` according to `trainer_id`. It is a\n # permutation of 0...(len(local_eles)-1)\n local_eles = local_eles[local_eles_idx]\n\n # evenly split local nodes to trainers\n local_offsets = _even_offset(len(local_eles), num_client_per_part)\n # evenly split remote nodes to trainers\n remote_offsets = _even_offset(len(remote_eles), num_client_per_part)\n\n client_local_eles = local_eles[\n local_offsets[client_id_in_part]:local_offsets[client_id_in_part + 1]]\n client_remote_eles = remote_eles[\n remote_offsets[client_id_in_part]:remote_offsets[client_id_in_part + 1]]\n client_eles = np.concatenate([client_local_eles, client_remote_eles], axis=0)\n return F.tensor(client_eles)\n\ndef node_split(nodes, partition_book=None, ntype='_N', rank=None, force_even=True,\n node_trainer_ids=None):\n ''' Split nodes and return a subset for the local rank.\n\n This function splits the input nodes based on the partition book and\n returns a subset of nodes for the local rank. This method is used for\n dividing workloads for distributed training.\n\n The input nodes are stored as a vector of masks. The length of the vector is\n the same as the number of nodes in a graph; 1 indicates that the vertex in\n the corresponding location exists.\n\n There are two strategies to split the nodes. By default, it splits the nodes\n in a way to maximize data locality. That is, all nodes that belong to a process\n are returned. 
If ``force_even`` is set to true, the nodes are split evenly so\n that each process gets almost the same number of nodes.\n\n When ``force_even`` is True, the data locality is still preserved if a graph is partitioned\n with Metis and the node/edge IDs are shuffled.\n In this case, majority of the nodes returned for a process are the ones that\n belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.\n\n Parameters\n ----------\n nodes : 1D tensor or DistTensor\n A boolean mask vector that indicates input nodes.\n partition_book : GraphPartitionBook, optional\n The graph partition book\n ntype : str, optional\n The node type of the input nodes.\n rank : int, optional\n The rank of a process. If not given, the rank of the current process is used.\n force_even : bool, optional\n Force the nodes are split evenly.\n node_trainer_ids : 1D tensor or DistTensor, optional\n If not None, split the nodes to the trainers on the same machine according to\n trainer IDs assigned to each node. Otherwise, split randomly.\n\n Returns\n -------\n 1D-tensor\n The vector of node IDs that belong to the rank.\n '''\n if not isinstance(nodes, DistTensor):\n assert partition_book is not None, 'Regular tensor requires a partition book.'\n elif partition_book is None:\n partition_book = nodes.part_policy.partition_book\n\n assert len(nodes) == partition_book._num_nodes(ntype), \\\n 'The length of boolean mask vector should be the number of nodes in the graph.'\n if rank is None:\n rank = role.get_trainer_rank()\n if force_even:\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n assert num_clients % partition_book.num_partitions() == 0, \\\n 'The total number of clients should be multiple of the number of partitions.'\n part_nid = _split_even_to_part(partition_book, nodes)\n if num_client_per_part == 1:\n return part_nid\n elif node_trainer_ids is None:\n return _split_random_within_part(partition_book, rank, part_nid)\n else:\n trainer_id = node_trainer_ids[0:len(node_trainer_ids)]\n max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1\n\n if max_trainer_id > num_clients:\n # We hope the partition scheme with trainer_id could be used when the number of\n # trainers is less than the `num_trainers_per_machine` previously assigned during\n # partitioning.\n assert max_trainer_id % num_clients == 0\n trainer_id //= (max_trainer_id // num_clients)\n\n client_id_in_part = rank % num_client_per_part\n return _split_by_trainer_id(partition_book, part_nid, trainer_id,\n num_client_per_part, client_id_in_part)\n else:\n # Get all nodes that belong to the rank.\n local_nids = partition_book.partid2nids(partition_book.partid)\n return _split_local(partition_book, rank, nodes, local_nids)\n\ndef edge_split(edges, partition_book=None, etype='_E', rank=None, force_even=True,\n edge_trainer_ids=None):\n ''' Split edges and return a subset for the local rank.\n\n This function splits the input edges based on the partition book and\n returns a subset of edges for the local rank. This method is used for\n dividing workloads for distributed training.\n\n The input edges can be stored as a vector of masks. The length of the vector is\n the same as the number of edges in a graph; 1 indicates that the edge in\n the corresponding location exists.\n\n There are two strategies to split the edges. By default, it splits the edges\n in a way to maximize data locality. That is, all edges that belong to a process\n are returned. 
If ``force_even`` is set to true, the edges are split evenly so\n that each process gets almost the same number of edges.\n\n When ``force_even`` is True, the data locality is still preserved if a graph is partitioned\n with Metis and the node/edge IDs are shuffled.\n In this case, majority of the nodes returned for a process are the ones that\n belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.\n\n Parameters\n ----------\n edges : 1D tensor or DistTensor\n A boolean mask vector that indicates input edges.\n partition_book : GraphPartitionBook, optional\n The graph partition book\n etype : str, optional\n The edge type of the input edges.\n rank : int, optional\n The rank of a process. If not given, the rank of the current process is used.\n force_even : bool, optional\n Force the edges are split evenly.\n edge_trainer_ids : 1D tensor or DistTensor, optional\n If not None, split the edges to the trainers on the same machine according to\n trainer IDs assigned to each edge. Otherwise, split randomly.\n\n Returns\n -------\n 1D-tensor\n The vector of edge IDs that belong to the rank.\n '''\n if not isinstance(edges, DistTensor):\n assert partition_book is not None, 'Regular tensor requires a partition book.'\n elif partition_book is None:\n partition_book = edges.part_policy.partition_book\n assert len(edges) == partition_book._num_edges(etype), \\\n 'The length of boolean mask vector should be the number of edges in the graph.'\n if rank is None:\n rank = role.get_trainer_rank()\n if force_even:\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n assert num_clients % partition_book.num_partitions() == 0, \\\n 'The total number of clients should be multiple of the number of partitions.'\n part_eid = _split_even_to_part(partition_book, edges)\n if num_client_per_part == 1:\n return part_eid\n elif edge_trainer_ids is None:\n return _split_random_within_part(partition_book, rank, part_eid)\n else:\n trainer_id = edge_trainer_ids[0:len(edge_trainer_ids)]\n max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1\n\n if max_trainer_id > num_clients:\n # We hope the partition scheme with trainer_id could be used when the number of\n # trainers is less than the `num_trainers_per_machine` previously assigned during\n # partitioning.\n assert max_trainer_id % num_clients == 0\n trainer_id //= (max_trainer_id // num_clients)\n\n client_id_in_part = rank % num_client_per_part\n return _split_by_trainer_id(partition_book, part_eid, trainer_id,\n num_client_per_part, client_id_in_part)\n else:\n # Get all edges that belong to the rank.\n local_eids = partition_book.partid2eids(partition_book.partid)\n return _split_local(partition_book, rank, edges, local_eids)\n\nrpc.register_service(INIT_GRAPH, InitGraphRequest, InitGraphResponse)\n"
] | [
[
"numpy.nonzero",
"numpy.random.seed",
"numpy.cumsum",
"numpy.concatenate",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
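
The `node_split`/`edge_split` helpers in the record above return the subset of masked node or edge IDs assigned to the calling trainer. As a minimal usage sketch — assuming the surrounding module is DGL's `dgl.distributed` API (which these functions match), that `dgl.distributed.initialize(...)` has already been called, and that the graph carries a boolean `'train_mask'` node feature (the graph name and feature name are illustrative, not taken from the record) — the returned IDs are what a local sampler or dataloader would consume:

import dgl

# Sketch only: assumes dgl.distributed.initialize('ip_config.txt') has run and a
# DistGraph server is serving a partitioned graph named 'graph_name' (hypothetical).
g = dgl.distributed.DistGraph('graph_name')
train_mask = g.ndata['train_mask']                       # boolean mask, length == number of nodes
local_train_nids = dgl.distributed.node_split(
    train_mask, g.get_partition_book(), force_even=True)
# local_train_nids holds the node IDs this rank trains on; it is normally handed
# to a neighbour sampler / distributed dataloader rather than iterated directly.
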
minrk/discretisedfield | [
"251584f8d976a7fafdff5402d16327489407c4dd"
] | [
"discretisedfield/field.py"
] | [
"import pyvtk\nimport struct\nimport matplotlib\nimport numpy as np\nimport mpl_toolkits.axes_grid1\nimport discretisedfield as df\nimport ubermagutil.typesystem as ts\nimport discretisedfield.util as dfu\nimport matplotlib.pyplot as plt\n\n\[email protected](mesh=ts.Typed(expected_type=df.Mesh),\n dim=ts.Scalar(expected_type=int, unsigned=True, const=True),\n name=ts.Name(const=True))\nclass Field:\n \"\"\"Finite difference field.\n\n This class defines a finite difference field and enables certain\n operations for its analysis and visualisation. The field is\n defined on a finite difference mesh (`discretisedfield.Mesh`).\n\n Parameters\n ----------\n mesh : discretisedfield.Mesh\n Finite difference rectangular mesh.\n dim : int, optional\n Dimension of the field value. For instance, if `dim=3` the\n field is a three-dimensional vector field and for `dim=1`\n the field is a scalar field. Defaults to `dim=3`.\n value : array_like, callable, optional\n Please refer to the `value` property:\n :py:func:`~discretisedfield.Field.value`. Defaults to 0,\n meaning that if the value is not provided in the\n initialisation process, \"zero-field\" will be defined.\n norm : numbers.Real, callable, optional\n Please refer to the `norm` property:\n :py:func:`~discretisedfield.Field.norm`. Defaults to `None`\n (`norm=None` defines no norm).\n name : str, optional\n Field name (defaults to `'field'`). The field name must be a\n valid Python variable name string. More specifically, it must\n not contain spaces, or start with underscore or numeric\n character.\n\n Examples\n --------\n 1. Creating a uniform three-dimensional vector field on a\n nano-sized thin film.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50e-9, -25e-9, 0)\n >>> p2 = (50e-9, 25e-9, 5e-9)\n >>> cell = (1e-9, 1e-9, 0.1e-9)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n ...\n >>> dim = 3\n >>> value = (0, 0, 1)\n >>> field = df.Field(mesh=mesh, dim=dim, value=value)\n >>> field\n Field(mesh=...)\n\n 2. Creating a scalar field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-10, -10, -10)\n >>> p2 = (10, 10, 10)\n >>> n = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> dim = 1\n >>> value = 3.14\n >>> field = df.Field(mesh=mesh, dim=dim, value=value)\n >>> field\n Field(mesh=...)\n\n .. seealso:: :py:func:`~discretisedfield.Mesh`\n\n \"\"\"\n def __init__(self, mesh, dim=3, value=0, norm=None, name='field'):\n self.mesh = mesh\n self.dim = dim\n self.value = value\n self.norm = norm\n self.name = name\n\n @property\n def value(self):\n \"\"\"Field value representation.\n\n This propertry returns a representation of the field value if\n it exists. Otherwise, the `numpy.ndarray` containing all\n values from the field is returned.\n\n Parameters\n ----------\n value : 0, array_like, callable\n For scalar fields (`dim=1`) `numbers.Real` values are\n allowed. In the case of vector fields, \"array_like\" (list,\n tuple, numpy.ndarray) value with length equal to `dim`\n should be used. Finally, the value can also be a callable\n (e.g. Python function or another field), which for every\n coordinate in the mesh returns a valid value. If\n `value=0`, all values in the field will be set to zero\n independent of the field dimension.\n\n Returns\n -------\n array_like, callable, numbers.Real\n The value used (representation) for setting the field is\n returned. 
However, if the actual value of the field does\n not correspond to the initially used value anymore, a\n `numpy.ndarray` is returned containing all field values.\n\n Raises\n ------\n ValueError\n If unsupported type is passed\n\n Examples\n --------\n 1. Different ways of setting and getting the field value.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> value = (0, 0, 1)\n >>> # if value is not specified, zero-field is defined\n >>> field = df.Field(mesh=mesh, dim=3)\n >>> field.value\n 0\n >>> field.value = (0, 0, 1)\n >>> field.value\n (0, 0, 1)\n >>> # Setting the field value using a Python function (callable).\n >>> def value_function(pos):\n ... x, y, z = pos\n ... if x <= 1:\n ... return (0, 0, 1)\n ... else:\n ... return (0, 0, -1)\n >>> field.value = value_function\n >>> field.value\n <function value_function at ...>\n >>> # We now change the value of a single cell so that the\n >>> # representation used for initialising field is not valid\n >>> # anymore.\n >>> field.array[0, 0, 0, :] = (0, 0, 0)\n >>> field.value\n array(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.array`\n\n \"\"\"\n value_array = dfu.as_array(self.mesh, self.dim, self._value)\n if np.array_equal(self.array, value_array):\n return self._value\n else:\n return self.array\n\n @value.setter\n def value(self, val):\n self._value = val\n self.array = dfu.as_array(self.mesh, self.dim, val)\n\n @property\n def array(self):\n \"\"\"Numpy array of a field value.\n\n `array` has shape of `(self.mesh.n[0], self.mesh.n[1],\n self.mesh.n[2], dim)`.\n\n Parameters\n ----------\n array : numpy.ndarray\n Numpy array with dimensions `(self.mesh.n[0],\n self.mesh.n[1], self.mesh.n[2], dim)`\n\n Returns\n -------\n numpy.ndarray\n Field values array.\n\n Raises\n ------\n ValueError\n If setting the array with wrong type, shape, or value.\n\n Examples\n --------\n 1. Accessing and setting the field array.\n\n >>> import discretisedfield as df\n >>> import numpy as np\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (1, 1, 1)\n >>> cell = (0.5, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> value = (0, 0, 1)\n >>> field = df.Field(mesh=mesh, dim=3, value=value)\n >>> field.array\n array(...)\n >>> field.array.shape\n (2, 1, 1, 3)\n >>> field.array = np.ones(field.array.shape)\n >>> field.array\n array(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.value`\n\n \"\"\"\n return self._array\n\n @array.setter\n def array(self, val):\n if isinstance(val, np.ndarray) and \\\n val.shape == self.mesh.n + (self.dim,):\n self._array = val\n else:\n msg = (f'Unsupported type(val)={type(val)} '\n 'or invalid value dimensions.')\n raise ValueError(msg)\n\n @property\n def norm(self):\n \"\"\"Norm of a field.\n\n This property computes the norm of the field and returns it as\n a `discretisedfield.Field` object with `dim=1`. Norm of a\n scalar field cannot be set and `ValueError` is raised.\n\n Parameters\n ----------\n numbers.Real, numpy.ndarray\n Norm value\n\n Returns\n -------\n discretisedfield.Field\n Scalar field with norm values.\n\n Raises\n ------\n ValueError\n If setting the norm with wrong type, shape, or value. In\n addition, if the field is scalar (dim=1) or it contains\n zero vector values.\n\n Examples\n --------\n 1. 
Manipulating the field norm\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (1, 1, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field.norm\n Field(...)\n >>> field.norm = 2\n >>> field.norm\n Field(...)\n >>> field.value = (1, 0, 0)\n >>> field.norm.array\n array([[[[1.]]]])\n\n \"\"\"\n current_norm = np.linalg.norm(self.array, axis=-1)[..., None]\n return Field(self.mesh, dim=1, value=current_norm, name='norm')\n\n @norm.setter\n def norm(self, val):\n if val is not None:\n if self.dim == 1:\n msg = f'Cannot set norm for field with dim={self.dim}.'\n raise ValueError(msg)\n\n if not np.all(self.norm.array):\n msg = 'Cannot normalise field with zero values.'\n raise ValueError(msg)\n\n self.array /= self.norm.array # normalise to 1\n self.array *= dfu.as_array(self.mesh, dim=1, val=val)\n\n @property\n def average(self):\n \"\"\"Field average.\n\n It computes the average of the field over the entire volume of\n the mesh.\n\n Returns\n -------\n tuple\n Field average tuple whose length equals to the field's\n dimension.\n\n Examples\n --------\n 1. Computing the vector field average.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (5, 5, 5)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field1 = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field1.average\n (0.0, 0.0, 1.0)\n >>> field2 = df.Field(mesh=mesh, dim=1, value=55)\n >>> field2.average\n (55.0,)\n\n \"\"\"\n return tuple(self.array.mean(axis=(0, 1, 2)))\n\n def __repr__(self):\n \"\"\"Field representation string.\n\n This method returns the string that can ideally be copied in\n another Python script so that exactly the same field object\n could be defined. However, this is usually not the case due to\n complex values used.\n\n Returns\n -------\n str\n Field representation string.\n\n Example\n -------\n 1. Getting field representation string.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=1, value=1)\n >>> repr(field)\n \"Field(mesh=...)\"\n\n \"\"\"\n return (f'Field(mesh={repr(self.mesh)}, '\n f'dim={self.dim}, name=\\'{self.name}\\')')\n\n def __call__(self, point):\n \"\"\"Sample the field at `point`.\n\n It returns the value of the discreatisation cell `point`\n belongs to. It always returns a tuple, whose length is the\n same as the dimension of the field.\n\n Parameters\n ----------\n point : (3,) array_like\n The mesh point coordinate :math:`(p_{x}, p_{y}, p_{z})`.\n\n Returns\n -------\n tuple\n A tuple, whose length is the same as the dimension of the\n field.\n\n Example\n -------\n 1. Sampling the field value\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (20, 20, 20)\n >>> n = (20, 20, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 3, 4))\n >>> point = (10, 2, 3)\n >>> field(point)\n (1.0, 3.0, 4.0)\n\n \"\"\"\n value = self.array[self.mesh.point2index(point)]\n if self.dim > 1:\n value = tuple(value)\n return value\n\n def __getattr__(self, name):\n \"\"\"Extracting the component of the vector field.\n\n If `'x'`, `'y'`, or `'z'` is accessed, a new scalar field of\n that component will be returned. 
This method is effective for\n vector fields with dimension 2 or 3.\n\n Returns\n -------\n discretisedfield.Field\n Scalar field with vector field component values.\n\n Examples\n --------\n 1. Accessing the vector field components.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field.x\n Field(...)\n >>> field.y\n Field(...)\n >>> field.z\n Field(...)\n >>> field.z.dim\n 1\n\n \"\"\"\n if name in list(dfu.axesdict.keys())[:self.dim] and 1 < self.dim <= 3:\n # Components x, y, and z make sense only for vector fields\n # with typical dimensions 2 and 3.\n component_array = self.array[..., dfu.axesdict[name]][..., None]\n fieldname = f'{self.name}-{name}'.format(self.name, name)\n return Field(mesh=self.mesh, dim=1,\n value=component_array, name=fieldname)\n else:\n msg = f'{type(self).__name__} object has no attribute {name}.'\n raise AttributeError(msg.format(type(self).__name__, name))\n\n def __dir__(self):\n \"\"\"Extension of the tab-completion list.\n\n Adds `'x'`, `'y'`, and `'z'`, depending on the dimension of\n the field, to the tab-completion list. This is effective in\n IPython or Jupyter notebook environment.\n\n \"\"\"\n if 1 < self.dim <= 3:\n extension = list(dfu.axesdict.keys())[:self.dim]\n else:\n extension = []\n return list(self.__dict__.keys()) + extension\n\n def __iter__(self):\n \"\"\"Generator yielding coordinates and values of all field cells.\n\n The discretisation cell coordinate corresponds to the cell\n centre point.\n\n Yields\n ------\n tuple (2,)\n The first value is the mesh cell coordinates (`px`, `py`,\n `pz`), whereas the second one is the field value.\n\n Examples\n --------\n 1. Iterating through the field coordinates and values\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=3, value=(0, 0, 1))\n >>> for coord, value in field:\n ... print (coord, value)\n (0.5, 0.5, 0.5) (0.0, 0.0, 1.0)\n (1.5, 0.5, 0.5) (0.0, 0.0, 1.0)\n (0.5, 1.5, 0.5) (0.0, 0.0, 1.0)\n (1.5, 1.5, 0.5) (0.0, 0.0, 1.0)\n\n .. seealso:: :py:func:`~discretisedfield.Mesh.indices`\n\n \"\"\"\n for point in self.mesh.coordinates:\n yield point, self.__call__(point)\n\n def line(self, p1, p2, n=100):\n \"\"\"Sampling the field along the line.\n\n Given two points :math:`p_{1}` and :math:`p_{2}`, :math:`n`\n position coordinates are generated and the corresponding field\n values.\n\n .. math::\n\n \\\\mathbf{r}_{i} = i\\\\frac{\\\\mathbf{p}_{2} -\n \\\\mathbf{p}_{1}}{n-1}\n\n Parameters\n ----------\n p1, p2 : (3,) array_like\n Two points between which the line is generated.\n n : int\n Number of points on the line.\n\n Yields\n ------\n tuple\n The first element is the coordinate of the point on the\n line, whereas the second one is the value of the field.\n\n Raises\n ------\n ValueError\n If `p1` or `p2` is outside the mesh domain.\n\n Examples\n --------\n 1. Sampling the field along the line.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=2, value=(0, 3))\n >>> for coord, value in field.line(p1=(0, 0, 0), p2=(2, 0, 0), n=3):\n ... 
print(coord, value)\n (0.0, 0.0, 0.0) (0.0, 3.0)\n (1.0, 0.0, 0.0) (0.0, 3.0)\n (2.0, 0.0, 0.0) (0.0, 3.0)\n\n \"\"\"\n for point in self.mesh.line(p1=p1, p2=p2, n=n):\n yield point, self.__call__(point)\n\n def plane(self, *args, n=None, **kwargs):\n \"\"\"Slices the field with a plane.\n\n If one of the axes (`'x'`, `'y'`, or `'z'`) is passed as a\n string, a plane perpendicular to that axis is generated which\n intersects the field at its centre. Alternatively, if a keyword\n argument is passed (e.g. `x=1`), a plane perpendicular to the\n x-axis and intersecting it at x=1 is generated. The number of\n points in two dimensions on the plane can be defined using `n`\n (e.g. `n=(10, 15)`). Using the generated plane, a new\n \"two-dimensional\" field is created and returned.\n\n Parameters\n ----------\n n : tuple of length 2\n The number of points on the plane in two dimensions\n\n Returns\n ------\n discretisedfield.Field\n A field obtained as an intersection of mesh and the plane.\n\n Example\n -------\n 1. Intersecting the field with a plane.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=3)\n >>> field.plane(y=1)\n Field(mesh=...)\n\n \"\"\"\n plane_mesh = self.mesh.plane(*args, n=n, **kwargs)\n return self.__class__(plane_mesh, dim=self.dim, value=self)\n\n def write(self, filename, representation='txt', extend_scalar=False):\n \"\"\"Write the field in .ovf, .omf, .ohf, or vtk format.\n\n If the extension of the `filename` is `.vtk`, a VTK file is\n written\n (:py:func:`~discretisedfield.Field._writevtk`). Otherwise, for\n `.ovf`, `.omf`, or `.ohf` extensions, an OOMMF file is written\n (:py:func:`~discretisedfield.Field._writeovf`). The\n representation (`bin4`, 'bin8', or 'txt') is passed using\n `representation` argument.\n\n Parameters\n ----------\n filename : str\n Name of the file written. It depends on its extension the\n format it is going to be written as.\n representation : str\n In the case of OOMMF files (`.ovf`, `.omf`, or `.ohf`),\n representation can be specified (`bin4`, `bin8`, or\n `txt`). Defaults to 'txt'.\n extend_scalar : bool\n If True, a scalar field will be saved as a vector\n field. More precisely, if the value at a cell is 3, that\n cell will be saved as (3, 0, 0). This is valid only for\n the OVF file formats.\n\n Example\n -------\n 1. Write an .omf file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.omf'\n >>> field.write(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n .. seealso:: :py:func:`~discretisedfield.Field.fromfile`\n\n \"\"\"\n if any([filename.endswith(ext) for ext in ['.omf', '.ovf', '.ohf']]):\n self._writeovf(filename, representation=representation,\n extend_scalar=extend_scalar)\n elif filename.endswith('.vtk'):\n self._writevtk(filename)\n else:\n msg = ('Allowed extensions for writing the field are '\n '.omf, .ovf, .ohf, and .vtk.')\n raise ValueError(msg)\n\n def _writeovf(self, filename, representation='txt', extend_scalar=False):\n \"\"\"Write the field in .ovf, .omf, or .ohf format.\n\n The extension of the `filename` should be `.ovf`, `.omf`, or\n `.ohf`. 
The representation (`bin4`, 'bin8', or 'txt') is\n passed using `representation` argument.\n\n Parameters\n ----------\n filename : str\n Name of the file written.\n representation : str\n Representation of the file (`bin4`, `bin8`, or\n `txt`). Defaults to 'txt'.\n extend_scalar : bool\n If True, a scalar field will be saved as a vector\n field. More precisely, if the value at a cell is 3, that\n cell will be saved as (3, 0, 0).\n\n Example\n -------\n 1. Write an .omf file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.omf'\n >>> field._writeovf(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n \"\"\"\n if extend_scalar and self.dim == 1:\n write_dim = 3\n else:\n write_dim = self.dim\n header = ['OOMMF OVF 2.0',\n '',\n 'Segment count: 1',\n '',\n 'Begin: Segment',\n 'Begin: Header',\n '',\n 'Title: Field generated omf file',\n 'Desc: File generated by Field class',\n 'meshunit: m',\n 'meshtype: rectangular',\n f'xbase: {self.mesh.pmin[0] + self.mesh.cell[0]/2}',\n f'ybase: {self.mesh.pmin[1] + self.mesh.cell[1]/2}',\n f'zbase: {self.mesh.pmin[2] + self.mesh.cell[2]/2}',\n f'xnodes: {self.mesh.n[0]}',\n f'ynodes: {self.mesh.n[1]}',\n f'znodes: {self.mesh.n[2]}',\n f'xstepsize: {self.mesh.cell[0]}',\n f'ystepsize: {self.mesh.cell[1]}',\n f'zstepsize: {self.mesh.cell[2]}',\n f'xmin: {self.mesh.pmin[0]}',\n f'ymin: {self.mesh.pmin[1]}',\n f'zmin: {self.mesh.pmin[2]}',\n f'xmax: {self.mesh.pmax[0]}',\n f'ymax: {self.mesh.pmax[1]}',\n f'zmax: {self.mesh.pmax[2]}',\n f'valuedim: {write_dim}',\n f'valuelabels: {self.name}_x {self.name}_y {self.name}_z',\n 'valueunits: A/m A/m A/m',\n '',\n 'End: Header',\n '']\n\n if representation == 'bin4':\n header.append('Begin: Data Binary 4')\n footer = ['End: Data Binary 4',\n 'End: Segment']\n elif representation == 'bin8':\n header.append('Begin: Data Binary 8')\n footer = ['End: Data Binary 8',\n 'End: Segment']\n elif representation == 'txt':\n header.append('Begin: Data Text')\n footer = ['End: Data Text',\n 'End: Segment']\n\n # Write header lines to the ovf file.\n f = open(filename, 'w')\n f.write(''.join(map(lambda line: f'# {line}\\n', header)))\n f.close()\n\n binary_reps = {'bin4': (1234567.0, 'f'),\n 'bin8': (123456789012345.0, 'd')}\n\n if representation in binary_reps:\n # Reopen the file with binary write, appending to the end\n # of the file.\n f = open(filename, 'ab')\n\n # Add the 8 bit binary check value that OOMMF uses.\n packarray = [binary_reps[representation][0]]\n\n # Write data to the ovf file.\n for i in self.mesh.indices:\n for vi in self.array[i]:\n packarray.append(vi)\n\n v_bin = struct.pack(binary_reps[representation][1]*len(packarray),\n *packarray)\n f.write(v_bin)\n f.close()\n\n else:\n # Reopen the file for txt representation, appending to the\n # file.\n f = open(filename, 'a')\n for i in self.mesh.indices:\n if self.dim == 3:\n v = [vi for vi in self.array[i]]\n elif self.dim == 1:\n if extend_scalar:\n v = [self.array[i][0], 0.0, 0.0]\n else:\n v = [self.array[i][0]]\n else:\n msg = (f'Cannot write dim={self.dim} field.')\n raise TypeError(msg)\n for vi in v:\n f.write(' ' + str(vi))\n f.write('\\n')\n f.close()\n\n # Write footer lines to OOMMF file.\n f = open(filename, 'a')\n f.write(''.join(map(lambda line: f'# {line}\\n', footer)))\n f.close()\n\n def 
_writevtk(self, filename):\n \"\"\"Write the field in the VTK format.\n\n The extension of the `filename` should be `.vtk`.\n\n Parameters\n ----------\n filename : str\n Name of the file written.\n\n Example\n -------\n 1. Write a .vtk file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.vtk'\n >>> field._writevtk(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n \"\"\"\n grid = [pmini + np.linspace(0, li, ni+1) for pmini, li, ni in\n zip(self.mesh.pmin, self.mesh.l, self.mesh.n)]\n\n structure = pyvtk.RectilinearGrid(*grid)\n vtkdata = pyvtk.VtkData(structure)\n\n vectors = [self.__call__(coord) for coord in self.mesh.coordinates]\n vtkdata.cell_data.append(pyvtk.Vectors(vectors, self.name))\n for i, component in enumerate(dfu.axesdict.keys()):\n name = f'{self.name}_{component}'\n vtkdata.cell_data.append(pyvtk.Scalars(list(zip(*vectors))[i],\n name))\n\n vtkdata.tofile(filename)\n\n @classmethod\n def fromfile(cls, filename, norm=None, name='field'):\n \"\"\"Read the field from .ovf, .omf, or .ohf file.\n\n The extension of the `filename` should be `.ovf`, `.omf`, or\n `.ohf`. If the field should be normalised, `norm` argument can\n be passed. The `name` of the field defaults to `'field'`. This\n is a `classmethod` and should be called as\n `discretisedfield.Field.fromfile('myfile.omf')`.\n\n Parameters\n ----------\n filename : str\n Name of the file to be read.\n norm : numbers.Real, numpy.ndarray, callable\n For details, refer to :py:func:`~discretisedfield.Field.value`.\n name : str\n Name of the field read.\n\n Returns\n -------\n discretisedfield.Field\n\n Example\n -------\n 1. Read a field from the .ovf file\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> ovffile = os.path.join(os.path.dirname(__file__),\n ... 'tests', 'test_sample',\n ... 'mumax-output-linux.ovf')\n >>> field = df.Field.fromfile(ovffile)\n >>> field\n Field(mesh=...)\n\n .. 
seealso:: :py:func:`~discretisedfield.Field.write`\n\n \"\"\"\n mdatalist = ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax',\n 'xstepsize', 'ystepsize', 'zstepsize', 'valuedim']\n mdatadict = dict()\n\n try:\n with open(filename, 'r', encoding='utf-8') as ovffile:\n f = ovffile.read()\n lines = f.split('\\n')\n\n mdatalines = filter(lambda s: s.startswith('#'), lines)\n datalines = np.loadtxt(filter(lambda s: not s.startswith('#'),\n lines))\n\n for line in mdatalines:\n for mdatum in mdatalist:\n if mdatum in line:\n mdatadict[mdatum] = float(line.split()[-1])\n break\n\n except UnicodeDecodeError:\n with open(filename, 'rb') as ovffile:\n f = ovffile.read()\n lines = f.split(b'\\n')\n\n mdatalines = filter(lambda s: s.startswith(bytes('#', 'utf-8')),\n lines)\n\n for line in mdatalines:\n for mdatum in mdatalist:\n if bytes(mdatum, 'utf-8') in line:\n mdatadict[mdatum] = float(line.split()[-1])\n break\n\n header = b'# Begin: Data Binary '\n data_start = f.find(header)\n header = f[data_start:data_start + len(header) + 1]\n\n data_start += len(b'# Begin: Data Binary 8')\n data_end = f.find(b'# End: Data Binary ')\n\n # ordered by length\n newlines = [b'\\n\\r', b'\\r\\n', b'\\n']\n for nl in newlines:\n if f.startswith(nl, data_start):\n data_start += len(nl)\n break\n\n if b'4' in header:\n formatstr = '@f'\n checkvalue = 1234567.0\n elif b'8' in header:\n formatstr = '@d'\n checkvalue = 123456789012345.0\n\n listdata = list(struct.iter_unpack(formatstr,\n f[data_start:data_end]))\n datalines = np.array(listdata)\n\n if datalines[0] != checkvalue:\n # These two lines cannot be accessed via\n # tests. Therefore, they are excluded from coverage.\n msg = 'Binary Data cannot be read.' # pragma: no cover\n raise AssertionError(msg) # pragma: no cover\n\n datalines = datalines[1:] # check value removal\n\n p1 = (mdatadict[key] for key in ['xmin', 'ymin', 'zmin'])\n p2 = (mdatadict[key] for key in ['xmax', 'ymax', 'zmax'])\n cell = (mdatadict[key] for key in ['xstepsize', 'ystepsize',\n 'zstepsize'])\n dim = int(mdatadict['valuedim'])\n\n mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n\n field = df.Field(mesh, dim=dim, name=name)\n\n r_tuple = tuple(reversed(field.mesh.n)) + (int(mdatadict['valuedim']),)\n t_tuple = tuple(reversed(range(3))) + (3,)\n field.array = datalines.reshape(r_tuple).transpose(t_tuple)\n field.norm = norm # Normalise if norm is passed\n\n return field\n\n def mpl(self, figsize=None):\n \"\"\"Plots a field plane using matplotlib.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`z`)`). Otherwise, ValueError is\n raised. For vector fields, this method plots both `quiver`\n (vector) and `imshow` (scalar) plots. The `imshow` plot\n represents the value of the out-of-plane vector component and\n the `quiver` plot is not coloured. On the other hand, only\n `imshow` is plotted for scalar fields. Where the norm of the\n field is zero, no vectors are shown and those `imshow` pixels\n are not coloured. 
In order to use this function inside Jupyter\n notebook `%matplotlib inline` must be activated after\n `discretisedfield` is imported.\n\n Parameters\n ----------\n figsize : tuple, optional\n Length-2 tuple passed to the `matplotlib.figure` function.\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.plane(z=50, n=(5, 5)).mpl()\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using mpl. '\n 'For instance, field.plane(\\'x\\').mpl().')\n raise ValueError(msg)\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n\n planeaxis = dfu.raxesdict[self.mesh.info['planeaxis']]\n\n if self.dim > 1:\n # Vector field has both quiver and imshow plots.\n self.quiver(ax=ax, headwidth=5)\n scfield = getattr(self, planeaxis)\n coloredplot = scfield.imshow(ax=ax, norm_field=self.norm)\n else:\n # Scalar field has only imshow.\n scfield = self\n coloredplot = scfield.imshow(ax=ax, norm_field=None)\n\n # Add colorbar to imshow plot.\n cbar = self.colorbar(ax, coloredplot)\n\n # Add labels.\n ax.set_xlabel(dfu.raxesdict[self.mesh.info['axis1']])\n ax.set_ylabel(dfu.raxesdict[self.mesh.info['axis2']])\n if self.dim > 1:\n cbar.ax.set_ylabel(planeaxis + ' component')\n\n def imshow(self, ax, norm_field=None, **kwargs):\n \"\"\"Plots a scalar field plane using `matplotlib.pyplot.imshow`.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`y`)`) and field must be of dimension\n 1 (scalar field). Otherwise, ValueError is raised. `imshow`\n adds the plot to `matplotlib.axes.Axes` passed via `ax`\n argument. If the scalar field plotted is extracted from a\n vector field, which has coordinates where the norm of the\n field is zero, the norm of that vector field can be passed\n using `norm_field` argument, so that pixels at those\n coordinates are not coloured. All other parameters accepted by\n `matplotlib.pyplot.imshow` can be passed. In order to use this\n function inside Jupyter notebook `%matplotlib inline` must be\n activated after `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the scalar plot will be added.\n norm_field : discretisedfield.Field, optional\n A (scalar) norm field used for determining whether certain\n pixels should be coloured.\n\n Returns\n -------\n matplotlib.image.AxesImage object\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane or its\n dimension is not 1.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=1, value=2)\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> field.plane('y').imshow(ax=ax)\n <matplotlib.image.AxesImage object at ...>\n\n .. seealso:: :py:func:`~discretisedfield.Field.quiver`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using imshow. '\n 'For instance, field.plane(\\'x\\').imshow(ax=ax).')\n raise ValueError(msg)\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. 
field.x.imshow(ax=ax) '\n 'or norm field.norm.imshow(ax=ax).')\n raise ValueError(msg)\n\n points, values = list(zip(*list(self)))\n\n # If norm_field is passed, set values where norm=0 to np.nan,\n # so that they are not plotted.\n if norm_field is not None:\n values = list(values) # make values mutable\n for i, point in enumerate(points):\n if norm_field(point) == 0:\n values[i] = np.nan\n\n # \"Unpack\" values inside arrays.\n values = [v[0] if not np.isnan(v) else v for v in values]\n else:\n # \"Unpack\" values inside arrays.\n values = list(zip(*values))\n\n points = list(zip(*points))\n\n extent = [self.mesh.pmin[self.mesh.info['axis1']],\n self.mesh.pmax[self.mesh.info['axis1']],\n self.mesh.pmin[self.mesh.info['axis2']],\n self.mesh.pmax[self.mesh.info['axis2']]]\n n = (self.mesh.n[self.mesh.info['axis2']],\n self.mesh.n[self.mesh.info['axis1']])\n\n imax = ax.imshow(np.array(values).reshape(n), origin='lower',\n extent=extent, **kwargs)\n\n return imax\n\n def quiver(self, ax=None, color_field=None, **kwargs):\n \"\"\"Plots a vector field plane using `matplotlib.pyplot.quiver`.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`y`)`) and field must be of dimension\n 3 (vector field). Otherwise, ValueError is raised. `quiver`\n adds the plot to `matplotlib.axes.Axes` passed via `ax`\n argument. If there are coordinates where the norm of the field\n is zero, vectors are not plotted at those coordinates. By\n default, plot is not coloured, but by passing a\n `discretisedfield.Field` object of dimension 1 as\n `color_field`, quiver plot will be coloured based on the\n values from the field. All other parameters accepted by\n `matplotlib.pyplot.quiver` can be passed. In order to use this\n function inside Jupyter notebook `%matplotlib inline` must be\n activated after `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the quiver plot will be added.\n color_field : discretisedfield.Field, optional\n A (scalar) field used for determining the colour of the\n quiver plot.\n\n Returns\n -------\n matplotlib.quiver.Quiver object\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane or its\n dimension is not 3.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> field.plane(z=50).quiver(ax=ax, color_field=field.z)\n <matplotlib.quiver.Quiver object at ...>\n\n .. seealso:: :py:func:`~discretisedfield.Field.imshow`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using quiver. '\n 'For instance, field.plane(\\'x\\').quiver(ax=ax).')\n raise ValueError(msg)\n if self.dim != 3:\n msg = 'Only three-dimensional (dim=3) fields can be plotted.'\n raise ValueError(msg)\n\n points, values = list(zip(*list(self)))\n\n # Remove values where norm is 0\n points, values = list(points), list(values) # make them mutable\n points = [p for p, v in zip(points, values)\n if not np.equal(v, 0).all()]\n values = [v for v in values if not np.equal(v, 0).all()]\n if color_field is not None:\n colors = [color_field(p) for p in points]\n colors = list(zip(*colors))\n\n # \"Unpack\" values inside arrays.\n points, values = list(zip(*points)), list(zip(*values))\n\n # Are there any vectors pointing out-of-plane? 
If yes, set the scale.\n if not any(values[self.mesh.info['axis1']] +\n values[self.mesh.info['axis2']]):\n kwargs['scale'] = 1\n\n kwargs['pivot'] = 'mid' # arrow at the centre of the cell\n\n if color_field is None:\n # quiver plot is not coloured.\n qvax = ax.quiver(points[self.mesh.info['axis1']],\n points[self.mesh.info['axis2']],\n values[self.mesh.info['axis1']],\n values[self.mesh.info['axis2']],\n **kwargs)\n\n else:\n # quiver plot is coloured.\n qvax = ax.quiver(points[self.mesh.info['axis1']],\n points[self.mesh.info['axis2']],\n values[self.mesh.info['axis1']],\n values[self.mesh.info['axis2']],\n colors,\n **kwargs)\n\n return qvax\n\n def colorbar(self, ax, coloredplot, cax=None, **kwargs):\n \"\"\"Adds a colorbar to the axes using `matplotlib.pyplot.colorbar`.\n\n Axes to which the colorbar should be added is passed via `ax`\n argument. If the colorbar axes are made before the method is\n called, they should be passed as `cax`. The plot to which the\n colorbar should correspond to is passed via `coloredplot`. All\n other parameters accepted by `matplotlib.pyplot.colorbar` can\n be passed. In order to use this function inside Jupyter\n notebook `%matplotlib inline` must be activated after\n `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the colorbar will be added.\n coloredplot : matplotlib.quiver.Quiver, matplotlib.image.AxesImage\n A plot to which the colorbar should correspond\n cax : matplotlib.axes.Axes, optional\n Colorbar axes.\n\n Returns\n -------\n matplotlib.colorbar.Colorbar\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> coloredplot = field.plane(z=50).quiver(ax=ax, color_field=field.z)\n >>> field.colorbar(ax=ax, coloredplot=coloredplot)\n <matplotlib.colorbar.Colorbar object at ...>\n\n \"\"\"\n if cax is None:\n divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.1)\n\n cbar = plt.colorbar(coloredplot, cax=cax, **kwargs)\n\n return cbar\n\n def k3d_nonzero(self, color=dfu.colormap[0], plot=None, **kwargs):\n \"\"\"Plots the voxels where the value of a scalar field is nonzero.\n\n All mesh cells where the value of the field is not zero will\n be marked using the same color. Only scalar fields can be\n plotted. Otherwise, ValueError is raised. Different colour of\n voxels can be passed in the RGB format using `color`\n parameter. This function is often used to look at the defined\n sample in the finite difference mesh, by inspecting its norm\n (`field.norm.k3d_nonzero`). If `plot` is passed as a\n `k3d.plot.Plot`, plot is added to it. Otherwise, a new k3d\n plot is created. All arguments allowed in `k3d.voxels()` can\n be passed. This function is to be called in Jupyter notebook.\n\n Parameters\n ----------\n color : int/hex, optional\n Voxel color in hexadecimal format.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> def normfun(pos):\n ... x, y, z = pos\n ... if x**2 + y**2 < 30**2:\n ... 
return 1\n ... else:\n ... return 0\n >>> field.norm = normfun\n >>> field.norm.k3d_nonzero()\n Plot(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`\n \"\"\"\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. field.x.k3d_nonzero() '\n 'or norm field.norm.k3d_nonzero().')\n raise ValueError(msg)\n plot_array = np.copy(self.array) # make a deep copy\n plot_array = np.squeeze(plot_array) # remove an empty dimension\n plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)\n plot_array[plot_array != 0] = 1 # all cells have the same colour\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the plot extent to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n pmin = np.divide(self.mesh.pmin, 1e-9)\n pmax = np.divide(self.mesh.pmax, 1e-9)\n else:\n pmin = self.mesh.pmin\n pmax = self.mesh.pmax\n\n dfu.voxels(plot_array, pmin, pmax, colormap=color,\n plot=plot, **kwargs)\n\n def k3d_voxels(self, norm_field=None, plot=None, **kwargs):\n \"\"\"Plots the scalar field as a coloured `k3d.voxels()` plot.\n\n At all mesh cells, a voxel will be plotted anc coloured\n according to its value. If the scalar field plotted is\n extracted from a vector field, which has coordinates where the\n norm of the field is zero, the norm of that vector field can\n be passed using `norm_field` argument, so that voxels at those\n coordinates are not showed. Only scalar fields can be\n plotted. Otherwise, ValueError is raised. If `plot` is passed\n as a `k3d.plot.Plot`, plot is added to it. Otherwise, a new\n k3d plot is created. All arguments allowed in `k3d.voxels()`\n can be passed. This function is to be called in Jupyter\n notebook.\n\n Parameters\n ----------\n norm_field : discretisedfield.Field, optional\n A (scalar) norm field used for determining whether certain\n voxels should be plotted.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> def normfun(pos):\n ... x, y, z = pos\n ... if x**2 + y**2 < 30**2:\n ... return 1\n ... else:\n ... return 0\n >>> field.norm = normfun\n >>> field.x.k3d_voxels(norm_field=field.norm)\n Plot(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`\n\n \"\"\"\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. 
field.x.k3d_nonzero() '\n 'or norm field.norm.k3d_nonzero().')\n raise ValueError(msg)\n\n plot_array = np.copy(self.array) # make a deep copy\n plot_array = plot_array[..., 0] # remove an empty dimension\n\n plot_array -= plot_array.min()\n # In the case of uniform fields, division by zero can be\n # encountered.\n if plot_array.max() != 0:\n plot_array /= plot_array.max()\n plot_array *= 254\n plot_array += 1\n plot_array = plot_array.round()\n plot_array = plot_array.astype(int)\n\n if norm_field is not None:\n for index in self.mesh.indices:\n if norm_field(self.mesh.index2point(index)) == 0:\n plot_array[index] = 0\n\n plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)\n\n cmap = matplotlib.cm.get_cmap('viridis', 256)\n colormap = [dfu.num2hexcolor(i, cmap) for i in range(cmap.N)]\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the plot extent to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n pmin = np.divide(self.mesh.pmin, 1e-9)\n pmax = np.divide(self.mesh.pmax, 1e-9)\n else:\n pmin = self.mesh.pmin\n pmax = self.mesh.pmax\n\n dfu.voxels(plot_array, pmin, pmax, colormap=colormap,\n plot=plot, **kwargs)\n\n def k3d_vectors(self, color_field=None, points=True, plot=None, **kwargs):\n \"\"\"Plots the vector field as a `k3d.vectors()` plot.\n\n At all mesh cells, a vector will be plotted if its norm is not\n zero. Vectors can be coloured according to the values of the\n scalar field passed as `color_field`. Only vector fields can\n be plotted. Otherwise, ValueError is raised. Points at the\n discretisation cell centres can be added by setting\n `points=True`. If `plot` is passed as a `k3d.plot.Plot`, plot\n is added to it. Otherwise, a new k3d plot is created. All\n arguments allowed in `k3d.vectors()` can be passed. This\n function is to be called in Jupyter notebook.\n\n Parameters\n ----------\n color_field : discretisedfield.Field, optional\n A (scalar) field used for determining the colours of\n vectors.\n points : bool, optional\n If `True`, points will be added to the discretisation cell\n centres.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n 1. Plotting an entire vector field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.k3d_vectors(color_field=field.x)\n Plot(...)\n\n 2. Plotting the slice of a vector field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.plane('x').k3d_vectors(color_field=field.x)\n Plot(...)\n\n .. 
seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`\n\n \"\"\"\n if self.dim != 3:\n msg = 'Only three-dimensional (dim=3) fields can be plotted.'\n raise ValueError(msg)\n\n coordinates, vectors, color_values = [], [], []\n norm = self.norm # assigned to be computed only once\n for coord, value in self:\n if norm(coord) > 0:\n coordinates.append(coord)\n vectors.append(value)\n if color_field is not None:\n color_values.append(color_field(coord)[0])\n\n coordinates, vectors = np.array(coordinates), np.array(vectors)\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the coordinates to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n coordinates /= 1e-9\n cell = np.divide(self.mesh.cell, 1e-9)\n else:\n cell = self.mesh.cell\n\n # Scale the vectors to correspond to the size of cells.\n vectors /= vectors.max()\n vectors *= 0.8*np.array(cell)\n\n # Middle of the arrow is at the cell centre.\n coordinates -= 0.5 * vectors\n\n if color_field is not None:\n color_values = np.array(color_values)\n color_values -= color_values.min()\n # In the case of uniform fields, division by zero can be\n # encountered.\n if color_values.max() != 0:\n color_values /= color_values.max()\n color_values *= 256\n color_values = color_values.round()\n color_values = color_values.astype(int)\n\n cmap = matplotlib.cm.get_cmap('viridis', 256)\n colors = []\n for c in color_values:\n color = dfu.num2hexcolor(c, cmap)\n colors.append((color, color))\n else:\n colors = []\n\n plot = dfu.vectors(coordinates, vectors, colors=colors,\n plot=plot, **kwargs)\n\n if points:\n dfu.points(coordinates + 0.5 * vectors, plot=plot)\n"
] | [
[
"numpy.swapaxes",
"numpy.array_equal",
"numpy.linspace",
"numpy.isnan",
"numpy.squeeze",
"numpy.linalg.norm",
"numpy.all",
"matplotlib.pyplot.colorbar",
"numpy.copy",
"numpy.equal",
"matplotlib.cm.get_cmap",
"numpy.array",
"numpy.divide",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
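
Pulling the `Mesh`/`Field` pieces from the record above into one place, here is a minimal end-to-end sketch built from its own docstring examples (assuming the `discretisedfield` package is importable as `df`; the value function and file name are illustrative only):

import os
import discretisedfield as df

# Small mesh and a spatially varying vector field, normalised to a fixed magnitude.
mesh = df.Mesh(p1=(0, 0, 0), p2=(10e-9, 10e-9, 10e-9), cell=(1e-9, 1e-9, 1e-9))

def m_init(pos):
    x, _, _ = pos
    return (0, 0, 1) if x < 5e-9 else (0, 0, -1)   # two domains along x

field = df.Field(mesh, dim=3, value=m_init, norm=8e5)
print(field((1e-9, 1e-9, 1e-9)))   # samples the containing cell -> (0.0, 0.0, 800000.0)

field.write('m.omf')               # OVF text representation, per Field.write above
field_back = df.Field.fromfile('m.omf')
os.remove('m.omf')
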
Fei-Wang/dl-pytorch | [
"a7672603e2de7824d0ff7e97b69dedad3fd9d476"
] | [
"test/test_models/test_palm.py"
] | [
"import torch\n\nfrom luffy.models.palm import *\n\n\ndef test_palm_tony():\n model = PaLMTony(num_tokens=20000)\n\n tokens = torch.randint(0, 20000, (1, 2048))\n feat = model(tokens)\n assert feat.shape == (1, 2048, 20000)\n"
] | [
[
"torch.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
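
The test in the record above only checks the output shape. A small follow-on sketch (same hypothetical `luffy` package; `PaLMTony` is assumed to be exported by name, since the test pulls it in via `import *`) shows what those logits are typically used for:

import torch
from luffy.models.palm import PaLMTony   # hypothetical package path, as in the test above

model = PaLMTony(num_tokens=20000)
tokens = torch.randint(0, 20000, (1, 2048))
logits = model(tokens)                        # (batch, seq_len, vocab) = (1, 2048, 20000)
next_token = logits[:, -1, :].argmax(dim=-1)  # greedy pick of the next token id
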
bestetc/batchflow | [
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06"
] | [
"batchflow/models/tf/nn/train.py",
"batchflow/models/metrics/loss.py",
"batchflow/batch_image.py",
"batchflow/models/tf/utils.py",
"batchflow/models/torch/losses/lovasz.py",
"batchflow/models/tf/layers/drop_block.py"
] | [
"\"\"\" Helpers for training \"\"\"\nfrom math import pi\n\nimport tensorflow as tf\n\ndef piecewise_constant(global_step, *args, **kwargs):\n \"\"\" Constant learning rate decay (uses global_step param instead of x) \"\"\"\n return tf.train.piecewise_constant(global_step, *args, **kwargs)\n\ndef cyclic_learning_rate(learning_rate, global_step, max_lr, step_size=10,\n mode='tri', name='CyclicLearningRate'):\n \"\"\" This function varies the learning rate between the\n minimum (learning_rate) and the maximum (max_lr).\n It returns the decayed learning rate.\n\n Parameters\n ----------\n learning_rate : float or tf.Tensor\n The minimum learning rate boundary.\n global_step : int or tf.Tensor\n Global_step refers to the number of batches seen by the model.\n It is use for the cyclic computation. Must not be negative.\n max_lr : float or tf.Tensor\n The maximum learning rate boundary.\n step_size : int or tf.Tensor\n The number of iterations in half a cycle (the default is 10).\n mode : {'tri', 'sin', 'saw'}\n Set the learning rate change function.\n name : str\n Name of the operation (the default is 'CyclicLearningRate').\n\n Returns\n -------\n tf.Tensor\n\n Notes\n -----\n More detailed information about `mode`:\n\n If 'tri':\n Default, linearly increasing then linearly decreasing the\n learning rate at each cycle. Learning rate starting\n from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.\n See `Leslie N. Smith, Cyclical Learning Rates for Training Neural Networks\n <https://arxiv.org/abs/1506.01186>`_ for more information.\n\n It is computed as::\n\n decayed_learning_rate = abs(mod((global_step + step_size / 4) / step_size, 1) - 0.5) *\n 2 * (max_lr - learning_rate) +\n learning_rate\n\n\n If 'sin':\n Learning rate changes as a sine wave, starting\n from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.\n\n It is computed as::\n\n decayed_learning_rate = (learning_rate - max_lr) / 2 *\n sin(pi * global_step / step_size) +\n (max_lr + learning_rate) / 2\n\n\n If 'saw':\n Learning rate linearly increasing from `learning_rate` to `max_lr`\n and then sharply drops to `learning_rate` at each cycle.\n Learning rate starting from `learning_rate` then increasing.\n\n It is computed as::\n\n decayed_learning_rate = (max_lr - learning_rate) *\n (floor(global_step / step_size) - global_step / step_size) +\n learning_rate\n \"\"\"\n with tf.name_scope(name):\n learning_rate = tf.cast(learning_rate, dtype=tf.float32)\n global_step = tf.cast(global_step, dtype=tf.float32)\n step_size = tf.cast(step_size, dtype=tf.float32)\n max_lr = tf.cast(max_lr, dtype=tf.float32)\n\n if mode == 'tri':\n periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1)\n first_factor = tf.abs(periodic_comp - 0.5)\n second_factor = 2 * (max_lr - learning_rate)\n second_comp = learning_rate\n elif mode == 'sin':\n first_factor = (learning_rate - max_lr) / 2.\n second_factor = tf.sin((pi * global_step) / step_size)\n second_comp = (learning_rate + max_lr) / 2.\n elif mode == 'saw':\n first_factor = max_lr - learning_rate\n second_factor = tf.mod(global_step / step_size, 1)\n second_comp = learning_rate\n return first_factor * second_factor + second_comp\n",
"\"\"\" Loss as a Metrics to be used in research pipelines added with `run=True` \"\"\"\n\nimport numpy as np\n\nfrom .base import Metrics\n\n\nclass Loss(Metrics):\n \"\"\"\n This is a helper class to aggregate losses from pipelines\n that are used in Research objects with `run=True`,\n like test pipelines\n\n Parameters\n ----------\n loss : float\n loss value obtained from model\n \"\"\"\n\n def __init__(self, loss, batch_len):\n super().__init__()\n\n self.losses = [loss]\n self.batch_lengths = [batch_len]\n\n def agg_loss(args):\n losses, blens = args\n return np.sum(np.asarray(losses) * np.asarray(blens)) / np.sum(blens)\n\n self._agg_fn_dict.update(mean=agg_loss)\n\n def batchwise_loss(args):\n losses, _ = args\n return losses\n\n self._agg_fn_dict.update(batchwise=batchwise_loss)\n\n def append(self, metrics):\n \"\"\" Extend with data from another metrics. \"\"\"\n self.losses.extend(metrics.losses)\n self.batch_lengths.extend(metrics.batch_lengths)\n\n def loss(self):\n return self.losses, self.batch_lengths\n",
"\"\"\" Contains Batch classes for images \"\"\"\nimport os\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\nimport PIL\nimport PIL.ImageOps\nimport PIL.ImageChops\nimport PIL.ImageFilter\nimport PIL.ImageEnhance\n\nfrom .batch import Batch\nfrom .decorators import action, apply_parallel, inbatch_parallel\nfrom .dsindex import FilesIndex\n\n\nclass BaseImagesBatch(Batch):\n \"\"\" Batch class for 2D images.\n\n Note, that if any class method is wrapped with `@apply_parallel` decorator\n than for inner calls (i.e. from other class methods) should be used version\n of desired method with underscores. (For example, if there is a decorated\n `method` than you need to call `_method_` from inside of `other_method`).\n Same is applicable for all child classes of :class:`batch.Batch`.\n \"\"\"\n components = \"images\", \"labels\", \"masks\"\n # Class-specific defaults for :meth:`.Batch.apply_parallel`\n apply_defaults = dict(target='for',\n post='_assemble',\n src='images',\n dst='images',\n )\n\n def _make_path(self, ix, src=None):\n \"\"\" Compose path.\n\n Parameters\n ----------\n ix : str\n element's index (filename)\n src : str\n Path to folder with images. Used if `self.index` is not `FilesIndex`.\n\n Returns\n -------\n path : str\n Full path to an element.\n \"\"\"\n\n if isinstance(src, FilesIndex):\n path = src.get_fullpath(ix)\n elif isinstance(self.index, FilesIndex):\n path = self.index.get_fullpath(ix)\n else:\n path = os.path.join(src, str(ix))\n return path\n\n def _load_image(self, ix, src=None, fmt=None, dst=\"images\"):\n \"\"\" Loads image.\n\n .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str, dataset.FilesIndex, None\n path to the folder with an image. If src is None then it is determined from the index.\n dst : str\n Component to write images to.\n fmt : str\n Format of the an image\n\n Raises\n ------\n NotImplementedError\n If this method is not defined in a child class\n \"\"\"\n _ = self, ix, src, dst, fmt\n raise NotImplementedError(\"Must be implemented in a child class\")\n\n @action\n def load(self, *args, src=None, fmt=None, dst=None, **kwargs):\n \"\"\" Load data.\n\n .. note:: if `fmt='images'` than ``components`` must be a single component (str).\n .. note:: All parameters must be named only.\n\n Parameters\n ----------\n src : str, None\n Path to the folder with data. If src is None then path is determined from the index.\n fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}\n Format of the file to download.\n dst : str, sequence\n components to download.\n \"\"\"\n if fmt == 'image':\n return self._load_image(src, fmt=fmt, dst=dst)\n return super().load(src=src, fmt=fmt, dst=dst, *args, **kwargs)\n\n\n def _dump_image(self, ix, src='images', dst=None, fmt=None):\n \"\"\" Saves image to dst.\n\n .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str\n Component to get images from.\n dst : str\n Folder where to dump. If dst is None then it is determined from index.\n\n Raises\n ------\n NotImplementedError\n If this method is not defined in a child class\n \"\"\"\n _ = self, ix, src, dst, fmt\n raise NotImplementedError(\"Must be implemented in a child class\")\n\n @action\n def dump(self, *args, dst=None, fmt=None, components=\"images\", **kwargs):\n \"\"\" Dump data.\n\n .. note:: If `fmt='images'` than ``dst`` must be a single component (str).\n\n .. 
note:: All parameters must be named only.\n\n Parameters\n ----------\n dst : str, None\n Path to the folder where to dump. If dst is None then path is determined from the index.\n fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}\n Format of the file to save.\n components : str, sequence\n Components to save.\n ext: str\n Format to save images to.\n\n Returns\n -------\n self\n \"\"\"\n if fmt == 'image':\n return self._dump_image(components, dst, fmt=kwargs.pop('ext'))\n return super().dump(dst=dst, fmt=fmt, components=components, *args, **kwargs)\n\n\nclass ImagesBatch(BaseImagesBatch):\n \"\"\" Batch class for 2D images.\n\n Images are stored as numpy arrays of PIL.Image.\n\n PIL.Image has the following system of coordinates::\n\n X\n 0 -------------- >\n |\n |\n | images's pixels\n |\n |\n Y v\n\n Pixel's position is defined as (x, y)\n\n Note, that if any class method is wrapped with `@apply_parallel` decorator\n than for inner calls (i.e. from other class methods) should be used version\n of desired method with underscores. (For example, if there is a decorated\n `method` than you need to call `_method_` from inside of `other_method`).\n Same is applicable for all child classes of :class:`batch.Batch`.\n \"\"\"\n\n @classmethod\n def _get_image_shape(cls, image):\n if isinstance(image, PIL.Image.Image):\n return image.size\n return image.shape[:2]\n\n @property\n def image_shape(self):\n \"\"\": tuple - shape of the image\"\"\"\n _, shapes_count = np.unique([image.size for image in self.images], return_counts=True, axis=0)\n if len(shapes_count) == 1:\n if isinstance(self.images[0], PIL.Image.Image):\n return (*self.images[0].size, len(self.images[0].getbands()))\n return self.images[0].shape\n raise RuntimeError('Images have different shapes')\n\n @inbatch_parallel(init='indices', post='_assemble')\n def _load_image(self, ix, src=None, fmt=None, dst=\"images\"):\n \"\"\" Loads image\n\n .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str, dataset.FilesIndex, None\n Path to the folder with an image. If src is None then it is determined from the index.\n dst : str\n Component to write images to.\n fmt : str\n Format of an image.\n \"\"\"\n return PIL.Image.open(self._make_path(ix, src))\n\n @inbatch_parallel(init='indices')\n def _dump_image(self, ix, src='images', dst=None, fmt=None):\n \"\"\" Saves image to dst.\n\n .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str\n Component to get images from.\n dst : str\n Folder where to dump.\n fmt : str\n Format of saved image.\n \"\"\"\n if dst is None:\n raise RuntimeError('You must specify `dst`')\n image = self.get(ix, src)\n ix = str(ix) + '.' 
+ fmt if fmt is not None else str(ix)\n image.save(os.path.join(dst, ix))\n\n def _assemble_component(self, result, *args, component='images', **kwargs):\n \"\"\" Assemble one component after parallel execution.\n\n Parameters\n ----------\n result : sequence, array_like\n Results after inbatch_parallel.\n component : str\n component to assemble\n \"\"\"\n _ = args, kwargs\n if isinstance(result[0], PIL.Image.Image):\n setattr(self, component, np.asarray(result, dtype=object))\n else:\n try:\n setattr(self, component, np.stack(result))\n except ValueError:\n array_result = np.empty(len(result), dtype=object)\n array_result[:] = result\n setattr(self, component, array_result)\n\n @apply_parallel\n def to_pil(self, image, mode=None):\n \"\"\"converts images in Batch to PIL format\n\n Parameters\n ----------\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n \"\"\"\n if isinstance(image, PIL.Image.Image):\n return image\n\n if mode is None:\n if len(image.shape) == 2:\n mode = 'L'\n elif len(image.shape) == 3:\n if image.shape[-1] == 3:\n mode = 'RGB'\n elif image.shape[-1] == 1:\n mode = 'L'\n image = image[:, :, 0]\n elif image.shape[-1] == 2:\n mode = 'LA'\n elif image.shape[-1] == 4:\n mode = 'RGBA'\n else:\n raise ValueError('Unknown image type as image has', image.shape[-1], 'channels')\n elif mode == 'L' and len(image.shape) == 3:\n image = image[..., 0]\n return PIL.Image.fromarray(image, mode)\n\n def _calc_origin(self, image_shape, origin, background_shape):\n \"\"\" Calculate coordinate of the input image with respect to the background.\n\n Parameters\n ----------\n image_shape : sequence\n shape of the input image.\n origin : array_like, sequence, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}\n Position of the input image with respect to the background. Can be one of:\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. 
Position is sampled uniformly such that there is no need for cropping.\n - other - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position of\n the origin in a valid region of image.\n\n background_shape : sequence\n shape of the background image.\n\n Returns\n -------\n sequence : calculated origin in the form (column, row)\n \"\"\"\n if isinstance(origin, str):\n if origin == 'top_left':\n origin = 0, 0\n elif origin == 'top_right':\n origin = (background_shape[0]-image_shape[0]+1, 0)\n elif origin == 'bottom_left':\n origin = (0, background_shape[1]-image_shape[1]+1)\n elif origin == 'bottom_right':\n origin = (background_shape[0]-image_shape[0]+1,\n background_shape[1]-image_shape[1]+1)\n elif origin == 'center':\n origin = np.maximum(0, np.asarray(background_shape) - image_shape) // 2\n elif origin == 'random':\n origin = (np.random.randint(background_shape[0]-image_shape[0]+1),\n np.random.randint(background_shape[1]-image_shape[1]+1))\n else:\n raise ValueError(\"If string, origin should be one of ['center', 'top_left', 'top_right', \"\n \"'bottom_left', 'bottom_right', 'random']. Got '{}'.\".format(origin))\n elif all(0 <= elem < 1 for elem in origin):\n region = ((background_shape[0]-image_shape[0]+1),\n (background_shape[1]-image_shape[1]+1))\n origin = np.asarray(origin) * region\n elif not all(isinstance(elem, int) for elem in origin):\n raise ValueError('If not a string, origin should be either a sequence of ints or sequence of '\n 'floats in [0, 1) interval. Got {}'.format(origin))\n\n return np.asarray(origin, dtype=np.int)\n\n @apply_parallel\n def scale(self, image, factor, preserve_shape=False, origin='center', resample=0):\n \"\"\" Scale the content of each image in the batch.\n\n Resulting shape is obtained as original_shape * factor.\n\n Parameters\n -----------\n factor : float, sequence\n resulting shape is obtained as original_shape * factor\n\n - float - scale all axes with the given factor\n - sequence (factor_1, factort_2, ...) - scale each axis with the given factor separately\n\n preserve_shape : bool\n whether to preserve the shape of the image after scaling\n\n origin : array-like, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}\n Relevant only if `preserve_shape` is True.\n If `scale` < 1, defines position of the scaled image with respect to the original one's shape.\n If `scale` > 1, defines position of cropping box.\n\n Can be one of:\n\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. 
Position is sampled uniformly such that there is no need for cropping.\n - array_like - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position\n of the origin in a valid region of image.\n\n resample: int\n Parameter passed to PIL.Image.resize. Interpolation order\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' option for origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n\n Returns\n -------\n self\n \"\"\"\n original_shape = self._get_image_shape(image)\n rescaled_shape = list(np.int32(np.ceil(np.asarray(original_shape)*factor)))\n rescaled_image = image.resize(rescaled_shape, resample=resample)\n if preserve_shape:\n rescaled_image = self._preserve_shape(original_shape, rescaled_image, origin)\n return rescaled_image\n\n @apply_parallel\n def crop(self, image, origin, shape, crop_boundaries=False):\n \"\"\" Crop an image.\n\n Extract image data from the window of the size given by `shape` and placed at `origin`.\n\n Parameters\n ----------\n origin : sequence, str\n Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.\n shape : sequence\n crop size in the form of (rows, columns)\n crop_boundaries : bool\n If `True` then crop is got only from image's area. Shape of the crop might diverge with the passed one\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n origin = self._calc_origin(shape, origin, image.size)\n right_bottom = origin + shape\n\n if crop_boundaries:\n out_of_boundaries = origin < 0\n origin[out_of_boundaries] = 0\n\n image_shape = np.asarray(image.size)\n out_of_boundaries = right_bottom > image_shape\n right_bottom[out_of_boundaries] = image_shape[out_of_boundaries]\n\n return image.crop((*origin, *right_bottom))\n\n @apply_parallel\n def put_on_background(self, image, background, origin, mask=None):\n \"\"\" Put an image on a background at given origin\n\n Parameters\n ----------\n background : PIL.Image, np.ndarray of np.uint8\n Blank background to put image on.\n origin : sequence, str\n Location of the cropping box. 
See :meth:`.ImagesBatch._calc_origin` for details.\n mask : None, PIL.Image, np.ndarray of np.uint8\n mask passed to PIL.Image.paste\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n if not isinstance(background, PIL.Image.Image):\n background = PIL.Image.fromarray(background)\n else:\n background = background.copy()\n\n if not isinstance(mask, PIL.Image.Image):\n mask = PIL.Image.fromarray(mask) if mask is not None else None\n\n origin = list(self._calc_origin(self._get_image_shape(image), origin,\n self._get_image_shape(background)))\n\n background.paste(image, origin, mask)\n\n return background\n\n def _preserve_shape(self, original_shape, transformed_image, origin='center'):\n \"\"\" Change the transformed image's shape by cropping and adding empty pixels to fit the shape of original image.\n\n Parameters\n ----------\n original_shape : sequence\n transformed_image : np.ndarray\n input_origin : array-like, {'center', 'top_left', 'random'}\n Position of the scaled image with respect to the original one's shape.\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. Position is sampled uniformly such that there is no need for cropping.\n - array_like - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position\n of the origin in a valid region of image.\n crop_origin: array-like, {'center', 'top_left', 'random'}\n Position of crop from transformed image.\n Has same values as `input_origin`.\n\n Returns\n -------\n np.ndarray : image after described actions\n \"\"\"\n transformed_shape = self._get_image_shape(transformed_image)\n if np.any(np.array(transformed_shape) < np.array(original_shape)):\n n_channels = len(transformed_image.getbands())\n if n_channels == 1:\n background = np.zeros(original_shape, dtype=np.uint8)\n else:\n background = np.zeros((*original_shape, n_channels), dtype=np.uint8)\n return self._put_on_background_(transformed_image, background, origin)\n return self._crop_(transformed_image, origin, original_shape, True)\n\n @apply_parallel\n def filter(self, image, mode, *args, **kwargs):\n \"\"\" Filters an image. Calls ``image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))``.\n\n For more details see `ImageFilter <http://pillow.readthedocs.io/en/stable/reference/ImageFilter.html>_`.\n\n Parameters\n ----------\n mode : str\n Name of the filter.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. 
Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))\n\n @apply_parallel\n def transform(self, image, *args, **kwargs):\n \"\"\" Calls ``image.transform(*args, **kwargs)``.\n\n For more information see\n `<http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform>_`.\n\n Parameters\n ----------\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n size = kwargs.pop('size', self._get_image_shape(image))\n return image.transform(*args, size=size, **kwargs)\n\n @apply_parallel\n def resize(self, image, size, *args, **kwargs):\n \"\"\" Calls ``image.resize(*args, **kwargs)``.\n\n For more details see `<https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize>_`.\n\n Parameters\n ----------\n size : tuple\n the resulting size of the image. If one of the components of tuple is None,\n corresponding dimension will be proportionally resized.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if size[0] is None and size[1] is None:\n raise ValueError('At least one component of the parameter \"size\" must be a number.')\n if size[0] is None:\n new_size = (int(image.size[0] * size[1] / image.size[1]), size[1])\n elif size[1] is None:\n new_size = (size[0], int(image.size[1] * size[0] / image.size[0]))\n else:\n new_size = size\n\n return image.resize(new_size, *args, **kwargs)\n\n @apply_parallel\n def shift(self, image, offset, mode='const'):\n \"\"\" Shifts an image.\n\n Parameters\n ----------\n offset : (Number, Number)\n mode : {'const', 'wrap'}\n How to fill borders\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if mode == 'const':\n image = image.transform(size=image.size,\n method=PIL.Image.AFFINE,\n data=(1, 0, -offset[0], 0, 1, -offset[1]))\n elif mode == 'wrap':\n image = PIL.ImageChops.offset(image, *offset)\n else:\n raise ValueError(\"mode must be one of ['const', 'wrap']\")\n return image\n\n @apply_parallel\n def pad(self, image, *args, **kwargs):\n \"\"\" Calls ``PIL.ImageOps.expand``.\n\n For more details see `<http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.expand>`_.\n\n Parameters\n ----------\n offset : sequence\n Size of the borders in pixels. The order is (left, top, right, bottom).\n mode : {'const', 'wrap'}\n Filling mode\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return PIL.ImageOps.expand(image, *args, **kwargs)\n\n @apply_parallel\n def rotate(self, image, *args, **kwargs):\n \"\"\" Rotates an image.\n\n kwargs are passed to PIL.Image.rotate\n\n Parameters\n ----------\n angle: Number\n In degrees counter clockwise.\n resample: int\n Interpolation order\n expand: bool\n Whether to expand the output to hold the whole image. Default is False.\n center: (Number, Number)\n Center of rotation. 
Default is the center of the image.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.rotate(*args, **kwargs)\n\n @apply_parallel\n def flip(self, image, mode='lr'):\n \"\"\" Flips image.\n\n Parameters\n ----------\n mode : {'lr', 'ud'}\n\n - 'lr' - apply the left/right flip\n - 'ud' - apply the upside/down flip\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if mode == 'lr':\n return PIL.ImageOps.mirror(image)\n return PIL.ImageOps.flip(image)\n\n @apply_parallel\n def invert(self, image, channels='all'):\n \"\"\" Invert givn channels.\n\n Parameters\n ----------\n channels : int, sequence\n Indices of the channels to invert.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if channels == 'all':\n image = PIL.ImageChops.invert(image)\n else:\n bands = list(image.split())\n channels = (channels,) if isinstance(channels, Number) else channels\n for channel in channels:\n bands[channel] = PIL.ImageChops.invert(bands[channel])\n image = PIL.Image.merge('RGB', bands)\n return image\n\n @apply_parallel\n def salt(self, image, p_noise=.015, color=255, size=(1, 1)):\n \"\"\" Set random pixel on image to givan value.\n\n Every pixel will be set to ``color`` value with probability ``p_noise``.\n\n Parameters\n ----------\n p_noise : float\n Probability of salting a pixel.\n color : float, int, sequence, callable\n Color's value.\n\n - int, float, sequence -- value of color\n - callable -- color is sampled for every chosen pixel (rules are the same as for int, float and sequence)\n size : int, sequence of int, callable\n Size of salt\n\n - int -- square salt with side ``size``\n - sequence -- recangular salt in the form (row, columns)\n - callable -- size is sampled for every chosen pixel (rules are the same as for int and sequence)\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n mask_size = np.asarray(self._get_image_shape(image))\n mask_salt = np.random.binomial(1, p_noise, size=mask_size).astype(bool)\n image = np.array(image)\n if isinstance(size, (tuple, int)) and size in [1, (1, 1)] and not callable(color):\n image[mask_salt] = color\n else:\n size_lambda = size if callable(size) else lambda: size\n color_lambda = color if callable(color) else lambda: color\n mask_salt = np.where(mask_salt)\n for i in range(len(mask_salt[0])):\n current_size = size_lambda()\n current_size = (current_size, current_size) if isinstance(current_size, Number) else current_size\n left_top = np.asarray((mask_salt[0][i], mask_salt[1][i]))\n right_bottom = np.minimum(left_top + current_size, self._get_image_shape(image))\n image[left_top[0]:right_bottom[0], left_top[1]:right_bottom[1]] = color_lambda()\n\n return PIL.Image.fromarray(image)\n\n @apply_parallel\n def clip(self, image, low=0, high=255):\n \"\"\" Truncate image's pixels.\n\n Parameters\n ----------\n low : int, float, sequence\n Actual pixel's value is equal max(value, low). 
If sequence is given, then its length must coincide\n with the number of channels in an image and each channel is thresholded separately\n high : int, float, sequence\n Actual pixel's value is equal min(value, high). If sequence is given, then its length must coincide\n with the number of channels in an image and each channel is thresholded separately\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if isinstance(low, Number):\n low = tuple([low]*3)\n if isinstance(high, Number):\n high = tuple([high]*3)\n\n high = PIL.Image.new('RGB', image.size, high)\n low = PIL.Image.new('RGB', image.size, low)\n return PIL.ImageChops.lighter(PIL.ImageChops.darker(image, high), low)\n\n @apply_parallel\n def enhance(self, image, layout='hcbs', factor=(1, 1, 1, 1)):\n \"\"\" Apply enhancements from PIL.ImageEnhance to the image.\n\n Parameters\n ----------\n layout : str\n defines layout of operations, default is `hcbs`:\n h - color\n c - contrast\n b - brightness\n s - sharpness\n\n factor : float or tuple of float\n factor of enhancement for each operation listed in `layout`.\n \"\"\"\n enhancements = {\n 'h': 'Color',\n 'c': 'Contrast',\n 'b': 'Brightness',\n 's': 'Sharpness'\n }\n\n if isinstance(factor, float):\n factor = (factor,) * len(layout)\n if len(layout) != len(factor):\n raise ValueError(\"'layout' and 'factor' should be of same length!\")\n\n for alias, multiplier in zip(layout, factor):\n enhancement = enhancements.get(alias)\n if enhancement is None:\n raise ValueError('Unknown enhancement alias: ', alias)\n image = getattr(PIL.ImageEnhance, enhancement)(image).enhance(multiplier)\n\n return image\n\n @apply_parallel\n def multiply(self, image, multiplier=1., clip=False, preserve_type=False):\n \"\"\" Multiply each pixel by the given multiplier.\n\n Parameters\n ----------\n multiplier : float, sequence\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n multiplier = np.float32(multiplier)\n if isinstance(image, PIL.Image.Image):\n if preserve_type is False:\n warnings.warn(\"Note that some info might be lost during `multiply` transformation since PIL.image \"\n \"stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or \"\n \"consider using `to_array` action before multiplication.\")\n return PIL.Image.fromarray(np.clip(multiplier*np.asarray(image), 0, 255).astype(np.uint8))\n dtype = image.dtype if preserve_type else np.float\n if clip:\n image = np.clip(multiplier*image, 0, 255 if dtype == np.uint8 else 1.)\n else:\n image = multiplier * image\n return image.astype(dtype)\n\n @apply_parallel\n def add(self, image, term=1., clip=False, preserve_type=False):\n \"\"\" Add term to each pixel.\n\n Parameters\n ----------\n term : float, sequence\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. 
Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n term = np.float32(term)\n if isinstance(image, PIL.Image.Image):\n return PIL.Image.fromarray(np.clip(term+np.asarray(image), 0, 255).astype(np.uint8))\n dtype = image.dtype if preserve_type else np.float\n if clip:\n image = np.clip(term+image, 0, 255 if dtype == np.uint8 else 1.)\n else:\n image = term + image\n return image.astype(dtype)\n\n @apply_parallel\n def pil_convert(self, image, mode=\"L\"):\n \"\"\" Convert image. Actually calls ``image.convert(mode)``.\n\n Parameters\n ----------\n mode : str\n Pass 'L' to convert to grayscale\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.convert(mode)\n\n @apply_parallel\n def posterize(self, image, bits=4):\n \"\"\" Posterizes image.\n\n More concretely, it quantizes pixels' values so that they have``2^bits`` colors\n\n Parameters\n ----------\n bits : int\n Number of bits used to store a color's component.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return PIL.ImageOps.posterize(image, bits)\n\n @apply_parallel\n def cutout(self, image, origin, shape, color):\n \"\"\" Fills given areas with color\n\n .. note:: It is assumed that ``origins``, ``shapes`` and ``colors`` have the same length.\n\n Parameters\n ----------\n origin : sequence, str\n Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.\n shape : sequence, int\n Shape of a filled box. Can be one of:\n - sequence - crop size in the form of (rows, columns)\n - int - shape has squared form\n\n color : sequence, number\n Color of a filled box. Can be one of:\n\n - sequence - (r,g,b) form\n - number - grayscale\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n image = image.copy()\n shape = (shape, shape) if isinstance(shape, Number) else shape\n origin = self._calc_origin(shape, origin, self._get_image_shape(image))\n color = (color, color, color) if isinstance(color, Number) else color\n image.paste(PIL.Image.new('RGB', tuple(shape), tuple(color)), tuple(origin))\n return image\n\n def _assemble_patches(self, patches, *args, dst, **kwargs):\n \"\"\" Assembles patches after parallel execution.\n\n Parameters\n ----------\n patches : sequence\n Patches to gather. 
pathces.shape must be like (batch.size, patches_i, patch_height, patch_width, n_channels)\n dst : str\n Component to put patches in.\n \"\"\"\n _ = args, kwargs\n new_items = np.concatenate(patches)\n setattr(self, dst, new_items)\n\n @action\n @inbatch_parallel(init='indices', post='_assemble_patches')\n def split_to_patches(self, ix, patch_shape, stride=1, drop_last=False, src='images', dst=None):\n \"\"\" Splits image to patches.\n\n Small images with the same shape (``patch_shape``) are cropped from the original one with stride ``stride``.\n\n Parameters\n ----------\n patch_shape : int, sequence\n Patch's shape in the from (rows, columns). If int is given then patches have square shape.\n stride : int, square\n Step of the moving window from which patches are cropped. If int is given then the window has square shape.\n drop_last : bool\n Whether to drop patches whose window covers area out of the image.\n If False is passed then these patches are cropped from the edge of an image. See more in tutorials.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n _ = dst\n image = self.get(ix, src)\n image_shape = self._get_image_shape(image)\n image = np.array(image)\n stride = (stride, stride) if isinstance(stride, Number) else stride\n patch_shape = (patch_shape, patch_shape) if isinstance(patch_shape, Number) else patch_shape\n patches = []\n\n def _iterate_columns(row_from, row_to):\n column = 0\n while column < image_shape[1]-patch_shape[1]+1:\n patches.append(PIL.Image.fromarray(image[row_from:row_to, column:column+patch_shape[1]]))\n column += stride[1]\n if not drop_last and column + patch_shape[1] != image_shape[1]:\n patches.append(PIL.Image.fromarray(image[row_from:row_to,\n image_shape[1]-patch_shape[1]:image_shape[1]]))\n\n row = 0\n while row < image_shape[0]-patch_shape[0]+1:\n _iterate_columns(row, row+patch_shape[0])\n row += stride[0]\n if not drop_last and row + patch_shape[0] != image_shape[0]:\n _iterate_columns(image_shape[0]-patch_shape[0], image_shape[0])\n\n return np.array(patches, dtype=object)\n\n @apply_parallel\n def additive_noise(self, image, noise, clip=False, preserve_type=False):\n \"\"\" Add additive noise to an image.\n\n Parameters\n ----------\n noise : callable\n Distribution. Must have ``size`` parameter.\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)\n return self._add_(image, noise, clip, preserve_type)\n\n @apply_parallel\n def multiplicative_noise(self, image, noise, clip=False, preserve_type=False):\n \"\"\" Add multiplicative noise to an image.\n\n Parameters\n ----------\n noise : callable\n Distribution. Must have ``size`` parameter.\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. 
Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)\n return self._multiply_(image, noise, clip, preserve_type)\n\n @apply_parallel\n def elastic_transform(self, image, alpha, sigma, **kwargs):\n \"\"\" Deformation of images as described by Simard, Steinkraus and Platt, `Best Practices for Convolutional\n Neural Networks applied to Visual Document Analysis <http://cognitivemedium.com/assets/rmnist/Simard.pdf>_`.\n\n Code slightly differs from `<https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`_.\n\n Parameters\n ----------\n alpha : number\n maximum of vectors' norms.\n sigma : number\n Smooth factor.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n image = np.array(image)\n # full shape is needed\n shape = image.shape\n if len(shape) == 2:\n image = image[..., None]\n shape = image.shape\n\n kwargs.setdefault('mode', 'constant')\n kwargs.setdefault('cval', 0)\n\n column_shift = self._sp_gaussian_filter_(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha\n row_shift = self._sp_gaussian_filter_(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha\n\n row, column, channel = np.meshgrid(range(shape[0]), range(shape[1]), range(shape[2]))\n\n indices = (column + column_shift, row + row_shift, channel)\n\n distored_image = self._sp_map_coordinates_(image, indices, order=1, mode='reflect')\n\n if shape[-1] == 1:\n return PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape))[..., 0])\n return PIL.Image.fromarray(np.uint8(distored_image.reshape(image.shape)))\n",
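A minimal standalone sketch of the salt-noise idea used in the `ImagesBatch.salt` action above, assuming only NumPy and Pillow and outside the batch framework; `add_salt` and "example.png" are hypothetical names introduced here for illustration, not part of the class.

import numpy as np
import PIL.Image

def add_salt(image, p_noise=0.015, color=255):
    # Set each pixel to `color` with probability `p_noise` (1x1 salt only,
    # mirroring the simple branch of the salt action above).
    arr = np.array(image)
    mask = np.random.binomial(1, p_noise, size=arr.shape[:2]).astype(bool)
    arr[mask] = color
    return PIL.Image.fromarray(arr)

# Usage (hypothetical path):
# noisy = add_salt(PIL.Image.open("example.png"), p_noise=0.02)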
"\"\"\" Utility functions. \"\"\"\nimport tensorflow as tf\n\n\n\ndef get_shape(tensor, dynamic=False):\n \"\"\" Return shape of the input tensor without batch size.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n \"\"\"\n if dynamic:\n shape = tf.shape(tensor)\n else:\n shape = tensor.get_shape().as_list()\n return shape[1:]\n\ndef get_num_dims(tensor):\n \"\"\" Return a number of semantic dimensions (i.e. excluding batch and channels axis)\"\"\"\n shape = get_shape(tensor)\n dim = len(shape)\n return max(1, dim - 2)\n\n\ndef get_channels_axis(data_format='channels_last'):\n \"\"\" Return the integer channels axis based on string data format. \"\"\"\n return 1 if data_format == \"channels_first\" or data_format.startswith(\"NC\") else -1\n\ndef get_num_channels(tensor, data_format='channels_last'):\n \"\"\" Return number of channels in the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n shape : tuple of ints\n \"\"\"\n shape = tensor.get_shape().as_list()\n axis = get_channels_axis(data_format)\n return shape[axis]\n\n\ndef get_batch_size(tensor, dynamic=False):\n \"\"\" Return batch size (the length of the first dimension) of the input tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n batch size : int or None\n \"\"\"\n if dynamic:\n return tf.shape(tensor)[0]\n return tensor.get_shape().as_list()[0]\n\n\ndef get_spatial_dim(tensor):\n \"\"\" Return spatial dim of the input tensor (without channels and batch dimension).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n Returns\n -------\n dim : int\n \"\"\"\n return len(tensor.get_shape().as_list()) - 2\n\ndef get_spatial_shape(tensor, data_format='channels_last', dynamic=False):\n \"\"\" Return the tensor spatial shape (without batch and channels dimensions).\n\n Parameters\n ----------\n tensor : tf.Tensor\n\n dynamic : bool\n If True, returns tensor which represents shape. If False, returns list of ints and/or Nones.\n\n Returns\n -------\n shape : tf.Tensor or list\n \"\"\"\n if dynamic:\n shape = tf.shape(tensor)\n else:\n shape = tensor.get_shape().as_list()\n axis = slice(1, -1) if data_format == \"channels_last\" else slice(2, None)\n return shape[axis]\n",
"\"\"\" Implementation of the Lovasz Softmax loss.\nMaxim Berman, et al \"`The Lovász-Softmax loss: A tractable surrogate for the optimization\nof the intersection-over-union measure in neural networks <https://arxiv.org/abs/1705.08790>`_\"\n\nHeavily based on author's implementation: https://github.com/bermanmaxim/LovaszSoftmax\n\"\"\"\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass BinaryLovaszLoss(nn.Module):\n \"\"\" Compute binary Lovasz loss.\n\n Parameters\n ----------\n per_image : bool\n Whether to aggregate loss at individual items or at entire batch.\n ignore : None or int\n Class to exclude from computations\n \"\"\"\n def __init__(self, per_image=False, ignore=None):\n super().__init__()\n self.per_image = per_image\n self.ignore = ignore\n\n def forward(self, prediction, target):\n if self.per_image:\n lst = [self.compute_loss(*self.flatten(logit.unsqueeze(0), label.unsqueeze(0)))\n for logit, label in zip(prediction, target)]\n loss = torch.mean(torch.stack(lst), dim=0)\n\n else:\n loss = self.compute_loss(*self.flatten(prediction, target))\n return loss\n\n def flatten(self, scores, labels):\n \"\"\" Flatten predictions and true labels and remove ignored class. \"\"\"\n scores = scores.view(-1)\n labels = labels.view(-1)\n if self.ignore is None:\n return scores, labels\n\n mask = labels != self.ignore\n return scores[mask], labels[mask]\n\n def compute_loss(self, logits, labels):\n \"\"\" Takes in flattened binary tensors and outputs binary Lovasz loss. \"\"\"\n if len(labels) == 0:\n # only void pixels, the gradients should be 0\n return logits.sum() * 0.0\n\n signs = 2.0 * labels.float() - 1.0\n errors = 1.0 - logits * Variable(signs)\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n\n gt_sorted = labels[perm.data]\n grad = lovasz_grad(gt_sorted)\n loss = torch.dot(F.relu(errors_sorted), Variable(grad))\n return loss\n\n\n\nclass LovaszLoss(nn.Module):\n \"\"\" Compute Lovasz Softmax loss.\n\n Parameters\n ----------\n per_image : bool\n Whether to aggregate loss at individual items or at entire batch.\n ignore : None or int\n Class to exclude from computations\n ignore_missing_classes : bool\n Whether to include missing in computations classes for averaging purposes.\n \"\"\"\n def __init__(self, per_image=False, ignore=None, ignore_missing_classes=True):\n super().__init__()\n self.per_image = per_image\n self.ignore = ignore\n self.ignore_missing_classes = ignore_missing_classes\n\n def forward(self, prediction, target):\n if self.per_image:\n lst = [self.compute_loss(*self.flatten(logit.unsqueeze(0), label.unsqueeze(0)))\n for logit, label in zip(prediction, target)]\n loss = torch.mean(torch.stack(lst), dim=0)\n\n else:\n loss = self.compute_loss(*self.flatten(prediction, target))\n return loss\n\n def flatten(self, probas, labels):\n \"\"\" Flatten predictions and true labels and remove ignored class. \"\"\"\n # Assume output of a sigmoid layer\n if probas.dim() == 3:\n probas = probas.unsqueeze(1)\n\n # Flatten\n C = probas.size(1)\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C\n labels = labels.view(-1) # => B * H * W\n\n # Optional filtration\n if self.ignore is None:\n return probas, labels\n\n mask = labels != self.ignore\n return probas[mask], labels[mask]\n\n def compute_loss(self, probas, labels):\n \"\"\" Takes in flattened tensors and outputs binary Lovasz loss. 
\"\"\"\n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.0\n\n C = probas.size(1)\n\n per_class_losses = []\n for c in range(C):\n gt_at_class = (labels == c).float() # foreground for class c\n\n # Don't count missing (in true labels) classes\n if self.ignore_missing_classes and gt_at_class.sum() == 0:\n continue\n\n class_pred = probas[:, c]\n\n errors = (Variable(gt_at_class) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n\n class_loss = torch.dot(errors_sorted, Variable(lovasz_grad(gt_at_class[perm.data])))\n per_class_losses.append(class_loss)\n return torch.mean(torch.stack(per_class_losses), dim=0)\n\n\ndef lovasz_grad(gt_sorted):\n \"\"\" Compute gradient of the Lovasz extension w.r.t sorted errors. See Alg. 1 in paper. \"\"\"\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1.0 - intersection / union\n\n p = len(gt_sorted)\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n",
"\"\"\"\nGolnaz Ghiasi, Tsung-Yi Lin, Quoc V. Le \"`DropBlock: A regularization method for convolutional networks\n<https://arxiv.org/abs/1810.12890>`_\"\n\"\"\"\nimport tensorflow as tf\n\nfrom .layer import Layer, add_as_function\nfrom .pooling import MaxPooling\n\n# TODO:\n# When max_pooling allows for dynamic kernel size, implement block_size as fraction\n# of spatial_dims.\n# Write predefined callables to control dropout_rate\n\n\n@add_as_function\nclass Dropblock(Layer):\n \"\"\" Drop Block module\n Used for `O` letter in layout convention of :class:`~.tf.layers.ConvBlock`.\n\n Parameters\n ----------\n dropout_rate : float, tf.Tensor or callable.\n Default is 0\n block_size : int or tuple of ints\n Size of the block to drop. If tuple, should be of the same size as spatial\n dimensions of inputs.\n is_training : bool or tf.Tensor\n Default is True.\n data_format : str\n `channels_last` or `channels_first`. Default - 'channels_last'.\n global_step: misc\n If `dropout_rate` is callable, and `global_step` is passed to it as the\n first positional argument.\n seed : int\n Seed to use in tf.distributions.Bernoulli.sample method.\n \"\"\"\n def __init__(self, dropout_rate, block_size, data_format, global_step=None, seed=None, **kwargs):\n self.dropout_rate, self.block_size = dropout_rate, block_size\n self.data_format, self.global_step, self.seed = data_format, global_step, seed\n self.kwargs = kwargs\n\n def __call__(self, inputs, training):\n if callable(self.dropout_rate):\n self.dropout_rate = self.dropout_rate(self.global_step, **self.kwargs)\n\n if self.dropout_rate != 0.0:\n return tf.cond(training,\n true_fn=lambda: self._dropblock(inputs, self.dropout_rate, self.block_size,\n self.seed, self.data_format),\n false_fn=lambda: inputs,\n name='dropblock')\n return inputs\n\n\n def _dropblock(self, inputs, dropout_rate, block_size, seed, data_format):\n one = tf.convert_to_tensor([1], dtype=tf.int32)\n zeros_pad = tf.convert_to_tensor([[0, 0]], dtype=tf.int32)\n\n input_shape = tf.shape(inputs)\n\n if data_format == 'channels_first':\n spatial_dims, channels = input_shape[2:], input_shape[1:2]\n else:\n spatial_dims, channels = input_shape[1:-1], input_shape[-1:]\n spatial_ndim = spatial_dims.get_shape().as_list()[0]\n\n if isinstance(block_size, int):\n block_size = [block_size] * spatial_ndim\n block_size_tf = tf.convert_to_tensor(block_size, dtype=tf.int32)\n elif isinstance(block_size, tuple):\n if len(block_size) != spatial_ndim:\n raise ValueError('Length of `block_size` should be the same as spatial dimensions of input.')\n block_size_tf = tf.convert_to_tensor(block_size, dtype=tf.int32)\n else:\n raise ValueError('block_size should be int or tuple!')\n block_size_tf = tf.math.minimum(block_size_tf, spatial_dims)\n block_size_tf = tf.math.maximum(block_size_tf, one)\n\n spatial_dims_float = tf.cast(spatial_dims, dtype=tf.float32)\n block_size_tf_float = tf.cast(block_size_tf, dtype=tf.float32)\n\n inner_area = spatial_dims - block_size_tf + one\n inner_area_float = tf.cast(inner_area, dtype=tf.float32)\n\n gamma = (tf.convert_to_tensor(dropout_rate) * tf.math.reduce_prod(spatial_dims_float) /\n tf.math.reduce_prod(block_size_tf_float) / tf.math.reduce_prod(inner_area_float))\n\n # Mask is sampled for each featuremap independently and applied identically to all batch items\n noise_dist = tf.distributions.Bernoulli(probs=gamma, dtype=tf.float32)\n\n if data_format == 'channels_first':\n sampling_mask_shape = tf.concat((one, channels, inner_area), axis=0)\n else:\n 
sampling_mask_shape = tf.concat((one, inner_area, channels), axis=0)\n mask = noise_dist.sample(sampling_mask_shape, seed=seed)\n\n left_spatial_pad = (block_size_tf - one) // 2\n right_spatial_pad = block_size_tf - one - left_spatial_pad\n spatial_pads = tf.stack((left_spatial_pad, right_spatial_pad), axis=1)\n if data_format == 'channels_first':\n pad_shape = tf.concat((zeros_pad, zeros_pad, spatial_pads), axis=0)\n else:\n pad_shape = tf.concat((zeros_pad, spatial_pads, zeros_pad), axis=0)\n mask = tf.pad(mask, pad_shape)\n\n # Using max pool operation to extend sampled points to blocks of desired size\n pool_size = block_size\n strides = [1] * spatial_ndim\n mask = MaxPooling(pool_size=pool_size, pool_strides=strides, data_format=data_format, padding='same')(mask)\n mask = tf.cast(1 - mask, tf.float32)\n output = tf.multiply(inputs, mask)\n\n # Scaling the output as in inverted dropout\n output = output * tf.to_float(tf.size(mask)) / tf.reduce_sum(mask)\n return output\n"
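The Bernoulli probability `gamma` in `_dropblock` rescales the requested `dropout_rate` so that, once every sampled seed is grown into a `block_size` block by max pooling, the expected fraction of dropped units still matches the rate; a small NumPy sketch of that formula with toy feature-map sizes (the function name and numbers are illustrative only):

import numpy as np

def dropblock_gamma(dropout_rate, spatial_dims, block_size):
    # gamma = rate * prod(spatial) / prod(block) / prod(valid seed positions)
    spatial_dims = np.asarray(spatial_dims, dtype=float)
    block_size = np.asarray(block_size, dtype=float)
    inner_area = spatial_dims - block_size + 1  # positions where a full block fits
    return dropout_rate * spatial_dims.prod() / block_size.prod() / inner_area.prod()

print(dropblock_gamma(0.1, spatial_dims=(32, 32), block_size=(5, 5)))  # ~0.0052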
] | [
[
"tensorflow.sin",
"tensorflow.cast",
"tensorflow.mod",
"tensorflow.train.piecewise_constant",
"tensorflow.name_scope",
"tensorflow.abs"
],
[
"numpy.asarray",
"numpy.sum"
],
[
"numpy.unique",
"numpy.asarray",
"numpy.clip",
"numpy.stack",
"numpy.concatenate",
"numpy.float32",
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.array",
"numpy.where",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.shape"
],
[
"torch.stack",
"torch.nn.functional.relu",
"torch.sort",
"torch.autograd.Variable"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.math.maximum",
"tensorflow.cast",
"tensorflow.size",
"tensorflow.pad",
"tensorflow.distributions.Bernoulli",
"tensorflow.math.reduce_prod",
"tensorflow.math.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
sayanmondal2098/pandas | [
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a"
] | [
"pandas/tests/io/parser/test_parse_dates.py",
"pandas/tests/io/test_pytables.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nTests date parsing functionality for all of the\nparsers defined in parsers.py\n\"\"\"\n\nfrom datetime import date, datetime\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslib import Timestamp\nfrom pandas._libs.tslibs import parsing\nfrom pandas.compat import lrange, parse_date\nfrom pandas.compat.numpy import np_array_datetime64_compat\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, Index, MultiIndex\nfrom pandas.core.indexes.datetimes import date_range\nimport pandas.util.testing as tm\n\nimport pandas.io.date_converters as conv\nimport pandas.io.parsers as parsers\n\n\ndef test_separator_date_conflict(all_parsers):\n # Regression test for gh-4678\n #\n # Make sure thousands separator and\n # date parsing do not conflict.\n parser = all_parsers\n data = \"06-02-2013;13:00;1-000.215\"\n expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],\n columns=[\"Date\", 2])\n\n df = parser.read_csv(StringIO(data), sep=\";\", thousands=\"-\",\n parse_dates={\"Date\": [0, 1]}, header=None)\n tm.assert_frame_equal(df, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col_custom(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n\n def date_parser(*date_cols):\n \"\"\"\n Test date parser.\n\n Parameters\n ----------\n date_cols : args\n The list of data columns to parse.\n\n Returns\n -------\n parsed : Series\n \"\"\"\n return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=date_parser, prefix=\"X\",\n parse_dates={\"actual\": [1, 2],\n \"nominal\": [1, 3]},\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are 
entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None,\n prefix=\"X\", parse_dates=[[1, 2], [1, 3]],\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"X1_X2\", \"X1_X3\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_col_as_index_col(all_parsers):\n data = \"\"\"\\\nKORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None, prefix=\"X\",\n parse_dates=[1], index_col=1)\n\n index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 22, 0)], name=\"X1\")\n expected = DataFrame([\n [\"KORD\", \" 18:56:00\", 0.81, 2.81, 7.2, 0.0, 280.0],\n [\"KORD\", \" 19:56:00\", 0.01, 2.21, 7.2, 0.0, 260.0],\n [\"KORD\", \" 20:56:00\", -0.59, 2.21, 5.7, 0.0, 280.0],\n [\"KORD\", \" 21:18:00\", -0.99, 2.01, 3.6, 0.0, 270.0],\n [\"KORD\", \" 21:56:00\", -0.59, 1.71, 5.1, 0.0, 290.0],\n ], columns=[\"X0\", \"X2\", \"X3\", \"X4\", \"X5\", \"X6\", \"X7\"], index=index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_int_cast(all_parsers):\n data = (\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n 
\"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\")\n parse_dates = {\"actual\": [1, 2], \"nominal\": [1, 3]}\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=conv.parse_date_time,\n parse_dates=parse_dates, prefix=\"X\")\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X4\"])\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_col_timestamp_parse(all_parsers):\n parser = all_parsers\n data = \"\"\"05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25\n05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],\n header=None, date_parser=Timestamp)\n expected = DataFrame([\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 1, \"E\", 0, np.nan, 1306.25],\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 8, \"E\", 0, np.nan, 1306.25]\n ], columns=[\"0_1\", 2, 3, 4, 5, 6, 7])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_with_header(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]})\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,parse_dates,msg\", [\n (\"\"\"\\\ndate_NominalTime,date,NominalTime\nKORD1,19990127, 19:00:00\nKORD2,19990127, 20:00:00\"\"\", [[1, 2]], (\"New date column already \"\n \"in dict date_NominalTime\")),\n (\"\"\"\\\nID,date,nominalTime\nKORD,19990127, 19:00:00\nKORD,19990127, 20:00:00\"\"\", 
dict(ID=[1, 2]), \"Date column ID already in dict\")\n])\ndef test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=parse_dates)\n\n\ndef test_date_parser_int_bug(all_parsers):\n # see gh-3071\n parser = all_parsers\n data = (\"posix_timestamp,elapsed,sys,user,queries,query_time,rows,\"\n \"accountid,userid,contactid,level,silo,method\\n\"\n \"1343103150,0.062353,0,4,6,0.01690,3,\"\n \"12345,1,-1,3,invoice_InvoiceResource,search\\n\")\n\n result = parser.read_csv(\n StringIO(data), index_col=0, parse_dates=[0],\n date_parser=lambda x: datetime.utcfromtimestamp(int(x)))\n expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,\n 3, \"invoice_InvoiceResource\", \"search\"]],\n columns=[\"elapsed\", \"sys\", \"user\", \"queries\",\n \"query_time\", \"rows\", \"accountid\",\n \"userid\", \"contactid\", \"level\",\n \"silo\", \"method\"],\n index=Index([Timestamp(\"2012-07-24 04:12:30\")],\n name=\"posix_timestamp\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nat_parse(all_parsers):\n # see gh-3062\n parser = all_parsers\n df = DataFrame(dict({\"A\": np.asarray(lrange(10), dtype=\"float64\"),\n \"B\": pd.Timestamp(\"20010101\")}))\n df.iloc[3:6, :] = np.nan\n\n with tm.ensure_clean(\"__nat_parse_.csv\") as path:\n df.to_csv(path)\n\n result = parser.read_csv(path, index_col=0, parse_dates=[\"B\"])\n tm.assert_frame_equal(result, df)\n\n\ndef test_csv_custom_parser(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(\n StringIO(data),\n date_parser=lambda x: datetime.strptime(x, \"%Y%m%d\"))\n expected = parser.read_csv(StringIO(data), parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_implicit_first_col(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), parse_dates=True)\n\n expected = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_string(all_parsers):\n data = \"\"\"date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=\"date\",\n parse_dates=[\"date\"])\n index = date_range(\"1/1/2009\", periods=3)\n index.name = \"date\"\n\n expected = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [1, 3, 4],\n \"C\": [2, 4, 5]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\n# Bug in https://github.com/dateutil/dateutil/issues/217\n# has been addressed, but we just don't pass in the `yearfirst`\[email protected](reason=\"yearfirst is not surfaced in read_*\")\[email protected](\"parse_dates\", [\n [[\"date\", \"time\"]],\n [[0, 1]]\n])\ndef test_yy_format_with_year_first(all_parsers, parse_dates):\n data = \"\"\"date,time,B,C\n090131,0010,1,2\n090228,1020,3,4\n090331,0830,5,6\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=parse_dates)\n index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0)],\n dtype=object, name=\"date_time\")\n expected = DataFrame({\"B\": [1, 3, 5], \"C\": [2, 4, 6]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"parse_dates\", [[0, 2], [\"a\", \"c\"]])\ndef 
test_parse_dates_column_list(all_parsers, parse_dates):\n data = \"a,b,c\\n01/01/2010,1,15/02/2010\"\n parser = all_parsers\n\n expected = DataFrame({\"a\": [datetime(2010, 1, 1)], \"b\": [1],\n \"c\": [datetime(2010, 2, 15)]})\n expected = expected.set_index([\"a\", \"b\"])\n\n result = parser.read_csv(StringIO(data), index_col=[0, 1],\n parse_dates=parse_dates, dayfirst=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"index_col\", [[0, 1], [1, 0]])\ndef test_multi_index_parse_dates(all_parsers, index_col):\n data = \"\"\"index1,index2,A,B,C\n20090101,one,a,1,2\n20090101,two,b,3,4\n20090101,three,c,4,5\n20090102,one,a,1,2\n20090102,two,b,3,4\n20090102,three,c,4,5\n20090103,one,a,1,2\n20090103,two,b,3,4\n20090103,three,c,4,5\n\"\"\"\n parser = all_parsers\n index = MultiIndex.from_product([\n (datetime(2009, 1, 1), datetime(2009, 1, 2),\n datetime(2009, 1, 3)), (\"one\", \"two\", \"three\")],\n names=[\"index1\", \"index2\"])\n\n # Out of order.\n if index_col == [1, 0]:\n index = index.swaplevel(0, 1)\n\n expected = DataFrame([[\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5]],\n columns=[\"A\", \"B\", \"C\"], index=index)\n result = parser.read_csv(StringIO(data), index_col=index_col,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [\n dict(dayfirst=True), dict(day_first=True)\n])\ndef test_parse_dates_custom_euro_format(all_parsers, kwargs):\n parser = all_parsers\n data = \"\"\"foo,bar,baz\n31/01/2010,1,2\n01/02/2010,1,NA\n02/02/2010,1,2\n\"\"\"\n if \"dayfirst\" in kwargs:\n df = parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n header=0, index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),\n datetime(2010, 2, 2)], name=\"time\")\n expected = DataFrame({\"Q\": [1, 1, 1], \"NTU\": [2, np.nan, 2]},\n index=exp_index, columns=[\"Q\", \"NTU\"])\n tm.assert_frame_equal(df, expected)\n else:\n msg = \"got an unexpected keyword argument 'day_first'\"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n skiprows=[0], index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n\n\ndef test_parse_tz_aware(all_parsers):\n # See gh-1693\n parser = all_parsers\n data = \"Date,x\\n2012-06-13T01:39:00Z,0.5\"\n\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n expected = DataFrame({\"x\": [0.5]}, index=Index([Timestamp(\n \"2012-06-13 01:39:00+00:00\")], name=\"Date\"))\n tm.assert_frame_equal(result, expected)\n assert result.index.tz is pytz.utc\n\n\[email protected](\"parse_dates,index_col\", [\n ({\"nominal\": [1, 2]}, \"nominal\"),\n ({\"nominal\": [1, 2]}, 0),\n ([[1, 2]], 0),\n])\ndef test_multiple_date_cols_index(all_parsers, parse_dates, index_col):\n parser = all_parsers\n data = \"\"\"\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, 
-0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD1\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD2\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD3\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD4\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD5\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD6\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n expected = expected.set_index(\"nominal\")\n\n if not isinstance(parse_dates, dict):\n expected.index.name = \"date_NominalTime\"\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates,\n index_col=index_col)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_chunked(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"actualTime\", \"A\", \"B\", \"C\", \"D\", \"E\"])\n expected = expected.set_index(\"nominal\")\n\n reader = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\", chunksize=2)\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_multiple_date_col_named_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n with_indices = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\")\n with_names = parser.read_csv(StringIO(data), index_col=\"nominal\",\n parse_dates={\"nominal\": [\n \"date\", \"nominalTime\"]})\n tm.assert_frame_equal(with_indices, with_names)\n\n\ndef 
test_multiple_date_col_multiple_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n result = parser.read_csv(StringIO(data), index_col=[\"nominal\", \"ID\"],\n parse_dates={\"nominal\": [1, 2]})\n expected = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]})\n\n expected = expected.set_index([\"nominal\", \"ID\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [dict(), dict(index_col=\"C\")])\ndef test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):\n # see gh-5636\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=\"C\", **kwargs)\n\n\[email protected](\"parse_dates\", [\n (1,), np.array([4, 5]), {1, 3, 3}\n])\ndef test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=(1,))\n\n\ndef test_parse_dates_empty_string(all_parsers):\n # see gh-2263\n parser = all_parsers\n data = \"Date,test\\n2012-01-01,1\\n,2\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"Date\"],\n na_filter=False)\n\n expected = DataFrame([[datetime(2012, 1, 1), 1], [pd.NaT, 2]],\n columns=[\"Date\", \"test\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"a\\n04.15.2016\", dict(parse_dates=[\"a\"]),\n DataFrame([datetime(2016, 4, 15)], columns=[\"a\"])),\n (\"a\\n04.15.2016\", dict(parse_dates=True, index_col=0),\n DataFrame(index=DatetimeIndex([\"2016-04-15\"], name=\"a\"))),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=[\"a\", \"b\"]),\n DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],\n columns=[\"a\", \"b\"])),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=True, index_col=[0, 1]),\n DataFrame(index=MultiIndex.from_tuples(\n [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=[\"a\", \"b\"]))),\n])\ndef test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):\n # see gh-14066\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), thousands=\".\", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_time_multi_level_column_name(all_parsers):\n data = \"\"\"\\\nD,T,A,B\ndate, time,a,b\n2001-01-05, 09:00:00, 0.0, 10.\n2001-01-06, 00:00:00, 1.0, 11.\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=[0, 1],\n parse_dates={\"date_time\": [0, 1]},\n date_parser=conv.parse_date_time)\n\n expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],\n [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]\n expected = DataFrame(expected_data,\n columns=[\"date_time\", (\"A\", \"a\"), (\"B\", \"b\")])\n 
tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"\"\"\\\ndate,time,a,b\n2001-01-05, 10:00:00, 0.0, 10.\n2001-01-05, 00:00:00, 1., 11.\n\"\"\", dict(header=0, parse_dates={\"date_time\": [0, 1]}),\n DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],\n [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],\n columns=[\"date_time\", \"a\", \"b\"])),\n ((\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n \"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\"),\n dict(header=None, parse_dates={\"actual\": [1, 2], \"nominal\": [1, 3]}),\n DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59]], columns=[\"actual\", \"nominal\", 0, 4])),\n])\ndef test_parse_date_time(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,\n **kwargs)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_fields(all_parsers):\n parser = all_parsers\n data = (\"year,month,day,a\\n2001,01,10,10.\\n\"\n \"2001,02,1,11.\")\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ymd\": [0, 1, 2]},\n date_parser=conv.parse_date_fields)\n\n expected = DataFrame([[datetime(2001, 1, 10), 10.],\n [datetime(2001, 2, 1), 11.]], columns=[\"ymd\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_all_fields(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0,0.0,10.\n2001,01,5,10,0,00,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_datetime_fractional_seconds(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0.123456,0.0,10.\n2001,01,5,10,0,0.500000,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,\n microsecond=123456), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0,\n microsecond=500000), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_generic(all_parsers):\n parser = all_parsers\n data = \"year,month,day,a\\n2001,01,10,10.\\n2001,02,1,11.\"\n\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ym\": [0, 1]},\n date_parser=lambda y, m: date(year=int(y),\n month=int(m),\n day=1))\n expected = 
DataFrame([[date(2001, 1, 1), 10, 10.],\n [date(2001, 2, 1), 1, 11.]],\n columns=[\"ym\", \"day\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_parser_resolution_if_not_ns(all_parsers):\n # see gh-10245\n parser = all_parsers\n data = \"\"\"\\\ndate,time,prn,rxstatus\n2013-11-03,19:00:00,126,00E80000\n2013-11-03,19:00:00,23,00E80000\n2013-11-03,19:00:00,13,00E80000\n\"\"\"\n\n def date_parser(dt, time):\n return np_array_datetime64_compat(dt + \"T\" + time + \"Z\",\n dtype=\"datetime64[s]\")\n\n result = parser.read_csv(StringIO(data), date_parser=date_parser,\n parse_dates={\"datetime\": [\"date\", \"time\"]},\n index_col=[\"datetime\", \"prn\"])\n\n datetimes = np_array_datetime64_compat([\"2013-11-03T19:00:00Z\"] * 3,\n dtype=\"datetime64[s]\")\n expected = DataFrame(data={\"rxstatus\": [\"00E80000\"] * 3},\n index=MultiIndex.from_tuples(\n [(datetimes[0], 126), (datetimes[1], 23),\n (datetimes[2], 13)], names=[\"datetime\", \"prn\"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_column_with_empty_string(all_parsers):\n # see gh-6428\n parser = all_parsers\n data = \"case,opdate\\n7,10/18/2006\\n7,10/18/2008\\n621, \"\n result = parser.read_csv(StringIO(data), parse_dates=[\"opdate\"])\n\n expected_data = [[7, \"10/18/2006\"],\n [7, \"10/18/2008\"],\n [621, \" \"]]\n expected = DataFrame(expected_data, columns=[\"case\", \"opdate\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,expected\", [\n (\"a\\n135217135789158401\\n1352171357E+5\",\n DataFrame({\"a\": [135217135789158401,\n 135217135700000]}, dtype=\"float64\")),\n (\"a\\n99999999999\\n123456789012345\\n1234E+0\",\n DataFrame({\"a\": [99999999999,\n 123456789012345,\n 1234]}, dtype=\"float64\"))\n])\[email protected](\"parse_dates\", [True, False])\ndef test_parse_date_float(all_parsers, data, expected, parse_dates):\n # see gh-2697\n #\n # Date parsing should fail, so we leave the data untouched\n # (i.e. float precision should remain unchanged).\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = \"\"\"dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400\"\"\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"dt\"])\n\n dti = pd.date_range(start=\"2018-01-04 09:01:00\",\n end=\"2018-01-04 09:05:00\", freq=\"1min\",\n tz=pytz.FixedOffset(540))\n expected_data = {\"dt\": dti, \"val\": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n",
"from contextlib import contextmanager\nimport datetime\nfrom datetime import timedelta\nfrom distutils.version import LooseVersion\nfrom io import BytesIO\nimport os\nimport tempfile\nfrom warnings import catch_warnings, simplefilter\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n PY35, PY36, is_platform_little_endian, is_platform_windows, lrange)\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.common import is_categorical_dtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex,\n RangeIndex, Series, Timestamp, bdate_range, concat, date_range, isna,\n timedelta_range)\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_frame_equal, assert_series_equal, set_timezone)\n\nfrom pandas.io import pytables as pytables # noqa:E402\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.io.pytables import (\n ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf)\nfrom pandas.io.pytables import TableIterator # noqa:E402\n\ntables = pytest.importorskip('tables')\n\n\n# TODO:\n# remove when gh-24839 is fixed; this affects numpy 1.16\n# and pytables 3.4.4\nxfail_non_writeable = pytest.mark.xfail(\n LooseVersion(np.__version__) >= LooseVersion('1.16') and\n LooseVersion(tables.__version__) < LooseVersion('3.5.1'),\n reason=('gh-25511, gh-24839. pytables needs a '\n 'release beyong 3.4.4 to support numpy 1.16x'))\n\n\n_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=\n LooseVersion('2.2') else 'zlib')\n\n\nignore_natural_naming_warning = pytest.mark.filterwarnings(\n \"ignore:object name:tables.exceptions.NaturalNameWarning\"\n)\n\n# contextmanager to ensure the file cleanup\n\n\ndef safe_remove(path):\n if path is not None:\n try:\n os.remove(path)\n except OSError:\n pass\n\n\ndef safe_close(store):\n try:\n if store is not None:\n store.close()\n except IOError:\n pass\n\n\ndef create_tempfile(path):\n \"\"\" create an unopened named temporary file \"\"\"\n return os.path.join(tempfile.gettempdir(), path)\n\n\n@contextmanager\ndef ensure_clean_store(path, mode='a', complevel=None, complib=None,\n fletcher32=False):\n\n try:\n\n # put in the temporary path if we don't have one already\n if not len(os.path.dirname(path)):\n path = create_tempfile(path)\n\n store = HDFStore(path, mode=mode, complevel=complevel,\n complib=complib, fletcher32=False)\n yield store\n finally:\n safe_close(store)\n if mode == 'w' or mode == 'a':\n safe_remove(path)\n\n\n@contextmanager\ndef ensure_clean_path(path):\n \"\"\"\n return essentially a named temporary file that is not opened\n and deleted on existing; if path is a list, then create and\n return list of filenames\n \"\"\"\n try:\n if isinstance(path, list):\n filenames = [create_tempfile(p) for p in path]\n yield filenames\n else:\n filenames = [create_tempfile(path)]\n yield filenames[0]\n finally:\n for f in filenames:\n safe_remove(f)\n\n\n# set these parameters so we don't have file sharing\ntables.parameters.MAX_NUMEXPR_THREADS = 1\ntables.parameters.MAX_BLOSC_THREADS = 1\ntables.parameters.MAX_THREADS = 1\n\n\ndef _maybe_remove(store, key):\n \"\"\"For tests using tables, try removing the table to be sure there is\n no content from previous tests using the same table name.\"\"\"\n try:\n store.remove(key)\n except (ValueError, KeyError):\n pass\n\n\nclass Base(object):\n\n @classmethod\n def setup_class(cls):\n\n # Pytables 3.0.0 deprecates lots of things\n tm.reset_testing_mode()\n\n 
@classmethod\n def teardown_class(cls):\n\n # Pytables 3.0.0 deprecates lots of things\n tm.set_testing_mode()\n\n def setup_method(self, method):\n self.path = 'tmp.__%s__.h5' % tm.rands(10)\n\n def teardown_method(self, method):\n pass\n\n\[email protected]\nclass TestHDFStore(Base):\n\n def test_format_kwarg_in_constructor(self):\n # GH 13291\n with ensure_clean_path(self.path) as path:\n with pytest.raises(ValueError):\n HDFStore(path, format=\"table\")\n\n def test_context(self):\n path = create_tempfile(self.path)\n try:\n with HDFStore(path) as tbl:\n raise ValueError('blah')\n except ValueError:\n pass\n finally:\n safe_remove(path)\n\n try:\n with HDFStore(path) as tbl:\n tbl['a'] = tm.makeDataFrame()\n\n with HDFStore(path) as tbl:\n assert len(tbl) == 1\n assert type(tbl['a']) == DataFrame\n finally:\n safe_remove(path)\n\n def test_conv_read_write(self):\n path = create_tempfile(self.path)\n try:\n def roundtrip(key, obj, **kwargs):\n obj.to_hdf(path, key, **kwargs)\n return read_hdf(path, key)\n\n o = tm.makeTimeSeries()\n assert_series_equal(o, roundtrip('series', o))\n\n o = tm.makeStringSeries()\n assert_series_equal(o, roundtrip('string_series', o))\n\n o = tm.makeDataFrame()\n assert_frame_equal(o, roundtrip('frame', o))\n\n # table\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n df.to_hdf(path, 'table', append=True)\n result = read_hdf(path, 'table', where=['index>2'])\n assert_frame_equal(df[df.index > 2], result)\n\n finally:\n safe_remove(path)\n\n def test_long_strings(self):\n\n # GH6166\n df = DataFrame({'a': tm.rands_array(100, size=10)},\n index=tm.rands_array(100, size=10))\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['a'])\n\n result = store.select('df')\n assert_frame_equal(df, result)\n\n def test_api(self):\n\n # GH4584\n # API issue when to_hdf doesn't acdept append AND format args\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, 'df', append=True, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, 'df', append=False, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.iloc[:10].to_hdf(path, 'df', append=True)\n df.iloc[10:].to_hdf(path, 'df', append=True, format='table')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n # append to False\n df.iloc[:10].to_hdf(path, 'df', append=False, format='table')\n df.iloc[10:].to_hdf(path, 'df', append=True)\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', append=False, format='fixed')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n df.to_hdf(path, 'df', append=False, format='f')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n df.to_hdf(path, 'df', append=False)\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n df.to_hdf(path, 'df')\n assert_frame_equal(read_hdf(path, 'df'), df)\n\n with ensure_clean_store(self.path) as store:\n\n path = store._path\n df = tm.makeDataFrame()\n\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=True, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n # append to False\n _maybe_remove(store, 'df')\n store.append('df', 
df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n # formats\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format='table')\n assert_frame_equal(store.select('df'), df)\n\n _maybe_remove(store, 'df')\n store.append('df', df.iloc[:10], append=False, format='table')\n store.append('df', df.iloc[10:], append=True, format=None)\n assert_frame_equal(store.select('df'), df)\n\n with ensure_clean_path(self.path) as path:\n # Invalid.\n df = tm.makeDataFrame()\n\n with pytest.raises(ValueError):\n df.to_hdf(path, \"df\", append=True, format=\"f\")\n\n with pytest.raises(ValueError):\n df.to_hdf(path, \"df\", append=True, format=\"fixed\")\n\n with pytest.raises(TypeError):\n df.to_hdf(path, \"df\", append=True, format=\"foo\")\n\n with pytest.raises(TypeError):\n df.to_hdf(path, \"df\", append=False, format=\"bar\")\n\n # File path doesn't exist\n path = \"\"\n with pytest.raises(FileNotFoundError):\n read_hdf(path, \"df\")\n\n def test_api_default_format(self):\n\n # default_format option\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n\n pd.set_option('io.hdf.default_format', 'fixed')\n _maybe_remove(store, 'df')\n store.put('df', df)\n assert not store.get_storer('df').is_table\n with pytest.raises(ValueError):\n store.append(\"df2\", df)\n\n pd.set_option('io.hdf.default_format', 'table')\n _maybe_remove(store, 'df')\n store.put('df', df)\n assert store.get_storer('df').is_table\n _maybe_remove(store, 'df2')\n store.append('df2', df)\n assert store.get_storer('df').is_table\n\n pd.set_option('io.hdf.default_format', None)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n pd.set_option('io.hdf.default_format', 'fixed')\n df.to_hdf(path, 'df')\n with HDFStore(path) as store:\n assert not store.get_storer('df').is_table\n with pytest.raises(ValueError):\n df.to_hdf(path, \"df2\", append=True)\n\n pd.set_option('io.hdf.default_format', 'table')\n df.to_hdf(path, 'df3')\n with HDFStore(path) as store:\n assert store.get_storer('df3').is_table\n df.to_hdf(path, 'df4', append=True)\n with HDFStore(path) as store:\n assert store.get_storer('df4').is_table\n\n pd.set_option('io.hdf.default_format', None)\n\n def test_keys(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n\n assert len(store) == 3\n expected = {'/a', '/b', '/c'}\n assert set(store.keys()) == expected\n assert set(store) == expected\n\n def test_keys_ignore_hdf_softlink(self):\n\n # GH 20523\n # Puts a softlink into HDF file and rereads\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n store.put(\"df\", df)\n\n assert store.keys() == [\"/df\"]\n\n store._handle.create_soft_link(store._handle.root, \"symlink\", \"df\")\n\n # Should ignore the softlink\n assert store.keys() == [\"/df\"]\n\n def test_iter_empty(self):\n\n with ensure_clean_store(self.path) as store:\n # GH 12221\n assert list(store) == []\n\n def test_repr(self):\n\n with ensure_clean_store(self.path) as store:\n repr(store)\n store.info()\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeStringSeries()\n store['c'] = tm.makeDataFrame()\n\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = 
df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.loc[3:6, ['obj1']] = np.nan\n df = df._consolidate()._convert(datetime=True)\n\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n store['df'] = df\n\n # make a random group in hdf space\n store._handle.create_group(store._handle.root, 'bah')\n\n assert store.filename in repr(store)\n assert store.filename in str(store)\n store.info()\n\n # storers\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeDataFrame()\n store.append('df', df)\n\n s = store.get_storer('df')\n repr(s)\n str(s)\n\n @ignore_natural_naming_warning\n def test_contains(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n store['foo/bar'] = tm.makeDataFrame()\n assert 'a' in store\n assert 'b' in store\n assert 'c' not in store\n assert 'foo/bar' in store\n assert '/foo/bar' in store\n assert '/foo/b' not in store\n assert 'bar' not in store\n\n # gh-2694: tables.NaturalNameWarning\n with catch_warnings(record=True):\n store['node())'] = tm.makeDataFrame()\n assert 'node())' in store\n\n def test_versioning(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store['b'] = tm.makeDataFrame()\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n assert store.root.a._v_attrs.pandas_version == '0.15.2'\n assert store.root.b._v_attrs.pandas_version == '0.15.2'\n assert store.root.df1._v_attrs.pandas_version == '0.15.2'\n\n # write a file and wipe its versioning\n _maybe_remove(store, 'df2')\n store.append('df2', df)\n\n # this is an error because its table_type is appendable, but no\n # version info\n store.get_node('df2')._v_attrs.pandas_version = None\n with pytest.raises(Exception):\n store.select(\"df2\")\n\n def test_mode(self):\n\n df = tm.makeTimeDataFrame()\n\n def check(mode):\n\n with ensure_clean_path(self.path) as path:\n\n # constructor\n if mode in ['r', 'r+']:\n with pytest.raises(IOError):\n HDFStore(path, mode=mode)\n\n else:\n store = HDFStore(path, mode=mode)\n assert store._handle.mode == mode\n store.close()\n\n with ensure_clean_path(self.path) as path:\n\n # context\n if mode in ['r', 'r+']:\n with pytest.raises(IOError):\n with HDFStore(path, mode=mode) as store: # noqa\n pass\n else:\n with HDFStore(path, mode=mode) as store:\n assert store._handle.mode == mode\n\n with ensure_clean_path(self.path) as path:\n\n # conv write\n if mode in ['r', 'r+']:\n with pytest.raises(IOError):\n df.to_hdf(path, \"df\", mode=mode)\n df.to_hdf(path, 'df', mode='w')\n else:\n df.to_hdf(path, 'df', mode=mode)\n\n # conv read\n if mode in ['w']:\n with pytest.raises(ValueError):\n read_hdf(path, \"df\", mode=mode)\n else:\n result = read_hdf(path, 'df', mode=mode)\n assert_frame_equal(result, df)\n\n def check_default_mode():\n\n # read_hdf uses default mode\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w')\n result = read_hdf(path, 'df')\n assert_frame_equal(result, df)\n\n check('r')\n check('r+')\n check('a')\n check('w')\n check_default_mode()\n\n def test_reopen_handle(self):\n\n with ensure_clean_path(self.path) as path:\n\n store = HDFStore(path, mode='a')\n store['a'] = 
tm.makeTimeSeries()\n\n # invalid mode change\n with pytest.raises(PossibleDataLossError):\n store.open(\"w\")\n\n store.close()\n assert not store.is_open\n\n # truncation ok here\n store.open('w')\n assert store.is_open\n assert len(store) == 0\n store.close()\n assert not store.is_open\n\n store = HDFStore(path, mode='a')\n store['a'] = tm.makeTimeSeries()\n\n # reopen as read\n store.open('r')\n assert store.is_open\n assert len(store) == 1\n assert store._mode == 'r'\n store.close()\n assert not store.is_open\n\n # reopen as append\n store.open('a')\n assert store.is_open\n assert len(store) == 1\n assert store._mode == 'a'\n store.close()\n assert not store.is_open\n\n # reopen as append (again)\n store.open('a')\n assert store.is_open\n assert len(store) == 1\n assert store._mode == 'a'\n store.close()\n assert not store.is_open\n\n def test_open_args(self):\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n\n # create an in memory store\n store = HDFStore(path, mode='a', driver='H5FD_CORE',\n driver_core_backing_store=0)\n store['df'] = df\n store.append('df2', df)\n\n tm.assert_frame_equal(store['df'], df)\n tm.assert_frame_equal(store['df2'], df)\n\n store.close()\n\n # the file should not have actually been written\n assert not os.path.exists(path)\n\n def test_flush(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n store.flush()\n store.flush(fsync=True)\n\n def test_get(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeSeries()\n left = store.get('a')\n right = store['a']\n tm.assert_series_equal(left, right)\n\n left = store.get('/a')\n right = store['/a']\n tm.assert_series_equal(left, right)\n\n with pytest.raises(KeyError):\n store.get(\"b\")\n\n @pytest.mark.parametrize('where, expected', [\n ('/', {\n '': ({'first_group', 'second_group'}, set()),\n '/first_group': (set(), {'df1', 'df2'}),\n '/second_group': ({'third_group'}, {'df3', 's1'}),\n '/second_group/third_group': (set(), {'df4'}),\n }),\n ('/second_group', {\n '/second_group': ({'third_group'}, {'df3', 's1'}),\n '/second_group/third_group': (set(), {'df4'}),\n })\n ])\n def test_walk(self, where, expected):\n # GH10143\n objs = {\n 'df1': pd.DataFrame([1, 2, 3]),\n 'df2': pd.DataFrame([4, 5, 6]),\n 'df3': pd.DataFrame([6, 7, 8]),\n 'df4': pd.DataFrame([9, 10, 11]),\n 's1': pd.Series([10, 9, 8]),\n # Next 3 items aren't pandas objects and should be ignored\n 'a1': np.array([[1, 2, 3], [4, 5, 6]]),\n 'tb1': np.array([(1, 2, 3), (4, 5, 6)], dtype='i,i,i'),\n 'tb2': np.array([(7, 8, 9), (10, 11, 12)], dtype='i,i,i')\n }\n\n with ensure_clean_store('walk_groups.hdf', mode='w') as store:\n store.put('/first_group/df1', objs['df1'])\n store.put('/first_group/df2', objs['df2'])\n store.put('/second_group/df3', objs['df3'])\n store.put('/second_group/s1', objs['s1'])\n store.put('/second_group/third_group/df4', objs['df4'])\n # Create non-pandas objects\n store._handle.create_array('/first_group', 'a1', objs['a1'])\n store._handle.create_table('/first_group', 'tb1', obj=objs['tb1'])\n store._handle.create_table('/second_group', 'tb2', obj=objs['tb2'])\n\n assert len(list(store.walk(where=where))) == len(expected)\n for path, groups, leaves in store.walk(where=where):\n assert path in expected\n expected_groups, expected_frames = expected[path]\n assert expected_groups == set(groups)\n assert expected_frames == set(leaves)\n for leaf in leaves:\n frame_path = '/'.join([path, leaf])\n obj = store.get(frame_path)\n if 'df' in 
leaf:\n tm.assert_frame_equal(obj, objs[leaf])\n else:\n tm.assert_series_equal(obj, objs[leaf])\n\n def test_getattr(self):\n\n with ensure_clean_store(self.path) as store:\n\n s = tm.makeTimeSeries()\n store['a'] = s\n\n # test attribute access\n result = store.a\n tm.assert_series_equal(result, s)\n result = getattr(store, 'a')\n tm.assert_series_equal(result, s)\n\n df = tm.makeTimeDataFrame()\n store['df'] = df\n result = store.df\n tm.assert_frame_equal(result, df)\n\n # errors\n for x in [\"d\", \"mode\", \"path\", \"handle\", \"complib\"]:\n with pytest.raises(AttributeError):\n getattr(store, x)\n\n # not stores\n for x in ['mode', 'path', 'handle', 'complib']:\n getattr(store, \"_%s\" % x)\n\n def test_put(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeTimeDataFrame()\n store['a'] = ts\n store['b'] = df[:10]\n store['foo/bar/bah'] = df[:10]\n store['foo'] = df[:10]\n store['/foo'] = df[:10]\n store.put('c', df[:10], format='table')\n\n # not OK, not a table\n with pytest.raises(ValueError):\n store.put(\"b\", df[10:], append=True)\n\n # node does not currently exist, test _is_table_type returns False\n # in this case\n _maybe_remove(store, 'f')\n with pytest.raises(ValueError):\n store.put(\"f\", df[10:], append=True)\n\n # can't put to a table (use append instead)\n with pytest.raises(ValueError):\n store.put(\"c\", df[10:], append=True)\n\n # overwrite table\n store.put('c', df[:10], format='table', append=False)\n tm.assert_frame_equal(df[:10], store['c'])\n\n def test_put_string_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n index = Index(\n [\"I am a very long string index: %s\" % i for i in range(20)])\n s = Series(np.arange(20), index=index)\n df = DataFrame({'A': s, 'B': s})\n\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n # mixed length\n index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +\n [\"I am a very long string index: %s\" % i\n for i in range(20)])\n s = Series(np.arange(21), index=index)\n df = DataFrame({'A': s, 'B': s})\n store['a'] = s\n tm.assert_series_equal(store['a'], s)\n\n store['b'] = df\n tm.assert_frame_equal(store['b'], df)\n\n def test_put_compression(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n\n store.put('c', df, format='table', complib='zlib')\n tm.assert_frame_equal(store['c'], df)\n\n # can't compress if format='fixed'\n with pytest.raises(ValueError):\n store.put(\"b\", df, format=\"fixed\", complib=\"zlib\")\n\n @td.skip_if_windows_python_3\n def test_put_compression_blosc(self):\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n # can't compress if format='fixed'\n with pytest.raises(ValueError):\n store.put('b', df, format='fixed', complib='blosc')\n\n store.put('c', df, format='table', complib='blosc')\n tm.assert_frame_equal(store['c'], df)\n\n def test_complibs_default_settings(self):\n # GH15943\n df = tm.makeDataFrame()\n\n # Set complevel and check if complib is automatically set to\n # default value\n with ensure_clean_path(self.path) as tmpfile:\n df.to_hdf(tmpfile, 'df', complevel=9)\n result = pd.read_hdf(tmpfile, 'df')\n tm.assert_frame_equal(result, df)\n\n with tables.open_file(tmpfile, mode='r') as h5file:\n for node in h5file.walk_nodes(where='/df', classname='Leaf'):\n assert node.filters.complevel == 9\n assert node.filters.complib == 'zlib'\n\n # Set complib and check to see if compression is 
disabled\n with ensure_clean_path(self.path) as tmpfile:\n df.to_hdf(tmpfile, 'df', complib='zlib')\n result = pd.read_hdf(tmpfile, 'df')\n tm.assert_frame_equal(result, df)\n\n with tables.open_file(tmpfile, mode='r') as h5file:\n for node in h5file.walk_nodes(where='/df', classname='Leaf'):\n assert node.filters.complevel == 0\n assert node.filters.complib is None\n\n # Check if not setting complib or complevel results in no compression\n with ensure_clean_path(self.path) as tmpfile:\n df.to_hdf(tmpfile, 'df')\n result = pd.read_hdf(tmpfile, 'df')\n tm.assert_frame_equal(result, df)\n\n with tables.open_file(tmpfile, mode='r') as h5file:\n for node in h5file.walk_nodes(where='/df', classname='Leaf'):\n assert node.filters.complevel == 0\n assert node.filters.complib is None\n\n # Check if file-defaults can be overridden on a per table basis\n with ensure_clean_path(self.path) as tmpfile:\n store = pd.HDFStore(tmpfile)\n store.append('dfc', df, complevel=9, complib='blosc')\n store.append('df', df)\n store.close()\n\n with tables.open_file(tmpfile, mode='r') as h5file:\n for node in h5file.walk_nodes(where='/df', classname='Leaf'):\n assert node.filters.complevel == 0\n assert node.filters.complib is None\n for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):\n assert node.filters.complevel == 9\n assert node.filters.complib == 'blosc'\n\n def test_complibs(self):\n # GH14478\n df = tm.makeDataFrame()\n\n # Building list of all complibs and complevels tuples\n all_complibs = tables.filters.all_complibs\n # Remove lzo if its not available on this platform\n if not tables.which_lib_version('lzo'):\n all_complibs.remove('lzo')\n # Remove bzip2 if its not available on this platform\n if not tables.which_lib_version(\"bzip2\"):\n all_complibs.remove(\"bzip2\")\n\n all_levels = range(0, 10)\n all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]\n\n for (lib, lvl) in all_tests:\n with ensure_clean_path(self.path) as tmpfile:\n gname = 'foo'\n\n # Write and read file to see if data is consistent\n df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)\n result = pd.read_hdf(tmpfile, gname)\n tm.assert_frame_equal(result, df)\n\n # Open file and check metadata\n # for correct amount of compression\n h5table = tables.open_file(tmpfile, mode='r')\n for node in h5table.walk_nodes(where='/' + gname,\n classname='Leaf'):\n assert node.filters.complevel == lvl\n if lvl == 0:\n assert node.filters.complib is None\n else:\n assert node.filters.complib == lib\n h5table.close()\n\n def test_put_integer(self):\n # non-date, non-string index\n df = DataFrame(np.random.randn(50, 100))\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n @xfail_non_writeable\n def test_put_mixed_type(self):\n df = tm.makeTimeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.loc[3:6, ['obj1']] = np.nan\n df = df._consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n\n # PerformanceWarning\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n store.put('df', df)\n\n expected = store.get('df')\n tm.assert_frame_equal(expected, df)\n\n @pytest.mark.filterwarnings(\n \"ignore:object 
name:tables.exceptions.NaturalNameWarning\"\n )\n def test_append(self):\n\n with ensure_clean_store(self.path) as store:\n\n # this is allowed by almost always don't want to do it\n # tables.NaturalNameWarning):\n with catch_warnings(record=True):\n\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n _maybe_remove(store, 'df2')\n store.put('df2', df[:10], format='table')\n store.append('df2', df[10:])\n tm.assert_frame_equal(store['df2'], df)\n\n _maybe_remove(store, 'df3')\n store.append('/df3', df[:10])\n store.append('/df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n # this is allowed by almost always don't want to do it\n # tables.NaturalNameWarning\n _maybe_remove(store, '/df3 foo')\n store.append('/df3 foo', df[:10])\n store.append('/df3 foo', df[10:])\n tm.assert_frame_equal(store['df3 foo'], df)\n\n # dtype issues - mizxed type in a single object column\n df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])\n df['mixed_column'] = 'testing'\n df.loc[2, 'mixed_column'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df)\n tm.assert_frame_equal(store['df'], df)\n\n # uints - test storage of uints\n uint_data = DataFrame({\n 'u08': Series(np.random.randint(0, high=255, size=5),\n dtype=np.uint8),\n 'u16': Series(np.random.randint(0, high=65535, size=5),\n dtype=np.uint16),\n 'u32': Series(np.random.randint(0, high=2**30, size=5),\n dtype=np.uint32),\n 'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],\n dtype=np.uint64)}, index=np.arange(5))\n _maybe_remove(store, 'uints')\n store.append('uints', uint_data)\n tm.assert_frame_equal(store['uints'], uint_data)\n\n # uints - test storage of uints in indexable columns\n _maybe_remove(store, 'uints')\n # 64-bit indices not yet supported\n store.append('uints', uint_data, data_columns=[\n 'u08', 'u16', 'u32'])\n tm.assert_frame_equal(store['uints'], uint_data)\n\n def test_append_series(self):\n\n with ensure_clean_store(self.path) as store:\n\n # basic\n ss = tm.makeStringSeries()\n ts = tm.makeTimeSeries()\n ns = Series(np.arange(100))\n\n store.append('ss', ss)\n result = store['ss']\n tm.assert_series_equal(result, ss)\n assert result.name is None\n\n store.append('ts', ts)\n result = store['ts']\n tm.assert_series_equal(result, ts)\n assert result.name is None\n\n ns.name = 'foo'\n store.append('ns', ns)\n result = store['ns']\n tm.assert_series_equal(result, ns)\n assert result.name == ns.name\n\n # select on the values\n expected = ns[ns > 60]\n result = store.select('ns', 'foo>60')\n tm.assert_series_equal(result, expected)\n\n # select on the index and values\n expected = ns[(ns > 70) & (ns.index < 90)]\n result = store.select('ns', 'foo>70 and index<90')\n tm.assert_series_equal(result, expected)\n\n # multi-index\n mi = DataFrame(np.random.randn(5, 1), columns=['A'])\n mi['B'] = np.arange(len(mi))\n mi['C'] = 'foo'\n mi.loc[3:5, 'C'] = 'bar'\n mi.set_index(['C', 'B'], inplace=True)\n s = mi.stack()\n s.index = s.index.droplevel(2)\n store.append('mi', s)\n tm.assert_series_equal(store['mi'], s)\n\n def test_store_index_types(self):\n # GH5386\n # test storing various index types\n\n with ensure_clean_store(self.path) as store:\n\n def check(format, index):\n df = DataFrame(np.random.randn(10, 2), columns=list('AB'))\n df.index = index(len(df))\n\n _maybe_remove(store, 'df')\n store.put('df', df, format=format)\n assert_frame_equal(df, store['df'])\n\n for index in [tm.makeFloatIndex, 
tm.makeStringIndex,\n tm.makeIntIndex, tm.makeDateIndex]:\n\n check('table', index)\n check('fixed', index)\n\n # period index currently broken for table\n # seee GH7796 FIXME\n check('fixed', tm.makePeriodIndex)\n # check('table',tm.makePeriodIndex)\n\n # unicode\n index = tm.makeUnicodeIndex\n check('table', index)\n check('fixed', index)\n\n @pytest.mark.skipif(not is_platform_little_endian(),\n reason=\"reason platform is not little endian\")\n def test_encoding(self):\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame(dict(A='foo', B='bar'), index=range(5))\n df.loc[2, 'A'] = np.nan\n df.loc[3, 'B'] = np.nan\n _maybe_remove(store, 'df')\n store.append('df', df, encoding='ascii')\n tm.assert_frame_equal(store['df'], df)\n\n expected = df.reindex(columns=['A'])\n result = store.select('df', Term('columns=A', encoding='ascii'))\n tm.assert_frame_equal(result, expected)\n\n def test_latin_encoding(self):\n\n values = [[b'E\\xc9, 17', b'', b'a', b'b', b'c'],\n [b'E\\xc9, 17', b'a', b'b', b'c'],\n [b'EE, 17', b'', b'a', b'b', b'c'],\n [b'E\\xc9, 17', b'\\xf8\\xfc', b'a', b'b', b'c'],\n [b'', b'a', b'b', b'c'],\n [b'\\xf8\\xfc', b'a', b'b', b'c'],\n [b'A\\xf8\\xfc', b'', b'a', b'b', b'c'],\n [np.nan, b'', b'b', b'c'],\n [b'A\\xf8\\xfc', np.nan, b'', b'b', b'c']]\n\n def _try_decode(x, encoding='latin-1'):\n try:\n return x.decode(encoding)\n except AttributeError:\n return x\n # not sure how to remove latin-1 from code in python 2 and 3\n values = [[_try_decode(x) for x in y] for y in values]\n\n examples = []\n for dtype in ['category', object]:\n for val in values:\n examples.append(pd.Series(val, dtype=dtype))\n\n def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):\n with ensure_clean_path(self.path) as store:\n s.to_hdf(store, key, format='table', encoding=encoding,\n nan_rep=nan_rep)\n retr = read_hdf(store, key)\n s_nan = s.replace(nan_rep, np.nan)\n if is_categorical_dtype(s_nan):\n assert is_categorical_dtype(retr)\n assert_series_equal(s_nan, retr, check_dtype=False,\n check_categorical=False)\n else:\n assert_series_equal(s_nan, retr)\n\n for s in examples:\n roundtrip(s)\n\n # fails:\n # for x in examples:\n # roundtrip(s, nan_rep=b'\\xf8\\xfc')\n\n def test_append_some_nans(self):\n\n with ensure_clean_store(self.path) as store:\n df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),\n 'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar',\n 'D': Timestamp(\"20010101\"),\n 'E': datetime.datetime(2001, 1, 2, 0, 0)},\n index=np.arange(20))\n # some nans\n _maybe_remove(store, 'df1')\n df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan\n store.append('df1', df[:10])\n store.append('df1', df[10:])\n tm.assert_frame_equal(store['df1'], df)\n\n # first column\n df1 = df.copy()\n df1.loc[:, 'A1'] = np.nan\n _maybe_remove(store, 'df1')\n store.append('df1', df1[:10])\n store.append('df1', df1[10:])\n tm.assert_frame_equal(store['df1'], df1)\n\n # 2nd column\n df2 = df.copy()\n df2.loc[:, 'A2'] = np.nan\n _maybe_remove(store, 'df2')\n store.append('df2', df2[:10])\n store.append('df2', df2[10:])\n tm.assert_frame_equal(store['df2'], df2)\n\n # datetimes\n df3 = df.copy()\n df3.loc[:, 'E'] = np.nan\n _maybe_remove(store, 'df3')\n store.append('df3', df3[:10])\n store.append('df3', df3[10:])\n tm.assert_frame_equal(store['df3'], df3)\n\n def test_append_all_nans(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20)},\n index=np.arange(20))\n df.loc[0:15, :] 
= np.nan\n\n # nan some entire rows (dropna=True)\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df[-4:])\n\n # nan some entire rows (dropna=False)\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # tests the option io.hdf.dropna_table\n pd.set_option('io.hdf.dropna_table', False)\n _maybe_remove(store, 'df3')\n store.append('df3', df[:10])\n store.append('df3', df[10:])\n tm.assert_frame_equal(store['df3'], df)\n\n pd.set_option('io.hdf.dropna_table', True)\n _maybe_remove(store, 'df4')\n store.append('df4', df[:10])\n store.append('df4', df[10:])\n tm.assert_frame_equal(store['df4'], df[-4:])\n\n # nan some entire rows (string are still written!)\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar'},\n index=np.arange(20))\n\n df.loc[0:15, :] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # nan some entire rows (but since we have dates they are still\n # written!)\n df = DataFrame({'A1': np.random.randn(20),\n 'A2': np.random.randn(20),\n 'B': 'foo', 'C': 'bar',\n 'D': Timestamp(\"20010101\"),\n 'E': datetime.datetime(2001, 1, 2, 0, 0)},\n index=np.arange(20))\n\n df.loc[0:15, :] = np.nan\n\n _maybe_remove(store, 'df')\n store.append('df', df[:10], dropna=True)\n store.append('df', df[10:], dropna=True)\n tm.assert_frame_equal(store['df'], df)\n\n _maybe_remove(store, 'df2')\n store.append('df2', df[:10], dropna=False)\n store.append('df2', df[10:], dropna=False)\n tm.assert_frame_equal(store['df2'], df)\n\n # Test to make sure defaults are to not drop.\n # Corresponding to Issue 9382\n df_with_missing = DataFrame(\n {'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})\n\n with ensure_clean_path(self.path) as path:\n df_with_missing.to_hdf(path, 'df_with_missing', format='table')\n reloaded = read_hdf(path, 'df_with_missing')\n tm.assert_frame_equal(df_with_missing, reloaded)\n\n def test_read_missing_key_close_store(self):\n # GH 25766\n with ensure_clean_path(self.path) as path:\n df = pd.DataFrame({'a': range(2), 'b': range(2)})\n df.to_hdf(path, 'k1')\n\n with pytest.raises(KeyError):\n pd.read_hdf(path, 'k2')\n\n # smoke test to test that file is properly closed after\n # read with KeyError before another write\n df.to_hdf(path, 'k2')\n\n def test_append_frame_column_oriented(self):\n\n with ensure_clean_store(self.path) as store:\n\n # column oriented\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df1')\n store.append('df1', df.iloc[:, :2], axes=['columns'])\n store.append('df1', df.iloc[:, 2:])\n tm.assert_frame_equal(store['df1'], df)\n\n result = store.select('df1', 'columns=A')\n expected = df.reindex(columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # selection on the non-indexable\n result = store.select(\n 'df1', ('columns=A', 'index=df.index[0:4]'))\n expected = df.reindex(columns=['A'], index=df.index[0:4])\n tm.assert_frame_equal(expected, result)\n\n # this isn't supported\n with pytest.raises(TypeError):\n store.select('df1',\n 'columns=A and index>df.index[4]')\n\n def 
test_append_with_different_block_ordering(self):\n\n # GH 4096; using same frames, but different block orderings\n with ensure_clean_store(self.path) as store:\n\n for i in range(10):\n\n df = DataFrame(np.random.randn(10, 2), columns=list('AB'))\n df['index'] = range(10)\n df['index'] += i * 10\n df['int64'] = Series([1] * len(df), dtype='int64')\n df['int16'] = Series([1] * len(df), dtype='int16')\n\n if i % 2 == 0:\n del df['int64']\n df['int64'] = Series([1] * len(df), dtype='int64')\n if i % 3 == 0:\n a = df.pop('A')\n df['A'] = a\n\n df.set_index('index', inplace=True)\n\n store.append('df', df)\n\n # test a different ordering but with more fields (like invalid\n # combinate)\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(np.random.randn(10, 2),\n columns=list('AB'), dtype='float64')\n df['int64'] = Series([1] * len(df), dtype='int64')\n df['int16'] = Series([1] * len(df), dtype='int16')\n store.append('df', df)\n\n # store additional fields in different blocks\n df['int16_2'] = Series([1] * len(df), dtype='int16')\n with pytest.raises(ValueError):\n store.append('df', df)\n\n # store multile additional fields in different blocks\n df['float_3'] = Series([1.] * len(df), dtype='float64')\n with pytest.raises(ValueError):\n store.append('df', df)\n\n def test_append_with_strings(self):\n\n with ensure_clean_store(self.path) as store:\n with catch_warnings(record=True):\n\n def check_col(key, name, size):\n assert getattr(store.get_storer(key)\n .table.description, name).itemsize == size\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big', df)\n tm.assert_frame_equal(store.select('df_big'), df)\n check_col('df_big', 'values_block_1', 15)\n\n # appending smaller string ok\n df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])\n store.append('df_big', df2)\n expected = concat([df, df2])\n tm.assert_frame_equal(store.select('df_big'), expected)\n check_col('df_big', 'values_block_1', 15)\n\n # avoid truncation on elements\n df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])\n store.append('df_big2', df, min_itemsize={'values': 50})\n tm.assert_frame_equal(store.select('df_big2'), df)\n check_col('df_big2', 'values_block_1', 50)\n\n # bigger string on next append\n store.append('df_new', df)\n df_new = DataFrame(\n [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])\n with pytest.raises(ValueError):\n store.append('df_new', df_new)\n\n # min_itemsize on Series index (GH 11412)\n df = tm.makeMixedDataFrame().set_index('C')\n store.append('ss', df['B'], min_itemsize={'index': 4})\n tm.assert_series_equal(store.select('ss'), df['B'])\n\n # same as above, with data_columns=True\n store.append('ss2', df['B'], data_columns=True,\n min_itemsize={'index': 4})\n tm.assert_series_equal(store.select('ss2'), df['B'])\n\n # min_itemsize in index without appending (GH 10381)\n store.put('ss3', df, format='table',\n min_itemsize={'index': 6})\n # just make sure there is a longer string:\n df2 = df.copy().reset_index().assign(C='longer').set_index('C')\n store.append('ss3', df2)\n tm.assert_frame_equal(store.select('ss3'),\n pd.concat([df, df2]))\n\n # same as above, with a Series\n store.put('ss4', df['B'], format='table',\n min_itemsize={'index': 6})\n store.append('ss4', df2['B'])\n tm.assert_series_equal(store.select('ss4'),\n pd.concat([df['B'], df2['B']]))\n\n # with nans\n _maybe_remove(store, 'df')\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.loc[1:4, 'string'] = np.nan\n 
df['string2'] = 'bar'\n df.loc[4:8, 'string2'] = np.nan\n df['string3'] = 'bah'\n df.loc[1:, 'string3'] = np.nan\n store.append('df', df)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n with ensure_clean_store(self.path) as store:\n\n def check_col(key, name, size):\n assert getattr(store.get_storer(key)\n .table.description, name).itemsize, size\n\n df = DataFrame(dict(A='foo', B='bar'), index=range(10))\n\n # a min_itemsize that creates a data_column\n _maybe_remove(store, 'df')\n store.append('df', df, min_itemsize={'A': 200})\n check_col('df', 'A', 200)\n assert store.get_storer('df').data_columns == ['A']\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})\n check_col('df', 'A', 200)\n assert store.get_storer('df').data_columns == ['B', 'A']\n\n # a min_itemsize that creates a data_column2\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=[\n 'B'], min_itemsize={'values': 200})\n check_col('df', 'B', 200)\n check_col('df', 'values_block_0', 200)\n assert store.get_storer('df').data_columns == ['B']\n\n # infer the .typ on subsequent appends\n _maybe_remove(store, 'df')\n store.append('df', df[:5], min_itemsize=200)\n store.append('df', df[5:], min_itemsize=200)\n tm.assert_frame_equal(store['df'], df)\n\n # invalid min_itemsize keys\n df = DataFrame(['foo', 'foo', 'foo', 'barh',\n 'barh', 'barh'], columns=['A'])\n _maybe_remove(store, 'df')\n with pytest.raises(ValueError):\n store.append('df', df, min_itemsize={'foo': 20, 'foobar': 20})\n\n def test_append_with_empty_string(self):\n\n with ensure_clean_store(self.path) as store:\n\n # with all empty strings (GH 12242)\n df = DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', '']})\n store.append('df', df[:-1], min_itemsize={'x': 1})\n store.append('df', df[-1:], min_itemsize={'x': 1})\n tm.assert_frame_equal(store.select('df'), df)\n\n def test_to_hdf_with_min_itemsize(self):\n\n with ensure_clean_path(self.path) as path:\n\n # min_itemsize in index with to_hdf (GH 10381)\n df = tm.makeMixedDataFrame().set_index('C')\n df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})\n # just make sure there is a longer string:\n df2 = df.copy().reset_index().assign(C='longer').set_index('C')\n df2.to_hdf(path, 'ss3', append=True, format='table')\n tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),\n pd.concat([df, df2]))\n\n # same as above, with a Series\n df['B'].to_hdf(path, 'ss4', format='table',\n min_itemsize={'index': 6})\n df2['B'].to_hdf(path, 'ss4', append=True, format='table')\n tm.assert_series_equal(pd.read_hdf(path, 'ss4'),\n pd.concat([df['B'], df2['B']]))\n\n @pytest.mark.parametrize(\n \"format\",\n [pytest.param('fixed', marks=xfail_non_writeable),\n 'table'])\n def test_to_hdf_errors(self, format):\n\n data = ['\\ud800foo']\n ser = pd.Series(data, index=pd.Index(data))\n with ensure_clean_path(self.path) as path:\n # GH 20835\n ser.to_hdf(path, 'table', format=format, errors='surrogatepass')\n\n result = pd.read_hdf(path, 'table', errors='surrogatepass')\n tm.assert_series_equal(result, ser)\n\n def test_append_with_data_columns(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeTimeDataFrame()\n df.iloc[0, df.columns.get_loc('B')] = 1.\n _maybe_remove(store, 'df')\n store.append('df', df[:2], data_columns=['B'])\n store.append('df', df[2:])\n tm.assert_frame_equal(store['df'], df)\n\n # check that we have indices created\n 
assert(store._handle.root.df.table.cols.index.is_indexed is True)\n assert(store._handle.root.df.table.cols.B.is_indexed is True)\n\n # data column searching\n result = store.select('df', 'B>0')\n expected = df[df.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column searching (with an indexable and a data_columns)\n result = store.select(\n 'df', 'B>0 and index>df.index[3]')\n df_new = df.reindex(index=df.index[4:])\n expected = df_new[df_new.B > 0]\n tm.assert_frame_equal(result, expected)\n\n # data column selection with a string data_column\n df_new = df.copy()\n df_new['string'] = 'foo'\n df_new.loc[1:4, 'string'] = np.nan\n df_new.loc[5:6, 'string'] = 'bar'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'])\n result = store.select('df', \"string='foo'\")\n expected = df_new[df_new.string == 'foo']\n tm.assert_frame_equal(result, expected)\n\n # using min_itemsize and a data column\n def check_col(key, name, size):\n assert getattr(store.get_storer(key)\n .table.description, name).itemsize == size\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'string': 30})\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['string'], min_itemsize=30)\n check_col('df', 'string', 30)\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string'],\n min_itemsize={'values': 30})\n check_col('df', 'string', 30)\n\n with ensure_clean_store(self.path) as store:\n df_new['string2'] = 'foobarbah'\n df_new['string_block1'] = 'foobarbah1'\n df_new['string_block2'] = 'foobarbah2'\n _maybe_remove(store, 'df')\n store.append('df', df_new, data_columns=['string', 'string2'],\n min_itemsize={'string': 30, 'string2': 40,\n 'values': 50})\n check_col('df', 'string', 30)\n check_col('df', 'string2', 40)\n check_col('df', 'values_block_1', 50)\n\n with ensure_clean_store(self.path) as store:\n # multiple data columns\n df_new = df.copy()\n df_new.iloc[0, df_new.columns.get_loc('A')] = 1.\n df_new.iloc[0, df_new.columns.get_loc('B')] = -1.\n df_new['string'] = 'foo'\n\n sl = df_new.columns.get_loc('string')\n df_new.iloc[1:4, sl] = np.nan\n df_new.iloc[5:6, sl] = 'bar'\n\n df_new['string2'] = 'foo'\n sl = df_new.columns.get_loc('string2')\n df_new.iloc[2:5, sl] = np.nan\n df_new.iloc[7:8, sl] = 'bar'\n _maybe_remove(store, 'df')\n store.append(\n 'df', df_new, data_columns=['A', 'B', 'string', 'string2'])\n result = store.select('df',\n \"string='foo' and string2='foo'\"\n \" and A>0 and B<0\")\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n # yield an empty frame\n result = store.select('df', \"string='foo' and string2='cool'\")\n expected = df_new[(df_new.string == 'foo') & (\n df_new.string2 == 'cool')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # doc example\n df_dc = df.copy()\n df_dc['string'] = 'foo'\n df_dc.loc[4:6, 'string'] = np.nan\n df_dc.loc[7:9, 'string'] = 'bar'\n df_dc['string2'] = 'cool'\n df_dc['datetime'] = Timestamp('20010102')\n df_dc = df_dc._convert(datetime=True)\n df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan\n\n _maybe_remove(store, 'df_dc')\n store.append('df_dc', df_dc,\n data_columns=['B', 'C', 'string',\n 'string2', 'datetime'])\n result = store.select('df_dc', 
'B>0')\n\n expected = df_dc[df_dc.B > 0]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n result = store.select(\n 'df_dc', ['B > 0', 'C > 0', 'string == foo'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (\n df_dc.string == 'foo')]\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n with ensure_clean_store(self.path) as store:\n # doc example part 2\n np.random.seed(1234)\n index = date_range('1/1/2000', periods=8)\n df_dc = DataFrame(np.random.randn(8, 3), index=index,\n columns=['A', 'B', 'C'])\n df_dc['string'] = 'foo'\n df_dc.loc[4:6, 'string'] = np.nan\n df_dc.loc[7:9, 'string'] = 'bar'\n df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()\n df_dc['string2'] = 'cool'\n\n # on-disk operations\n store.append('df_dc', df_dc, data_columns=[\n 'B', 'C', 'string', 'string2'])\n\n result = store.select('df_dc', 'B>0')\n expected = df_dc[df_dc.B > 0]\n tm.assert_frame_equal(result, expected)\n\n result = store.select(\n 'df_dc', ['B > 0', 'C > 0', 'string == \"foo\"'])\n expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &\n (df_dc.string == 'foo')]\n tm.assert_frame_equal(result, expected)\n\n def test_create_table_index(self):\n\n with ensure_clean_store(self.path) as store:\n\n with catch_warnings(record=True):\n def col(t, column):\n return getattr(store.get_storer(t).table.cols, column)\n\n # data columns\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df['string2'] = 'bar'\n store.append('f', df, data_columns=['string', 'string2'])\n assert(col('f', 'index').is_indexed is True)\n assert(col('f', 'string').is_indexed is True)\n assert(col('f', 'string2').is_indexed is True)\n\n # specify index=columns\n store.append(\n 'f2', df, index=['string'],\n data_columns=['string', 'string2'])\n assert(col('f2', 'index').is_indexed is False)\n assert(col('f2', 'string').is_indexed is True)\n assert(col('f2', 'string2').is_indexed is False)\n\n # try to index a non-table\n _maybe_remove(store, 'f2')\n store.put('f2', df)\n with pytest.raises(TypeError):\n store.create_table_index('f2')\n\n def test_append_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n with ensure_clean_store(self.path) as store:\n store.append('mi', df)\n result = store.select('mi')\n tm.assert_frame_equal(result, df)\n\n # GH 3748\n result = store.select('mi', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_path('test.hdf') as path:\n df.to_hdf(path, 'df', format='table')\n result = read_hdf(path, 'df', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n def test_column_multiindex(self):\n # GH 4710\n # recreate multi-indexes properly\n\n index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),\n ('B', 'a'), ('B', 'b')],\n names=['first', 'second'])\n df = DataFrame(np.arange(12).reshape(3, 4), columns=index)\n expected = df.copy()\n if isinstance(expected.index, RangeIndex):\n expected.index = Int64Index(expected.index)\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df', df)\n tm.assert_frame_equal(store['df'], expected,\n check_index_type=True,\n check_column_type=True)\n\n store.put('df1', df, format='table')\n tm.assert_frame_equal(store['df1'], expected,\n 
check_index_type=True,\n check_column_type=True)\n\n with pytest.raises(ValueError):\n store.put('df2', df, format='table', data_columns=['A'])\n with pytest.raises(ValueError):\n store.put('df3', df, format='table', data_columns=True)\n\n # appending multi-column on existing table (see GH 6167)\n with ensure_clean_store(self.path) as store:\n store.append('df2', df)\n store.append('df2', df)\n\n tm.assert_frame_equal(store['df2'], concat((df, df)))\n\n # non_index_axes name\n df = DataFrame(np.arange(12).reshape(3, 4),\n columns=Index(list('ABCD'), name='foo'))\n expected = df.copy()\n if isinstance(expected.index, RangeIndex):\n expected.index = Int64Index(expected.index)\n\n with ensure_clean_store(self.path) as store:\n\n store.put('df1', df, format='table')\n tm.assert_frame_equal(store['df1'], expected,\n check_index_type=True,\n check_column_type=True)\n\n def test_store_multiindex(self):\n\n # validate multi-index names\n # GH 5527\n with ensure_clean_store(self.path) as store:\n\n def make_index(names=None):\n return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),\n s, t)\n for d in range(1, 3)\n for s in range(2)\n for t in range(3)],\n names=names)\n\n # no names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index())\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n # partial names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', None, None]))\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n # series\n _maybe_remove(store, 's')\n s = Series(np.zeros(12), index=make_index(['date', None, None]))\n store.append('s', s)\n xp = Series(np.zeros(12), index=make_index(\n ['date', 'level_1', 'level_2']))\n tm.assert_series_equal(store.select('s'), xp)\n\n # dup with column\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', 'a', 't']))\n with pytest.raises(ValueError):\n store.append('df', df)\n\n # dup within level\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],\n index=make_index(['date', 'date', 'date']))\n with pytest.raises(ValueError):\n store.append('df', df)\n\n # fully names\n _maybe_remove(store, 'df')\n df = DataFrame(np.zeros((12, 2)), columns=[\n 'a', 'b'], index=make_index(['date', 's', 't']))\n store.append('df', df)\n tm.assert_frame_equal(store.select('df'), df)\n\n def test_select_columns_in_where(self):\n\n # GH 6169\n # recreate multi-indexes when columns is passed\n # in the `where` argument\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo_name', 'bar_name'])\n\n # With a DataFrame\n df = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table')\n expected = df[['A']]\n\n tm.assert_frame_equal(store.select('df', columns=['A']), expected)\n\n tm.assert_frame_equal(store.select(\n 'df', where=\"columns=['A']\"), expected)\n\n # With a Series\n s = Series(np.random.randn(10), index=index,\n name='A')\n with ensure_clean_store(self.path) as store:\n store.put('s', s, format='table')\n tm.assert_series_equal(store.select('s', where=\"columns=['A']\"), s)\n\n def test_mi_data_columns(self):\n # GH 14435\n idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', 
periods=5),\n range(5)], names=['date', 'id'])\n df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=True)\n\n actual = store.select('df', where='id == 1')\n expected = df.iloc[[1], :]\n tm.assert_frame_equal(actual, expected)\n\n def test_pass_spec_to_storer(self):\n\n df = tm.makeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df)\n with pytest.raises(TypeError):\n store.select('df', columns=['A'])\n with pytest.raises(TypeError):\n store.select('df', where=[('columns=A')])\n\n @xfail_non_writeable\n def test_append_misc(self):\n\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n store.append('df', df, chunksize=1)\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n store.append('df1', df, expectedrows=10)\n result = store.select('df1')\n tm.assert_frame_equal(result, df)\n\n # more chunksize in append tests\n def check(obj, comparator):\n for c in [10, 200, 1000]:\n with ensure_clean_store(self.path, mode='w') as store:\n store.append('obj', obj, chunksize=c)\n result = store.select('obj')\n comparator(result, obj)\n\n df = tm.makeDataFrame()\n df['string'] = 'foo'\n df['float322'] = 1.\n df['float322'] = df['float322'].astype('float32')\n df['bool'] = df['float322'] > 0\n df['time1'] = Timestamp('20130101')\n df['time2'] = Timestamp('20130102')\n check(df, tm.assert_frame_equal)\n\n # empty frame, GH4273\n with ensure_clean_store(self.path) as store:\n\n # 0 len\n df_empty = DataFrame(columns=list('ABC'))\n store.append('df', df_empty)\n with pytest.raises(KeyError):\n store.select('df')\n\n # repeated append of 0/non-zero frames\n df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))\n store.append('df', df)\n assert_frame_equal(store.select('df'), df)\n store.append('df', df_empty)\n assert_frame_equal(store.select('df'), df)\n\n # store\n df = DataFrame(columns=list('ABC'))\n store.put('df2', df)\n assert_frame_equal(store.select('df2'), df)\n\n def test_append_raise(self):\n\n with ensure_clean_store(self.path) as store:\n\n # test append with invalid input to get good error messages\n\n # list in column\n df = tm.makeDataFrame()\n df['invalid'] = [['a']] * len(df)\n assert df.dtypes['invalid'] == np.object_\n with pytest.raises(TypeError):\n store.append('df', df)\n\n # multiple invalid columns\n df['invalid2'] = [['a']] * len(df)\n df['invalid3'] = [['a']] * len(df)\n with pytest.raises(TypeError):\n store.append('df', df)\n\n # datetime with embedded nans as object\n df = tm.makeDataFrame()\n s = Series(datetime.datetime(2001, 1, 2), index=df.index)\n s = s.astype(object)\n s[0:5] = np.nan\n df['invalid'] = s\n assert df.dtypes['invalid'] == np.object_\n with pytest.raises(TypeError):\n store.append('df', df)\n\n # directly ndarray\n with pytest.raises(TypeError):\n store.append('df', np.arange(10))\n\n # series directly\n with pytest.raises(TypeError):\n store.append('df', Series(np.arange(10)))\n\n # appending an incompatible table\n df = tm.makeDataFrame()\n store.append('df', df)\n\n df['foo'] = 'foo'\n with pytest.raises(ValueError):\n store.append('df', df)\n\n def test_table_index_incompatible_dtypes(self):\n df1 = DataFrame({'a': [1, 2, 3]})\n df2 = DataFrame({'a': [4, 5, 6]},\n index=date_range('1/1/2000', periods=3))\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df1, format='table')\n with pytest.raises(TypeError):\n store.put('frame', df2, format='table', append=True)\n\n 
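Before the dtype round-trip tests that follow, note the two append behaviours exercised just above: `chunksize=` splits one logical append into several writes with an identical end result, and appending a frame whose columns no longer match the stored table is rejected. A small sketch under those assumptions (the path `chunked.h5` and key `big` are made up for illustration):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(1000, 3), columns=list('ABC'))

with pd.HDFStore('chunked.h5', mode='w') as store:
    # written in four chunks; selecting it back gives the full frame
    store.append('big', df, chunksize=250)
    assert len(store.select('big')) == len(df)

    # adding a column changes the table schema, so a further append fails
    df['extra'] = 'foo'
    try:
        store.append('big', df)
    except ValueError as err:
        print('append rejected:', err)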
def test_table_values_dtypes_roundtrip(self):\n\n with ensure_clean_store(self.path) as store:\n df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')\n store.append('df_f8', df1)\n assert_series_equal(df1.dtypes, store['df_f8'].dtypes)\n\n df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')\n store.append('df_i8', df2)\n assert_series_equal(df2.dtypes, store['df_i8'].dtypes)\n\n # incompatible dtype\n with pytest.raises(ValueError):\n store.append('df_i8', df1)\n\n # check creation/storage/retrieval of float32 (a bit hacky to\n # actually create them thought)\n df1 = DataFrame(\n np.array([[1], [2], [3]], dtype='f4'), columns=['A'])\n store.append('df_f4', df1)\n assert_series_equal(df1.dtypes, store['df_f4'].dtypes)\n assert df1.dtypes[0] == 'float32'\n\n # check with mixed dtypes\n df1 = DataFrame({c: Series(np.random.randint(5), dtype=c)\n for c in ['float32', 'float64', 'int32',\n 'int64', 'int16', 'int8']})\n df1['string'] = 'foo'\n df1['float322'] = 1.\n df1['float322'] = df1['float322'].astype('float32')\n df1['bool'] = df1['float32'] > 0\n df1['time1'] = Timestamp('20130101')\n df1['time2'] = Timestamp('20130102')\n\n store.append('df_mixed_dtypes1', df1)\n result = store.select('df_mixed_dtypes1').get_dtype_counts()\n expected = Series({'float32': 2, 'float64': 1, 'int32': 1,\n 'bool': 1, 'int16': 1, 'int8': 1,\n 'int64': 1, 'object': 1, 'datetime64[ns]': 2})\n result = result.sort_index()\n expected = expected.sort_index()\n tm.assert_series_equal(result, expected)\n\n def test_table_mixed_dtypes(self):\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['bool3'] = True\n df['int1'] = 1\n df['int2'] = 2\n df['timestamp1'] = Timestamp('20010102')\n df['timestamp2'] = Timestamp('20010103')\n df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)\n df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)\n df.loc[3:6, ['obj1']] = np.nan\n df = df._consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n store.append('df1_mixed', df)\n tm.assert_frame_equal(store.select('df1_mixed'), df)\n\n def test_unimplemented_dtypes_table_columns(self):\n\n with ensure_clean_store(self.path) as store:\n\n dtypes = [('date', datetime.date(2001, 1, 2))]\n\n # currently not supported dtypes ####\n for n, f in dtypes:\n df = tm.makeDataFrame()\n df[n] = f\n with pytest.raises(TypeError):\n store.append('df1_%s' % n, df)\n\n # frame\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['datetime1'] = datetime.date(2001, 1, 2)\n df = df._consolidate()._convert(datetime=True)\n\n with ensure_clean_store(self.path) as store:\n # this fails because we have a date in the object block......\n with pytest.raises(TypeError):\n store.append('df_unimplemented', df)\n\n @xfail_non_writeable\n @pytest.mark.skipif(\n LooseVersion(np.__version__) == LooseVersion('1.15.0'),\n reason=(\"Skipping pytables test when numpy version is \"\n \"exactly equal to 1.15.0: gh-22098\"))\n def test_calendar_roundtrip_issue(self):\n\n # 8591\n # doc example from tseries holiday section\n weekmask_egypt = 'Sun Mon Tue Wed Thu'\n holidays = ['2012-05-01',\n datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]\n bday_egypt = pd.offsets.CustomBusinessDay(\n holidays=holidays, weekmask=weekmask_egypt)\n dt = datetime.datetime(2013, 4, 30)\n dts = date_range(dt, periods=5, freq=bday_egypt)\n\n s = (Series(dts.weekday, dts).map(\n Series('Mon Tue Wed Thu Fri Sat Sun'.split())))\n\n with 
ensure_clean_store(self.path) as store:\n\n store.put('fixed', s)\n result = store.select('fixed')\n assert_series_equal(result, s)\n\n store.append('table', s)\n result = store.select('table')\n assert_series_equal(result, s)\n\n def test_roundtrip_tz_aware_index(self):\n # GH 17618\n time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')\n df = pd.DataFrame(data=[0], index=[time])\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='fixed')\n recons = store['frame']\n tm.assert_frame_equal(recons, df)\n assert recons.index[0].value == 946706400000000000\n\n def test_append_with_timedelta(self):\n # GH 3577\n # append timedelta\n\n df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(\n '20130101') + timedelta(days=i, seconds=10) for i in range(10)]))\n df['C'] = df['A'] - df['B']\n df.loc[3:5, 'C'] = np.nan\n\n with ensure_clean_store(self.path) as store:\n\n # table\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n result = store.select('df', where=\"C<100000\")\n assert_frame_equal(result, df)\n\n result = store.select('df', where=\"C<pd.Timedelta('-3D')\")\n assert_frame_equal(result, df.iloc[3:])\n\n result = store.select('df', \"C<'-3D'\")\n assert_frame_equal(result, df.iloc[3:])\n\n # a bit hacky here as we don't really deal with the NaT properly\n\n result = store.select('df', \"C<'-500000s'\")\n result = result.dropna(subset=['C'])\n assert_frame_equal(result, df.iloc[6:])\n\n result = store.select('df', \"C<'-3.5D'\")\n result = result.iloc[1:]\n assert_frame_equal(result, df.iloc[4:])\n\n # fixed\n _maybe_remove(store, 'df2')\n store.put('df2', df)\n result = store.select('df2')\n assert_frame_equal(result, df)\n\n def test_remove(self):\n\n with ensure_clean_store(self.path) as store:\n\n ts = tm.makeTimeSeries()\n df = tm.makeDataFrame()\n store['a'] = ts\n store['b'] = df\n _maybe_remove(store, 'a')\n assert len(store) == 1\n tm.assert_frame_equal(df, store['b'])\n\n _maybe_remove(store, 'b')\n assert len(store) == 0\n\n # nonexistence\n with pytest.raises(KeyError):\n store.remove('a_nonexistent_store')\n\n # pathing\n store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'foo')\n _maybe_remove(store, 'b/foo')\n assert len(store) == 1\n\n store['a'] = ts\n store['b/foo'] = df\n _maybe_remove(store, 'b')\n assert len(store) == 1\n\n # __delitem__\n store['a'] = ts\n store['b'] = df\n del store['a']\n del store['b']\n assert len(store) == 0\n\n def test_invalid_terms(self):\n\n with ensure_clean_store(self.path) as store:\n\n with catch_warnings(record=True):\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.loc[0:4, 'string'] = 'bar'\n\n store.put('df', df, format='table')\n\n # some invalid terms\n with pytest.raises(TypeError):\n Term()\n\n # more invalid\n with pytest.raises(ValueError):\n store.select('df', 'df.index[3]')\n\n with pytest.raises(SyntaxError):\n store.select('df', 'index>')\n\n # from the docs\n with ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10, 4), columns=list(\n 'ABCD'), index=date_range('20130101', periods=10))\n dfq.to_hdf(path, 'dfq', format='table', data_columns=True)\n\n # check ok\n read_hdf(path, 'dfq',\n where=\"index>Timestamp('20130104') & columns=['A', 'B']\")\n read_hdf(path, 'dfq', where=\"A>0 or C>0\")\n\n # catch the invalid reference\n with ensure_clean_path(self.path) as path:\n dfq = DataFrame(np.random.randn(10, 4), columns=list(\n 'ABCD'), 
index=date_range('20130101', periods=10))\n dfq.to_hdf(path, 'dfq', format='table')\n\n with pytest.raises(ValueError):\n read_hdf(path, 'dfq', where=\"A>0 or C>0\")\n\n def test_same_name_scoping(self):\n\n with ensure_clean_store(self.path) as store:\n\n import pandas as pd\n df = DataFrame(np.random.randn(20, 2),\n index=pd.date_range('20130101', periods=20))\n store.put('df', df, format='table')\n expected = df[df.index > pd.Timestamp('20130105')]\n\n import datetime # noqa\n result = store.select('df', 'index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n from datetime import datetime # noqa\n\n # technically an error, but allow it\n result = store.select('df', 'index>datetime.datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n result = store.select('df', 'index>datetime(2013,1,5)')\n assert_frame_equal(result, expected)\n\n def test_series(self):\n\n s = tm.makeStringSeries()\n self._check_roundtrip(s, tm.assert_series_equal)\n\n ts = tm.makeTimeSeries()\n self._check_roundtrip(ts, tm.assert_series_equal)\n\n ts2 = Series(ts.index, Index(ts.index, dtype=object))\n self._check_roundtrip(ts2, tm.assert_series_equal)\n\n ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),\n dtype=object))\n self._check_roundtrip(ts3, tm.assert_series_equal,\n check_index_type=False)\n\n def test_sparse_series(self):\n\n s = tm.makeStringSeries()\n s.iloc[3:5] = np.nan\n ss = s.to_sparse()\n self._check_roundtrip(ss, tm.assert_series_equal,\n check_series_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_roundtrip(ss2, tm.assert_series_equal,\n check_series_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_roundtrip(ss3, tm.assert_series_equal,\n check_series_type=True)\n\n def test_sparse_frame(self):\n\n s = tm.makeDataFrame()\n s.iloc[3:5, 1:3] = np.nan\n s.iloc[8:10, -2] = np.nan\n ss = s.to_sparse()\n\n self._check_double_roundtrip(ss, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss2 = s.to_sparse(kind='integer')\n self._check_double_roundtrip(ss2, tm.assert_frame_equal,\n check_frame_type=True)\n\n ss3 = s.to_sparse(fill_value=0)\n self._check_double_roundtrip(ss3, tm.assert_frame_equal,\n check_frame_type=True)\n\n def test_float_index(self):\n\n # GH #454\n index = np.random.randn(10)\n s = Series(np.random.randn(10), index=index)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n @xfail_non_writeable\n def test_tuple_index(self):\n\n # GH #492\n col = np.arange(10)\n idx = [(0., 1.), (2., 3.), (4., 5.)]\n data = np.random.randn(30).reshape((3, 10))\n DF = DataFrame(data, index=idx, columns=col)\n\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n self._check_roundtrip(DF, tm.assert_frame_equal)\n\n @xfail_non_writeable\n @pytest.mark.filterwarnings(\"ignore::pandas.errors.PerformanceWarning\")\n def test_index_types(self):\n\n with catch_warnings(record=True):\n values = np.random.randn(2)\n\n func = lambda l, r: tm.assert_series_equal(l, r,\n check_dtype=True,\n check_index_type=True,\n check_series_type=True)\n\n with catch_warnings(record=True):\n ser = Series(values, [0, 'y'])\n self._check_roundtrip(ser, func)\n\n with catch_warnings(record=True):\n ser = Series(values, [datetime.datetime.today(), 0])\n self._check_roundtrip(ser, func)\n\n with catch_warnings(record=True):\n ser = Series(values, ['y', 0])\n self._check_roundtrip(ser, func)\n\n with catch_warnings(record=True):\n ser = Series(values, [datetime.date.today(), 'a'])\n self._check_roundtrip(ser, 
func)\n\n with catch_warnings(record=True):\n\n ser = Series(values, [0, 'y'])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [datetime.datetime.today(), 0])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, ['y', 0])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [datetime.date.today(), 'a'])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1.23, 'b'])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 1.53])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [1, 5])\n self._check_roundtrip(ser, func)\n\n ser = Series(values, [datetime.datetime(\n 2012, 1, 1), datetime.datetime(2012, 1, 2)])\n self._check_roundtrip(ser, func)\n\n def test_timeseries_preepoch(self):\n\n dr = bdate_range('1/1/1940', '1/1/1960')\n ts = Series(np.random.randn(len(dr)), index=dr)\n try:\n self._check_roundtrip(ts, tm.assert_series_equal)\n except OverflowError:\n pytest.skip('known failer on some windows platforms')\n\n @xfail_non_writeable\n @pytest.mark.parametrize(\"compression\", [\n False, pytest.param(True, marks=td.skip_if_windows_python_3)\n ])\n def test_frame(self, compression):\n\n df = tm.makeDataFrame()\n\n # put in some random NAs\n df.values[0, 0] = np.nan\n df.values[5, 3] = np.nan\n\n self._check_roundtrip_table(df, tm.assert_frame_equal,\n compression=compression)\n self._check_roundtrip(df, tm.assert_frame_equal,\n compression=compression)\n\n tdf = tm.makeTimeDataFrame()\n self._check_roundtrip(tdf, tm.assert_frame_equal,\n compression=compression)\n\n with ensure_clean_store(self.path) as store:\n # not consolidated\n df['foo'] = np.random.randn(len(df))\n store['df'] = df\n recons = store['df']\n assert recons._data.is_consolidated()\n\n # empty\n self._check_roundtrip(df[:0], tm.assert_frame_equal)\n\n @xfail_non_writeable\n def test_empty_series_frame(self):\n s0 = Series()\n s1 = Series(name='myseries')\n df0 = DataFrame()\n df1 = DataFrame(index=['a', 'b', 'c'])\n df2 = DataFrame(columns=['d', 'e', 'f'])\n\n self._check_roundtrip(s0, tm.assert_series_equal)\n self._check_roundtrip(s1, tm.assert_series_equal)\n self._check_roundtrip(df0, tm.assert_frame_equal)\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n @xfail_non_writeable\n @pytest.mark.parametrize(\n 'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]'])\n def test_empty_series(self, dtype):\n s = Series(dtype=dtype)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_can_serialize_dates(self):\n\n rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n\n def test_store_hierarchical(self):\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['foo', 'bar'])\n frame = DataFrame(np.random.randn(10, 3), index=index,\n columns=['A', 'B', 'C'])\n\n self._check_roundtrip(frame, tm.assert_frame_equal)\n self._check_roundtrip(frame.T, tm.assert_frame_equal)\n self._check_roundtrip(frame['A'], tm.assert_series_equal)\n\n # check that the names are stored\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n tm.assert_frame_equal(recons, frame)\n\n def test_store_index_name(self):\n df = tm.makeDataFrame()\n df.index.name = 'foo'\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = df\n 
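The round trips in this stretch use dictionary-style access, which is shorthand for a fixed-format `put`; metadata such as the index name survives the trip, as the read-back a few lines below asserts. A self-contained sketch of the same pattern (the file name `roundtrip.h5` is illustrative only):

import pandas as pd

df = pd.DataFrame({'A': [1.0, 2.0, 3.0]})
df.index.name = 'foo'

with pd.HDFStore('roundtrip.h5', mode='w') as store:
    store['frame'] = df              # equivalent to store.put('frame', df)
    recons = store['frame']

pd.testing.assert_frame_equal(recons, df)
assert recons.index.name == 'foo'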
recons = store['frame']\n tm.assert_frame_equal(recons, df)\n\n def test_store_index_name_with_tz(self):\n # GH 13884\n df = pd.DataFrame({'A': [1, 2]})\n df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])\n df.index = df.index.tz_localize('UTC')\n df.index.name = 'foo'\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n recons = store['frame']\n tm.assert_frame_equal(recons, df)\n\n @pytest.mark.parametrize('table_format', ['table', 'fixed'])\n def test_store_index_name_numpy_str(self, table_format):\n # GH #13492\n idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),\n datetime.date(2000, 1, 2)]),\n name='cols\\u05d2')\n idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),\n datetime.date(2010, 1, 2)]),\n name='rows\\u05d0')\n df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)\n\n # This used to fail, returning numpy strings instead of python strings.\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format=table_format)\n df2 = read_hdf(path, 'df')\n\n assert_frame_equal(df, df2, check_names=True)\n\n assert type(df2.index.name) == str\n assert type(df2.columns.name) == str\n\n def test_store_series_name(self):\n df = tm.makeDataFrame()\n series = df['A']\n\n with ensure_clean_store(self.path) as store:\n store['series'] = series\n recons = store['series']\n tm.assert_series_equal(recons, series)\n\n @xfail_non_writeable\n @pytest.mark.parametrize(\"compression\", [\n False, pytest.param(True, marks=td.skip_if_windows_python_3)\n ])\n def test_store_mixed(self, compression):\n\n def _make_one():\n df = tm.makeDataFrame()\n df['obj1'] = 'foo'\n df['obj2'] = 'bar'\n df['bool1'] = df['A'] > 0\n df['bool2'] = df['B'] > 0\n df['int1'] = 1\n df['int2'] = 2\n return df._consolidate()\n\n df1 = _make_one()\n df2 = _make_one()\n\n self._check_roundtrip(df1, tm.assert_frame_equal)\n self._check_roundtrip(df2, tm.assert_frame_equal)\n\n with ensure_clean_store(self.path) as store:\n store['obj'] = df1\n tm.assert_frame_equal(store['obj'], df1)\n store['obj'] = df2\n tm.assert_frame_equal(store['obj'], df2)\n\n # check that can store Series of all of these types\n self._check_roundtrip(df1['obj1'], tm.assert_series_equal,\n compression=compression)\n self._check_roundtrip(df1['bool1'], tm.assert_series_equal,\n compression=compression)\n self._check_roundtrip(df1['int1'], tm.assert_series_equal,\n compression=compression)\n\n @pytest.mark.filterwarnings(\n \"ignore:\\\\nduplicate:pandas.io.pytables.DuplicateWarning\"\n )\n def test_select_with_dups(self):\n\n # single dtypes\n df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])\n df.index = date_range('20130101 9:30', periods=10, freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n\n result = store.select('df')\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=df.columns)\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=['A'])\n expected = df.loc[:, ['A']]\n assert_frame_equal(result, expected)\n\n # dups across dtypes\n df = concat([DataFrame(np.random.randn(10, 4),\n columns=['A', 'A', 'B', 'B']),\n DataFrame(np.random.randint(0, 10, size=20)\n .reshape(10, 2),\n columns=['A', 'C'])],\n axis=1)\n df.index = date_range('20130101 9:30', periods=10, freq='T')\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n\n result = store.select('df')\n 
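`select` on a table-format node supports both a `columns=` filter and a `where=` query; the duplicate-column comparisons that exercise this continue directly below. A compact sketch of both, using made-up names (`select.h5`, key `df`):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
                  index=pd.date_range('2013-01-01', periods=8))

with pd.HDFStore('select.h5', mode='w') as store:
    # 'A' is declared a data column so it can be referenced in the where clause
    store.append('df', df, data_columns=['A'])
    subset = store.select('df', where='A > 0', columns=['A', 'B'])

print(subset.equals(df.loc[df.A > 0, ['A', 'B']]))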
expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n result = store.select('df', columns=df.columns)\n expected = df\n assert_frame_equal(result, expected, by_blocks=True)\n\n expected = df.loc[:, ['A']]\n result = store.select('df', columns=['A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n expected = df.loc[:, ['B', 'A']]\n result = store.select('df', columns=['B', 'A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n # duplicates on both index and columns\n with ensure_clean_store(self.path) as store:\n store.append('df', df)\n store.append('df', df)\n\n expected = df.loc[:, ['B', 'A']]\n expected = concat([expected, expected])\n result = store.select('df', columns=['B', 'A'])\n assert_frame_equal(result, expected, by_blocks=True)\n\n def test_overwrite_node(self):\n\n with ensure_clean_store(self.path) as store:\n store['a'] = tm.makeTimeDataFrame()\n ts = tm.makeTimeSeries()\n store['a'] = ts\n\n tm.assert_series_equal(store['a'], ts)\n\n def test_sparse_with_compression(self):\n\n # GH 2931\n\n # make sparse dataframe\n arr = np.random.binomial(n=1, p=.01, size=(1000, 10))\n df = DataFrame(arr).to_sparse(fill_value=0)\n\n # case 1: store uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression=False,\n check_frame_type=True)\n\n # case 2: store compressed (works)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression='zlib',\n check_frame_type=True)\n\n # set one series to be completely sparse\n df[0] = np.zeros(1000)\n\n # case 3: store df with completely sparse series uncompressed\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression=False,\n check_frame_type=True)\n\n # case 4: try storing df with completely sparse series compressed\n # (fails)\n self._check_double_roundtrip(df, tm.assert_frame_equal,\n compression='zlib',\n check_frame_type=True)\n\n def test_select(self):\n\n with ensure_clean_store(self.path) as store:\n\n with catch_warnings(record=True):\n\n # select with columns=\n df = tm.makeTimeDataFrame()\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df', columns=['A', 'B'])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # equivalentsly\n result = store.select('df', [(\"columns=['A', 'B']\")])\n expected = df.reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # all a data columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n result = store.select('df', ['A > 0'], columns=['A', 'B'])\n expected = df[df.A > 0].reindex(columns=['A', 'B'])\n tm.assert_frame_equal(expected, result)\n\n # with a data column, but different columns\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['A'])\n result = store.select('df', ['A > 0'], columns=['C', 'D'])\n expected = df[df.A > 0].reindex(columns=['C', 'D'])\n tm.assert_frame_equal(expected, result)\n\n def test_select_dtypes(self):\n\n with ensure_clean_store(self.path) as store:\n # with a Timestamp data column (GH #2637)\n df = DataFrame(dict(\n ts=bdate_range('2012-01-01', periods=300),\n A=np.random.randn(300)))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A'])\n\n result = store.select('df', 
\"ts>=Timestamp('2012-02-01')\")\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # bool columns (GH #2849)\n df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])\n df['object'] = 'foo'\n df.loc[4:5, 'object'] = 'bar'\n df['boolv'] = df['A'] > 0\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=True)\n\n expected = (df[df.boolv == True] # noqa\n .reindex(columns=['A', 'boolv']))\n for v in [True, 'true', 1]:\n result = store.select('df', 'boolv == %s' % str(v),\n columns=['A', 'boolv'])\n tm.assert_frame_equal(expected, result)\n\n expected = (df[df.boolv == False] # noqa\n .reindex(columns=['A', 'boolv']))\n for v in [False, 'false', 0]:\n result = store.select(\n 'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])\n tm.assert_frame_equal(expected, result)\n\n # integer index\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n _maybe_remove(store, 'df_int')\n store.append('df_int', df)\n result = store.select(\n 'df_int', \"index<10 and columns=['A']\")\n expected = df.reindex(index=list(df.index)[0:10], columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n # float index\n df = DataFrame(dict(A=np.random.rand(\n 20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))\n _maybe_remove(store, 'df_float')\n store.append('df_float', df)\n result = store.select(\n 'df_float', \"index<10.0 and columns=['A']\")\n expected = df.reindex(index=list(df.index)[0:10], columns=['A'])\n tm.assert_frame_equal(expected, result)\n\n with ensure_clean_store(self.path) as store:\n\n # floats w/o NaN\n df = DataFrame(\n dict(cols=range(11), values=range(11)), dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n\n store.append('df1', df, data_columns=True)\n result = store.select(\n 'df1', where='values>2.0')\n expected = df[df['values'] > 2.0]\n tm.assert_frame_equal(expected, result)\n\n # floats with NaN\n df.iloc[0] = np.nan\n expected = df[df['values'] > 2.0]\n\n store.append('df2', df, data_columns=True, index=False)\n result = store.select(\n 'df2', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n # https://github.com/PyTables/PyTables/issues/282\n # bug in selection when 0th row has a np.nan and an index\n # store.append('df3',df,data_columns=True)\n # result = store.select(\n # 'df3', where='values>2.0')\n # tm.assert_frame_equal(expected, result)\n\n # not in first position float with NaN ok too\n df = DataFrame(\n dict(cols=range(11), values=range(11)), dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n\n df.iloc[1] = np.nan\n expected = df[df['values'] > 2.0]\n\n store.append('df4', df, data_columns=True)\n result = store.select(\n 'df4', where='values>2.0')\n tm.assert_frame_equal(expected, result)\n\n # test selection with comparison against numpy scalar\n # GH 11283\n with ensure_clean_store(self.path) as store:\n df = tm.makeDataFrame()\n\n expected = df[df['A'] > 0]\n\n store.append('df', df, data_columns=True)\n np_zero = np.float64(0) # noqa\n result = store.select('df', where=[\"A>np_zero\"])\n tm.assert_frame_equal(expected, result)\n\n def test_select_with_many_inputs(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),\n A=np.random.randn(300),\n B=range(300),\n users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +\n ['a%03d' % i for i in range(100)]))\n _maybe_remove(store, 'df')\n store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])\n\n # regular select\n result = 
store.select('df', \"ts>=Timestamp('2012-02-01')\")\n expected = df[df.ts >= Timestamp('2012-02-01')]\n tm.assert_frame_equal(expected, result)\n\n # small selector\n result = store.select(\n 'df',\n \"ts>=Timestamp('2012-02-01') & users=['a','b','c']\")\n expected = df[(df.ts >= Timestamp('2012-02-01')) &\n df.users.isin(['a', 'b', 'c'])]\n tm.assert_frame_equal(expected, result)\n\n # big selector along the columns\n selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]\n result = store.select(\n 'df',\n \"ts>=Timestamp('2012-02-01') and users=selector\")\n expected = df[(df.ts >= Timestamp('2012-02-01')) &\n df.users.isin(selector)]\n tm.assert_frame_equal(expected, result)\n\n selector = range(100, 200)\n result = store.select('df', 'B=selector')\n expected = df[df.B.isin(selector)]\n tm.assert_frame_equal(expected, result)\n assert len(result) == 100\n\n # big selector along the index\n selector = Index(df.ts[0:100].values)\n result = store.select('df', 'ts=selector')\n expected = df[df.ts.isin(selector.values)]\n tm.assert_frame_equal(expected, result)\n assert len(result) == 100\n\n def test_select_iterator(self):\n\n # single table\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame(500)\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n expected = store.select('df')\n\n results = [s for s in store.select('df', iterator=True)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n results = [s for s in store.select('df', chunksize=100)]\n assert len(results) == 5\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n results = [s for s in store.select('df', chunksize=150)]\n result = concat(results)\n tm.assert_frame_equal(result, expected)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path, 'df_non_table')\n\n with pytest.raises(TypeError):\n read_hdf(path, 'df_non_table', chunksize=100)\n\n with pytest.raises(TypeError):\n read_hdf(path, 'df_non_table', iterator=True)\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeTimeDataFrame(500)\n df.to_hdf(path, 'df', format='table')\n\n results = [s for s in read_hdf(path, 'df', chunksize=100)]\n result = concat(results)\n\n assert len(results) == 5\n tm.assert_frame_equal(result, df)\n tm.assert_frame_equal(result, read_hdf(path, 'df'))\n\n # multiple\n\n with ensure_clean_store(self.path) as store:\n\n df1 = tm.makeTimeDataFrame(500)\n store.append('df1', df1, data_columns=True)\n df2 = tm.makeTimeDataFrame(500).rename(\n columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n store.append('df2', df2)\n\n df = concat([df1, df2], axis=1)\n\n # full selection\n expected = store.select_as_multiple(\n ['df1', 'df2'], selector='df1')\n results = [s for s in store.select_as_multiple(\n ['df1', 'df2'], selector='df1', chunksize=150)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n def test_select_iterator_complete_8014(self):\n\n # GH 8014\n # using iterator and where clause\n chunksize = 1e4\n\n # no iterator\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[-1]\n\n # select w/o iteration and no where clause works\n result = store.select('df')\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, single term, begin\n # of range, works\n where = \"index >= '%s'\" % beg_dt\n result = 
store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, single term, end\n # of range, works\n where = \"index <= '%s'\" % end_dt\n result = store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # select w/o iterator and where clause, inclusive range,\n # works\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n result = store.select('df', where=where)\n tm.assert_frame_equal(expected, result)\n\n # with iterator, full range\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[-1]\n\n # select w/iterator and no where clause works\n results = [s for s in store.select('df', chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n tm.assert_frame_equal(expected, result)\n\n def test_select_iterator_non_complete_8014(self):\n\n # GH 8014\n # using iterator and where clause\n chunksize = 1e4\n\n # with iterator, non complete range\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[1]\n end_dt = expected.index[-2]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index >= beg_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index <= end_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[(expected.index >= beg_dt) &\n (expected.index <= end_dt)]\n tm.assert_frame_equal(rexpected, result)\n\n # with iterator, empty where\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100064, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n end_dt = expected.index[-1]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index > '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n assert 0 == len(results)\n\n def test_select_iterator_many_empty_frames(self):\n\n # GH 
8014\n # using iterator and where clause can return many empty\n # frames.\n chunksize = int(1e4)\n\n # with iterator, range limited to the first chunk\n with ensure_clean_store(self.path) as store:\n\n expected = tm.makeTimeDataFrame(100000, 'S')\n _maybe_remove(store, 'df')\n store.append('df', expected)\n\n beg_dt = expected.index[0]\n end_dt = expected.index[chunksize - 1]\n\n # select w/iterator and where clause, single term, begin of range\n where = \"index >= '%s'\" % beg_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n result = concat(results)\n rexpected = expected[expected.index >= beg_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, single term, end of range\n where = \"index <= '%s'\" % end_dt\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n assert len(results) == 1\n result = concat(results)\n rexpected = expected[expected.index <= end_dt]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause, inclusive range\n where = \"index >= '%s' & index <= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n # should be 1, is 10\n assert len(results) == 1\n result = concat(results)\n rexpected = expected[(expected.index >= beg_dt) &\n (expected.index <= end_dt)]\n tm.assert_frame_equal(rexpected, result)\n\n # select w/iterator and where clause which selects\n # *nothing*.\n #\n # To be consistent with Python idiom I suggest this should\n # return [] e.g. `for e in []: print True` never prints\n # True.\n\n where = \"index <= '%s' & index >= '%s'\" % (beg_dt, end_dt)\n results = [s for s in store.select(\n 'df', where=where, chunksize=chunksize)]\n\n # should be []\n assert len(results) == 0\n\n @pytest.mark.filterwarnings(\n \"ignore:\\\\nthe :pandas.io.pytables.AttributeConflictWarning\"\n )\n def test_retain_index_attributes(self):\n\n # GH 3499, losing frequency info on index recreation\n df = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2000-1-1', periods=3, freq='H'))))\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'data')\n store.put('data', df, format='table')\n\n result = store.get('data')\n tm.assert_frame_equal(df, result)\n\n for attr in ['freq', 'tz', 'name']:\n for idx in ['index', 'columns']:\n assert (getattr(getattr(df, idx), attr, None) ==\n getattr(getattr(result, idx), attr, None))\n\n # try to append a table with a different frequency\n with catch_warnings(record=True):\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2002-1-1',\n periods=3, freq='D'))))\n store.append('data', df2)\n\n assert store.get_storer('data').info['index']['freq'] is None\n\n # this is ok\n _maybe_remove(store, 'df2')\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n index=[Timestamp('20010101'), Timestamp('20010102'),\n Timestamp('20020101')])))\n store.append('df2', df2)\n df3 = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2002-1-1', periods=3,\n freq='D'))))\n store.append('df2', df3)\n\n @pytest.mark.filterwarnings(\n \"ignore:\\\\nthe :pandas.io.pytables.AttributeConflictWarning\"\n )\n def test_retain_index_attributes2(self):\n with ensure_clean_path(self.path) as path:\n\n with catch_warnings(record=True):\n\n df = DataFrame(dict(\n A=Series(lrange(3),\n index=date_range('2000-1-1',\n periods=3, freq='H'))))\n df.to_hdf(path, 'data', mode='w', append=True)\n df2 = DataFrame(dict(\n A=Series(lrange(3),\n 
index=date_range('2002-1-1', periods=3,\n freq='D'))))\n df2.to_hdf(path, 'data', append=True)\n\n idx = date_range('2000-1-1', periods=3, freq='H')\n idx.name = 'foo'\n df = DataFrame(dict(A=Series(lrange(3), index=idx)))\n df.to_hdf(path, 'data', mode='w', append=True)\n\n assert read_hdf(path, 'data').index.name == 'foo'\n\n with catch_warnings(record=True):\n\n idx2 = date_range('2001-1-1', periods=3, freq='H')\n idx2.name = 'bar'\n df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))\n df2.to_hdf(path, 'data', append=True)\n\n assert read_hdf(path, 'data').index.name is None\n\n def test_frame_select(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n date = df.index[len(df) // 2]\n\n crit1 = Term('index>=date')\n assert crit1.env.scope['date'] == date\n\n crit2 = (\"columns=['A', 'D']\")\n crit3 = ('columns=A')\n\n result = store.select('frame', [crit1, crit2])\n expected = df.loc[date:, ['A', 'D']]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('frame', [crit3])\n expected = df.loc[:, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # invalid terms\n df = tm.makeTimeDataFrame()\n store.append('df_time', df)\n with pytest.raises(ValueError):\n store.select('df_time', \"index>0\")\n\n # can't select if not written as table\n # store['frame'] = df\n # with pytest.raises(ValueError):\n # store.select('frame', [crit1, crit2])\n\n def test_frame_select_complex(self):\n # select via complex criteria\n\n df = tm.makeTimeDataFrame()\n df['string'] = 'foo'\n df.loc[df.index[0:4], 'string'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', data_columns=['string'])\n\n # empty\n result = store.select('df', 'index>df.index[3] & string=\"bar\"')\n expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', 'index>df.index[3] & string=\"foo\"')\n expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]\n tm.assert_frame_equal(result, expected)\n\n # or\n result = store.select('df', 'index>df.index[3] | string=\"bar\"')\n expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n result = store.select('df', '(index>df.index[3] & '\n 'index<=df.index[6]) | string=\"bar\"')\n expected = df.loc[((df.index > df.index[3]) & (\n df.index <= df.index[6])) | (df.string == 'bar')]\n tm.assert_frame_equal(result, expected)\n\n # invert\n result = store.select('df', 'string!=\"bar\"')\n expected = df.loc[df.string != 'bar']\n tm.assert_frame_equal(result, expected)\n\n # invert not implemented in numexpr :(\n with pytest.raises(NotImplementedError):\n store.select('df', '~(string=\"bar\")')\n\n # invert ok for filters\n result = store.select('df', \"~(columns=['A','B'])\")\n expected = df.loc[:, df.columns.difference(['A', 'B'])]\n tm.assert_frame_equal(result, expected)\n\n # in\n result = store.select(\n 'df', \"index>df.index[3] & columns in ['A','B']\")\n expected = df.loc[df.index > df.index[3]].reindex(columns=[\n 'A', 'B'])\n tm.assert_frame_equal(result, expected)\n\n def test_frame_select_complex2(self):\n\n with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:\n\n pp, hh = paths\n\n # use non-trivial selection criteria\n parms = DataFrame({'A': [1, 1, 2, 2, 3]})\n parms.to_hdf(pp, 'df', mode='w',\n format='table', data_columns=['A'])\n\n selection = read_hdf(pp, 'df', where='A=[2,3]')\n hist = 
DataFrame(np.random.randn(25, 1),\n columns=['data'],\n index=MultiIndex.from_tuples(\n [(i, j) for i in range(5)\n for j in range(5)],\n names=['l1', 'l2']))\n\n hist.to_hdf(hh, 'df', mode='w', format='table')\n\n expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')\n\n # sccope with list like\n l = selection.index.tolist() # noqa\n store = HDFStore(hh)\n result = store.select('df', where='l1=l')\n assert_frame_equal(result, expected)\n store.close()\n\n result = read_hdf(hh, 'df', where='l1=l')\n assert_frame_equal(result, expected)\n\n # index\n index = selection.index # noqa\n result = read_hdf(hh, 'df', where='l1=index')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh, 'df', where='l1=selection.index')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = read_hdf(hh, 'df', where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n # sccope with index\n store = HDFStore(hh)\n\n result = store.select('df', where='l1=index')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=selection.index')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=selection.index.tolist()')\n assert_frame_equal(result, expected)\n\n result = store.select('df', where='l1=list(selection.index)')\n assert_frame_equal(result, expected)\n\n store.close()\n\n def test_invalid_filtering(self):\n\n # can't use more than one filter (atm)\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table')\n\n # not implemented\n with pytest.raises(NotImplementedError):\n store.select('df', \"columns=['A'] | columns=['B']\")\n\n # in theory we could deal with this\n with pytest.raises(NotImplementedError):\n store.select('df', \"columns=['A','B'] & columns=['C']\")\n\n def test_string_select(self):\n # GH 2973\n with ensure_clean_store(self.path) as store:\n\n df = tm.makeTimeDataFrame()\n\n # test string ==/!=\n df['x'] = 'none'\n df.loc[2:7, 'x'] = ''\n\n store.append('df', df, data_columns=['x'])\n\n result = store.select('df', 'x=none')\n expected = df[df.x == 'none']\n assert_frame_equal(result, expected)\n\n try:\n result = store.select('df', 'x!=none')\n expected = df[df.x != 'none']\n assert_frame_equal(result, expected)\n except Exception as detail:\n pprint_thing(\"[{0}]\".format(detail))\n pprint_thing(store)\n pprint_thing(expected)\n\n df2 = df.copy()\n df2.loc[df2.x == '', 'x'] = np.nan\n\n store.append('df2', df2, data_columns=['x'])\n result = store.select('df2', 'x!=none')\n expected = df2[isna(df2.x)]\n assert_frame_equal(result, expected)\n\n # int ==/!=\n df['int'] = 1\n df.loc[2:7, 'int'] = 2\n\n store.append('df3', df, data_columns=['int'])\n\n result = store.select('df3', 'int=2')\n expected = df[df.int == 2]\n assert_frame_equal(result, expected)\n\n result = store.select('df3', 'int!=2')\n expected = df[df.int != 2]\n assert_frame_equal(result, expected)\n\n def test_read_column(self):\n\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n _maybe_remove(store, 'df')\n\n # GH 17912\n # HDFStore.select_column should raise a KeyError\n # exception if the key is not a valid store\n with pytest.raises(KeyError,\n match='No object named df in the file'):\n store.select_column('df', 'index')\n\n store.append('df', df)\n # error\n with pytest.raises(KeyError):\n store.select_column('df', 'foo')\n\n with 
pytest.raises(Exception):\n store.select_column('df', 'index', where=['index>5'])\n\n # valid\n result = store.select_column('df', 'index')\n tm.assert_almost_equal(result.values, Series(df.index).values)\n assert isinstance(result, Series)\n\n # not a data indexable column\n with pytest.raises(ValueError):\n store.select_column('df', 'values_block_0')\n\n # a data column\n df2 = df.copy()\n df2['string'] = 'foo'\n store.append('df2', df2, data_columns=['string'])\n result = store.select_column('df2', 'string')\n tm.assert_almost_equal(result.values, df2['string'].values)\n\n # a data column with NaNs, result excludes the NaNs\n df3 = df.copy()\n df3['string'] = 'foo'\n df3.loc[4:6, 'string'] = np.nan\n store.append('df3', df3, data_columns=['string'])\n result = store.select_column('df3', 'string')\n tm.assert_almost_equal(result.values, df3['string'].values)\n\n # start/stop\n result = store.select_column('df3', 'string', start=2)\n tm.assert_almost_equal(result.values, df3['string'].values[2:])\n\n result = store.select_column('df3', 'string', start=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[-2:])\n\n result = store.select_column('df3', 'string', stop=2)\n tm.assert_almost_equal(result.values, df3['string'].values[:2])\n\n result = store.select_column('df3', 'string', stop=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[:-2])\n\n result = store.select_column('df3', 'string', start=2, stop=-2)\n tm.assert_almost_equal(result.values, df3['string'].values[2:-2])\n\n result = store.select_column('df3', 'string', start=-2, stop=2)\n tm.assert_almost_equal(result.values, df3['string'].values[-2:2])\n\n # GH 10392 - make sure column name is preserved\n df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})\n store.append('df4', df4, data_columns=True)\n expected = df4['B']\n result = store.select_column('df4', 'B')\n tm.assert_series_equal(result, expected)\n\n def test_coordinates(self):\n df = tm.makeTimeDataFrame()\n\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n\n # all\n c = store.select_as_coordinates('df')\n assert((c.values == np.arange(len(df.index))).all())\n\n # get coordinates back & test vs frame\n _maybe_remove(store, 'df')\n\n df = DataFrame(dict(A=lrange(5), B=lrange(5)))\n store.append('df', df)\n c = store.select_as_coordinates('df', ['index<3'])\n assert((c.values == np.arange(3)).all())\n result = store.select('df', where=c)\n expected = df.loc[0:2, :]\n tm.assert_frame_equal(result, expected)\n\n c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])\n assert((c.values == np.arange(2) + 3).all())\n result = store.select('df', where=c)\n expected = df.loc[3:4, :]\n tm.assert_frame_equal(result, expected)\n assert isinstance(c, Index)\n\n # multiple tables\n _maybe_remove(store, 'df1')\n _maybe_remove(store, 'df2')\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n c = store.select_as_coordinates('df1', ['A>0', 'B>0'])\n df1_result = store.select('df1', c)\n df2_result = store.select('df2', c)\n result = concat([df1_result, df2_result], axis=1)\n\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # pass array/mask as the coordinates\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(np.random.randn(1000, 2),\n 
index=date_range('20000101', periods=1000))\n store.append('df', df)\n c = store.select_column('df', 'index')\n where = c[DatetimeIndex(c).month == 5].index\n expected = df.iloc[where]\n\n # locations\n result = store.select('df', where=where)\n tm.assert_frame_equal(result, expected)\n\n # boolean\n result = store.select('df', where=where)\n tm.assert_frame_equal(result, expected)\n\n # invalid\n with pytest.raises(ValueError):\n store.select('df', where=np.arange(len(df), dtype='float64'))\n\n with pytest.raises(ValueError):\n store.select('df', where=np.arange(len(df) + 1))\n\n with pytest.raises(ValueError):\n store.select('df', where=np.arange(len(df)), start=5)\n\n with pytest.raises(ValueError):\n store.select('df', where=np.arange(len(df)), start=5, stop=10)\n\n # selection with filter\n selection = date_range('20000101', periods=500)\n result = store.select('df', where='index in selection')\n expected = df[df.index.isin(selection)]\n tm.assert_frame_equal(result, expected)\n\n # list\n df = DataFrame(np.random.randn(10, 2))\n store.append('df2', df)\n result = store.select('df2', where=[0, 3, 5])\n expected = df.iloc[[0, 3, 5]]\n tm.assert_frame_equal(result, expected)\n\n # boolean\n where = [True] * 10\n where[-2] = False\n result = store.select('df2', where=where)\n expected = df.loc[where]\n tm.assert_frame_equal(result, expected)\n\n # start/stop\n result = store.select('df2', start=5, stop=10)\n expected = df[5:10]\n tm.assert_frame_equal(result, expected)\n\n def test_append_to_multiple(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n\n # exceptions\n with pytest.raises(ValueError):\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df3')\n\n with pytest.raises(ValueError):\n store.append_to_multiple(\n {'df1': None, 'df2': None}, df, selector='df3')\n\n with pytest.raises(ValueError):\n store.append_to_multiple('df1', df, 'df1')\n\n # regular operation\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1')\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = df[(df.A > 0) & (df.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n def test_append_to_multiple_dropna(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n\n # dropna=True should guarantee rows are synchronized\n store.append_to_multiple(\n {'df1': ['A', 'B'], 'df2': None}, df, selector='df1',\n dropna=True)\n result = store.select_as_multiple(['df1', 'df2'])\n expected = df.dropna()\n tm.assert_frame_equal(result, expected)\n tm.assert_index_equal(store.select('df1').index,\n store.select('df2').index)\n\n @pytest.mark.xfail(run=False,\n reason=\"append_to_multiple_dropna_false \"\n \"is not raising as failed\")\n def test_append_to_multiple_dropna_false(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan\n df = concat([df1, df2], axis=1)\n\n with ensure_clean_store(self.path) as store:\n\n # dropna=False shouldn't synchronize row indexes\n store.append_to_multiple(\n {'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',\n 
dropna=False)\n\n with pytest.raises(ValueError):\n store.select_as_multiple(['df1a', 'df2a'])\n\n assert not store.select('df1a').index.equals(\n store.select('df2a').index)\n\n def test_select_as_multiple(self):\n\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame().rename(columns=lambda x: \"%s_2\" % x)\n df2['foo'] = 'bar'\n\n with ensure_clean_store(self.path) as store:\n\n # no tables stored\n with pytest.raises(Exception):\n store.select_as_multiple(\n None, where=['A>0', 'B>0'], selector='df1')\n\n store.append('df1', df1, data_columns=['A', 'B'])\n store.append('df2', df2)\n\n # exceptions\n with pytest.raises(Exception):\n store.select_as_multiple(None, where=['A>0', 'B>0'],\n selector='df1')\n\n with pytest.raises(Exception):\n store.select_as_multiple([None], where=['A>0', 'B>0'],\n selector='df1')\n\n with pytest.raises(KeyError):\n store.select_as_multiple(\n ['df1', 'df3'], where=['A>0', 'B>0'], selector='df1')\n\n with pytest.raises(KeyError):\n store.select_as_multiple(\n ['df3'], where=['A>0', 'B>0'], selector='df1')\n\n with pytest.raises(KeyError):\n store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df4')\n\n # default select\n result = store.select('df1', ['A>0', 'B>0'])\n expected = store.select_as_multiple(\n ['df1'], where=['A>0', 'B>0'], selector='df1')\n tm.assert_frame_equal(result, expected)\n expected = store.select_as_multiple(\n 'df1', where=['A>0', 'B>0'], selector='df1')\n tm.assert_frame_equal(result, expected)\n\n # multiple\n result = store.select_as_multiple(\n ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')\n expected = concat([df1, df2], axis=1)\n expected = expected[(expected.A > 0) & (expected.B > 0)]\n tm.assert_frame_equal(result, expected)\n\n # multiple (diff selector)\n result = store.select_as_multiple(\n ['df1', 'df2'], where='index>df2.index[4]', selector='df2')\n expected = concat([df1, df2], axis=1)\n expected = expected[5:]\n tm.assert_frame_equal(result, expected)\n\n # test exception for diff rows\n store.append('df3', tm.makeTimeDataFrame(nper=50))\n with pytest.raises(ValueError):\n store.select_as_multiple(\n ['df1', 'df3'], where=['A>0', 'B>0'], selector='df1')\n\n @pytest.mark.skipif(\n LooseVersion(tables.__version__) < LooseVersion('3.1.0'),\n reason=(\"tables version does not support fix for nan selection \"\n \"bug: GH 4858\"))\n def test_nan_selection_bug_4858(self):\n\n with ensure_clean_store(self.path) as store:\n\n df = DataFrame(dict(cols=range(6), values=range(6)),\n dtype='float64')\n df['cols'] = (df['cols'] + 10).apply(str)\n df.iloc[0] = np.nan\n\n expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[\n 3., 4., 5.]), index=[3, 4, 5])\n\n # write w/o the index on that particular column\n store.append('df', df, data_columns=True, index=['cols'])\n result = store.select('df', where='values>2.0')\n assert_frame_equal(result, expected)\n\n def test_start_stop_table(self):\n\n with ensure_clean_store(self.path) as store:\n\n # table\n df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))\n store.append('df', df)\n\n result = store.select(\n 'df', \"columns=['A']\", start=0, stop=5)\n expected = df.loc[0:4, ['A']]\n tm.assert_frame_equal(result, expected)\n\n # out of range\n result = store.select(\n 'df', \"columns=['A']\", start=30, stop=40)\n assert len(result) == 0\n expected = df.loc[30:40, ['A']]\n tm.assert_frame_equal(result, expected)\n\n def test_start_stop_multiple(self):\n\n # GH 16209\n with ensure_clean_store(self.path) as store:\n\n df = 
DataFrame({\"foo\": [1, 2], \"bar\": [1, 2]})\n\n store.append_to_multiple({'selector': ['foo'], 'data': None}, df,\n selector='selector')\n result = store.select_as_multiple(['selector', 'data'],\n selector='selector', start=0,\n stop=1)\n expected = df.loc[[0], ['foo', 'bar']]\n tm.assert_frame_equal(result, expected)\n\n def test_start_stop_fixed(self):\n\n with ensure_clean_store(self.path) as store:\n\n # fixed, GH 8287\n df = DataFrame(dict(A=np.random.rand(20),\n B=np.random.rand(20)),\n index=pd.date_range('20130101', periods=20))\n store.put('df', df)\n\n result = store.select(\n 'df', start=0, stop=5)\n expected = df.iloc[0:5, :]\n tm.assert_frame_equal(result, expected)\n\n result = store.select(\n 'df', start=5, stop=10)\n expected = df.iloc[5:10, :]\n tm.assert_frame_equal(result, expected)\n\n # out of range\n result = store.select(\n 'df', start=30, stop=40)\n expected = df.iloc[30:40, :]\n tm.assert_frame_equal(result, expected)\n\n # series\n s = df.A\n store.put('s', s)\n result = store.select('s', start=0, stop=5)\n expected = s.iloc[0:5]\n tm.assert_series_equal(result, expected)\n\n result = store.select('s', start=5, stop=10)\n expected = s.iloc[5:10]\n tm.assert_series_equal(result, expected)\n\n # sparse; not implemented\n df = tm.makeDataFrame()\n df.iloc[3:5, 1:3] = np.nan\n df.iloc[8:10, -2] = np.nan\n dfs = df.to_sparse()\n store.put('dfs', dfs)\n with pytest.raises(NotImplementedError):\n store.select('dfs', start=0, stop=5)\n\n def test_select_filter_corner(self):\n\n df = DataFrame(np.random.randn(50, 100))\n df.index = ['%.3d' % c for c in df.index]\n df.columns = ['%.3d' % c for c in df.columns]\n\n with ensure_clean_store(self.path) as store:\n store.put('frame', df, format='table')\n\n crit = 'columns=df.columns[:75]'\n result = store.select('frame', [crit])\n tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])\n\n crit = 'columns=df.columns[:75:2]'\n result = store.select('frame', [crit])\n tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])\n\n def test_path_pathlib(self):\n df = tm.makeDataFrame()\n\n result = tm.round_trip_pathlib(\n lambda p: df.to_hdf(p, 'df'),\n lambda p: pd.read_hdf(p, 'df'))\n tm.assert_frame_equal(df, result)\n\n @pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])\n def test_contiguous_mixed_data_table(self, start, stop):\n # GH 17021\n # ValueError when reading a contiguous mixed-data table ft. 
VLArray\n df = DataFrame({'a': Series([20111010, 20111011, 20111012]),\n 'b': Series(['ab', 'cd', 'ab'])})\n\n with ensure_clean_store(self.path) as store:\n store.append('test_dataset', df)\n\n result = store.select('test_dataset', start=start, stop=stop)\n assert_frame_equal(df[start:stop], result)\n\n def test_path_pathlib_hdfstore(self):\n df = tm.makeDataFrame()\n\n def writer(path):\n with pd.HDFStore(path) as store:\n df.to_hdf(store, 'df')\n\n def reader(path):\n with pd.HDFStore(path) as store:\n return pd.read_hdf(store, 'df')\n\n result = tm.round_trip_pathlib(writer, reader)\n tm.assert_frame_equal(df, result)\n\n def test_pickle_path_localpath(self):\n df = tm.makeDataFrame()\n result = tm.round_trip_pathlib(\n lambda p: df.to_hdf(p, 'df'),\n lambda p: pd.read_hdf(p, 'df'))\n tm.assert_frame_equal(df, result)\n\n def test_path_localpath_hdfstore(self):\n df = tm.makeDataFrame()\n\n def writer(path):\n with pd.HDFStore(path) as store:\n df.to_hdf(store, 'df')\n\n def reader(path):\n with pd.HDFStore(path) as store:\n return pd.read_hdf(store, 'df')\n\n result = tm.round_trip_localpath(writer, reader)\n tm.assert_frame_equal(df, result)\n\n def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):\n\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n\n def _check_double_roundtrip(self, obj, comparator, compression=False,\n **kwargs):\n options = {}\n if compression:\n options['complib'] = compression or _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store['obj'] = obj\n retrieved = store['obj']\n comparator(retrieved, obj, **kwargs)\n store['obj'] = retrieved\n again = store['obj']\n comparator(again, obj, **kwargs)\n\n def _check_roundtrip_table(self, obj, comparator, compression=False):\n options = {}\n if compression:\n options['complib'] = _default_compressor\n\n with ensure_clean_store(self.path, 'w', **options) as store:\n store.put('obj', obj, format='table')\n retrieved = store['obj']\n\n comparator(retrieved, obj)\n\n def test_multiple_open_close(self):\n # gh-4409: open & close multiple times\n\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', mode='w', format='table')\n\n # single\n store = HDFStore(path)\n assert 'CLOSED' not in store.info()\n assert store.is_open\n\n store.close()\n assert 'CLOSED' in store.info()\n assert not store.is_open\n\n with ensure_clean_path(self.path) as path:\n\n if pytables._table_file_open_policy_is_strict:\n\n # multiples\n store1 = HDFStore(path)\n\n with pytest.raises(ValueError):\n HDFStore(path)\n\n store1.close()\n else:\n\n # multiples\n store1 = HDFStore(path)\n store2 = HDFStore(path)\n\n assert 'CLOSED' not in store1.info()\n assert 'CLOSED' not in store2.info()\n assert store1.is_open\n assert store2.is_open\n\n store1.close()\n assert 'CLOSED' in store1.info()\n assert not store1.is_open\n assert 'CLOSED' not in store2.info()\n assert store2.is_open\n\n store2.close()\n assert 'CLOSED' in store1.info()\n assert 'CLOSED' in store2.info()\n assert not store1.is_open\n assert not store2.is_open\n\n # nested close\n store = HDFStore(path, mode='w')\n store.append('df', df)\n\n store2 = HDFStore(path)\n store2.append('df2', df)\n store2.close()\n assert 'CLOSED' in store2.info()\n assert not store2.is_open\n\n store.close()\n assert 'CLOSED' in 
store.info()\n assert not store.is_open\n\n # double closing\n store = HDFStore(path, mode='w')\n store.append('df', df)\n\n store2 = HDFStore(path)\n store.close()\n assert 'CLOSED' in store.info()\n assert not store.is_open\n\n store2.close()\n assert 'CLOSED' in store2.info()\n assert not store2.is_open\n\n # ops on a closed store\n with ensure_clean_path(self.path) as path:\n\n df = tm.makeDataFrame()\n df.to_hdf(path, 'df', mode='w', format='table')\n\n store = HDFStore(path)\n store.close()\n\n with pytest.raises(ClosedFileError):\n store.keys()\n\n with pytest.raises(ClosedFileError):\n 'df' in store\n\n with pytest.raises(ClosedFileError):\n len(store)\n\n with pytest.raises(ClosedFileError):\n store['df']\n\n with pytest.raises(AttributeError):\n store.df\n\n with pytest.raises(ClosedFileError):\n store.select('df')\n\n with pytest.raises(ClosedFileError):\n store.get('df')\n\n with pytest.raises(ClosedFileError):\n store.append('df2', df)\n\n with pytest.raises(ClosedFileError):\n store.put('df3', df)\n\n with pytest.raises(ClosedFileError):\n store.get_storer('df2')\n\n with pytest.raises(ClosedFileError):\n store.remove('df2')\n\n with pytest.raises(ClosedFileError, match='file is not open'):\n store.select('df')\n\n def test_pytables_native_read(self, datapath):\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf/pytables_native.h5'),\n mode='r') as store:\n d2 = store['detector/readout']\n assert isinstance(d2, DataFrame)\n\n @pytest.mark.skipif(PY35 and is_platform_windows(),\n reason=\"native2 read fails oddly on windows / 3.5\")\n def test_pytables_native2_read(self, datapath):\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'),\n mode='r') as store:\n str(store)\n d1 = store['detector']\n assert isinstance(d1, DataFrame)\n\n @xfail_non_writeable\n def test_legacy_table_fixed_format_read_py2(self, datapath):\n # GH 24510\n # legacy table with fixed format written in Python 2\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf',\n 'legacy_table_fixed_py2.h5'),\n mode='r') as store:\n result = store.select('df')\n expected = pd.DataFrame([[1, 2, 3, 'D']],\n columns=['A', 'B', 'C', 'D'],\n index=pd.Index(['ABC'],\n name='INDEX_NAME'))\n assert_frame_equal(expected, result)\n\n def test_legacy_table_read_py2(self, datapath):\n # issue: 24925\n # legacy table written in Python 2\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf',\n 'legacy_table_py2.h5'),\n mode='r') as store:\n result = store.select('table')\n\n expected = pd.DataFrame({\n \"a\": [\"a\", \"b\"],\n \"b\": [2, 3]\n })\n assert_frame_equal(expected, result)\n\n def test_copy(self):\n\n with catch_warnings(record=True):\n\n def do_copy(f, new_f=None, keys=None,\n propindexes=True, **kwargs):\n try:\n store = HDFStore(f, 'r')\n\n if new_f is None:\n import tempfile\n fd, new_f = tempfile.mkstemp()\n\n tstore = store.copy(\n new_f, keys=keys, propindexes=propindexes, **kwargs)\n\n # check keys\n if keys is None:\n keys = store.keys()\n assert set(keys) == set(tstore.keys())\n\n # check indices & nrows\n for k in tstore.keys():\n if tstore.get_storer(k).is_table:\n new_t = tstore.get_storer(k)\n orig_t = store.get_storer(k)\n\n assert orig_t.nrows == new_t.nrows\n\n # check propindixes\n if propindexes:\n for a in orig_t.axes:\n if a.is_indexed:\n assert new_t[a.name].is_indexed\n\n finally:\n safe_close(store)\n safe_close(tstore)\n try:\n os.close(fd)\n except (OSError, ValueError):\n pass\n safe_remove(new_f)\n\n # new table\n df = 
tm.makeDataFrame()\n\n try:\n path = create_tempfile(self.path)\n st = HDFStore(path)\n st.append('df', df, data_columns=['A'])\n st.close()\n do_copy(f=path)\n do_copy(f=path, propindexes=False)\n finally:\n safe_remove(path)\n\n def test_store_datetime_fractional_secs(self):\n\n with ensure_clean_store(self.path) as store:\n dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)\n series = Series([0], [dt])\n store['a'] = series\n assert store['a'].index[0] == dt\n\n def test_tseries_indices_series(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n tm.assert_series_equal(result, ser)\n assert result.index.freq == ser.index.freq\n tm.assert_class_equal(result.index, ser.index, obj=\"series index\")\n\n idx = tm.makePeriodIndex(10)\n ser = Series(np.random.randn(len(idx)), idx)\n store['a'] = ser\n result = store['a']\n\n tm.assert_series_equal(result, ser)\n assert result.index.freq == ser.index.freq\n tm.assert_class_equal(result.index, ser.index, obj=\"series index\")\n\n def test_tseries_indices_frame(self):\n\n with ensure_clean_store(self.path) as store:\n idx = tm.makeDateIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n assert result.index.freq == df.index.freq\n tm.assert_class_equal(result.index, df.index,\n obj=\"dataframe index\")\n\n idx = tm.makePeriodIndex(10)\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n store['a'] = df\n result = store['a']\n\n assert_frame_equal(result, df)\n assert result.index.freq == df.index.freq\n tm.assert_class_equal(result.index, df.index,\n obj=\"dataframe index\")\n\n def test_unicode_index(self):\n\n unicode_values = ['\\u03c3', '\\u03c3\\u03c3']\n\n # PerformanceWarning\n with catch_warnings(record=True):\n simplefilter(\"ignore\", pd.errors.PerformanceWarning)\n s = Series(np.random.randn(len(unicode_values)), unicode_values)\n self._check_roundtrip(s, tm.assert_series_equal)\n\n def test_unicode_longer_encoded(self):\n # GH 11234\n char = '\\u0394'\n df = pd.DataFrame({'A': [char]})\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', encoding='utf-8')\n result = store.get('df')\n tm.assert_frame_equal(result, df)\n\n df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})\n with ensure_clean_store(self.path) as store:\n store.put('df', df, format='table', encoding='utf-8')\n result = store.get('df')\n tm.assert_frame_equal(result, df)\n\n @xfail_non_writeable\n def test_store_datetime_mixed(self):\n\n df = DataFrame(\n {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})\n ts = tm.makeTimeSeries()\n df['d'] = ts.index[:3]\n self._check_roundtrip(df, tm.assert_frame_equal)\n\n # def test_cant_write_multiindex_table(self):\n # # for now, #1848\n # df = DataFrame(np.random.randn(10, 4),\n # index=[np.arange(5).repeat(2),\n # np.tile(np.arange(2), 5)])\n #\n # with pytest.raises(Exception):\n # store.put('foo', df, format='table')\n\n def test_append_with_diff_col_name_types_raises_value_error(self):\n df = DataFrame(np.random.randn(10, 1))\n df2 = DataFrame({'a': np.random.randn(10)})\n df3 = DataFrame({(1, 2): np.random.randn(10)})\n df4 = DataFrame({('1', 2): np.random.randn(10)})\n df5 = DataFrame({('1', 2, object): np.random.randn(10)})\n\n with ensure_clean_store(self.path) as store:\n name = 'df_%s' % tm.rands(10)\n store.append(name, df)\n\n for d in (df2, df3, df4, df5):\n with 
pytest.raises(ValueError):\n store.append(name, d)\n\n def test_query_with_nested_special_character(self):\n df = DataFrame({'a': ['a', 'a', 'c', 'b',\n 'test & test', 'c', 'b', 'e'],\n 'b': [1, 2, 3, 4, 5, 6, 7, 8]})\n expected = df[df.a == 'test & test']\n with ensure_clean_store(self.path) as store:\n store.append('test', df, format='table', data_columns=True)\n result = store.select('test', 'a = \"test & test\"')\n tm.assert_frame_equal(expected, result)\n\n def test_categorical(self):\n\n with ensure_clean_store(self.path) as store:\n\n # Basic\n _maybe_remove(store, 's')\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=False))\n store.append('s', s, format='table')\n result = store.select('s')\n tm.assert_series_equal(s, result)\n\n _maybe_remove(store, 's_ordered')\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=True))\n store.append('s_ordered', s, format='table')\n result = store.select('s_ordered')\n tm.assert_series_equal(s, result)\n\n _maybe_remove(store, 'df')\n df = DataFrame({\"s\": s, \"vals\": [1, 2, 3, 4, 5, 6]})\n store.append('df', df, format='table')\n result = store.select('df')\n tm.assert_frame_equal(result, df)\n\n # Dtypes\n _maybe_remove(store, 'si')\n s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')\n store.append('si', s)\n result = store.select('si')\n tm.assert_series_equal(result, s)\n\n _maybe_remove(store, 'si2')\n s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')\n store.append('si2', s)\n result = store.select('si2')\n tm.assert_series_equal(result, s)\n\n # Multiple\n _maybe_remove(store, 'df2')\n df2 = df.copy()\n df2['s2'] = Series(list('abcdefg')).astype('category')\n store.append('df2', df2)\n result = store.select('df2')\n tm.assert_frame_equal(result, df2)\n\n # Make sure the metadata is OK\n info = store.info()\n assert '/df2 ' in info\n # assert '/df2/meta/values_block_0/meta' in info\n assert '/df2/meta/values_block_1/meta' in info\n\n # unordered\n _maybe_remove(store, 's2')\n s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[\n 'a', 'b', 'c', 'd'], ordered=False))\n store.append('s2', s, format='table')\n result = store.select('s2')\n tm.assert_series_equal(result, s)\n\n # Query\n _maybe_remove(store, 'df3')\n store.append('df3', df, data_columns=['s'])\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s in [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s = [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['d'])]\n result = store.select('df3', where=['s in [\"d\"]'])\n tm.assert_frame_equal(result, expected)\n\n expected = df[df.s.isin(['f'])]\n result = store.select('df3', where=['s in [\"f\"]'])\n tm.assert_frame_equal(result, expected)\n\n # Appending with same categories is ok\n store.append('df3', df)\n\n df = concat([df, df])\n expected = df[df.s.isin(['b', 'c'])]\n result = store.select('df3', where=['s in [\"b\",\"c\"]'])\n tm.assert_frame_equal(result, expected)\n\n # Appending must have the same categories\n df3 = df.copy()\n df3['s'].cat.remove_unused_categories(inplace=True)\n\n with pytest.raises(ValueError):\n store.append('df3', df3)\n\n # Remove, and make sure meta data is removed (its a recursive\n # removal so should be).\n result = store.select('df3/meta/s/meta')\n assert result is not None\n store.remove('df3')\n\n with 
pytest.raises(KeyError):\n store.select('df3/meta/s/meta')\n\n def test_categorical_conversion(self):\n\n # GH13322\n # Check that read_hdf with categorical columns doesn't return rows if\n # where criteria isn't met.\n obsids = ['ESP_012345_6789', 'ESP_987654_3210']\n imgids = ['APF00006np', 'APF0001imm']\n data = [4.3, 9.8]\n\n # Test without categories\n df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))\n\n # We are expecting an empty DataFrame matching types of df\n expected = df.iloc[[], :]\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = read_hdf(path, 'df', where='obsids=B')\n tm.assert_frame_equal(result, expected)\n\n # Test with categories\n df.obsids = df.obsids.astype('category')\n df.imgids = df.imgids.astype('category')\n\n # We are expecting an empty DataFrame matching types of df\n expected = df.iloc[[], :]\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = read_hdf(path, 'df', where='obsids=B')\n tm.assert_frame_equal(result, expected)\n\n def test_categorical_nan_only_columns(self):\n # GH18413\n # Check that read_hdf with categorical columns with NaN-only values can\n # be read back.\n df = pd.DataFrame({\n 'a': ['a', 'b', 'c', np.nan],\n 'b': [np.nan, np.nan, np.nan, np.nan],\n 'c': [1, 2, 3, 4],\n 'd': pd.Series([None] * 4, dtype=object)\n })\n df['a'] = df.a.astype('category')\n df['b'] = df.b.astype('category')\n df['d'] = df.b.astype('category')\n expected = df\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = read_hdf(path, 'df')\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_column_name(self):\n df = DataFrame(columns=[\"a\", \"a\"], data=[[0, 0]])\n\n with ensure_clean_path(self.path) as path:\n with pytest.raises(ValueError):\n df.to_hdf(path, 'df', format='fixed')\n\n df.to_hdf(path, 'df', format='table')\n other = read_hdf(path, 'df')\n\n tm.assert_frame_equal(df, other)\n assert df.equals(other)\n assert other.equals(df)\n\n def test_round_trip_equals(self):\n # GH 9330\n df = DataFrame({\"B\": [1, 2], \"A\": [\"x\", \"y\"]})\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n other = read_hdf(path, 'df')\n tm.assert_frame_equal(df, other)\n assert df.equals(other)\n assert other.equals(df)\n\n def test_preserve_timedeltaindex_type(self):\n # GH9635\n # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve\n # the type of the index.\n df = DataFrame(np.random.normal(size=(10, 5)))\n df.index = timedelta_range(\n start='0s', periods=10, freq='1s', name='example')\n\n with ensure_clean_store(self.path) as store:\n\n store['df'] = df\n assert_frame_equal(store['df'], df)\n\n def test_columns_multiindex_modified(self):\n # BUG: 7212\n # read_hdf store.select modified the passed columns parameters\n # when multi-indexed.\n\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n data_columns = df.index.names + df.columns.tolist()\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df',\n mode='a',\n append=True,\n data_columns=data_columns,\n index=False)\n cols2load = list('BCD')\n cols2load_original = list(cols2load)\n df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa\n assert cols2load_original == cols2load\n\n @ignore_natural_naming_warning\n def 
test_to_hdf_with_object_column_names(self):\n # GH9057\n # Writing HDF5 table format should only work for string-like\n # column types\n\n types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,\n tm.makeDateIndex, tm.makeTimedeltaIndex,\n tm.makePeriodIndex]\n types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex,\n tm.makeUnicodeIndex]\n\n for index in types_should_fail:\n df = DataFrame(np.random.randn(10, 2), columns=index(2))\n with ensure_clean_path(self.path) as path:\n with catch_warnings(record=True):\n msg = \"cannot have non-object label DataIndexableCol\"\n with pytest.raises(ValueError, match=msg):\n df.to_hdf(path, 'df', format='table',\n data_columns=True)\n\n for index in types_should_run:\n df = DataFrame(np.random.randn(10, 2), columns=index(2))\n with ensure_clean_path(self.path) as path:\n with catch_warnings(record=True):\n df.to_hdf(path, 'df', format='table', data_columns=True)\n result = pd.read_hdf(\n path, 'df', where=\"index = [{0}]\".format(df.index[0]))\n assert(len(result))\n\n def test_read_hdf_open_store(self):\n # GH10330\n # No check for non-string path_or-buf, and no test of open store\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w')\n direct = read_hdf(path, 'df')\n store = HDFStore(path, mode='r')\n indirect = read_hdf(store, 'df')\n tm.assert_frame_equal(direct, indirect)\n assert store.is_open\n store.close()\n\n def test_read_hdf_iterator(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n df.index.name = 'letters'\n df = df.set_index(keys='E', append=True)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='w', format='t')\n direct = read_hdf(path, 'df')\n iterator = read_hdf(path, 'df', iterator=True)\n assert isinstance(iterator, TableIterator)\n indirect = next(iterator.__iter__())\n tm.assert_frame_equal(direct, indirect)\n iterator.store.close()\n\n def test_read_hdf_errors(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n with pytest.raises(IOError):\n read_hdf(path, 'key')\n\n df.to_hdf(path, 'df')\n store = HDFStore(path, mode='r')\n store.close()\n\n with pytest.raises(IOError):\n read_hdf(store, 'df')\n\n def test_read_hdf_generic_buffer_errors(self):\n with pytest.raises(NotImplementedError):\n read_hdf(BytesIO(b''), 'df')\n\n def test_invalid_complib(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as path:\n with pytest.raises(ValueError):\n df.to_hdf(path, 'df', complib='foolib')\n # GH10443\n\n def test_read_nokey(self):\n df = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n # Categorical dtype not supported for \"fixed\" format. 
So no need\n # to test with that dtype in the dataframe here.\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='a')\n reread = read_hdf(path)\n assert_frame_equal(df, reread)\n df.to_hdf(path, 'df2', mode='a')\n\n with pytest.raises(ValueError):\n read_hdf(path)\n\n def test_read_nokey_table(self):\n # GH13231\n df = DataFrame({'i': range(5),\n 'c': Series(list('abacd'), dtype='category')})\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', mode='a', format='table')\n reread = read_hdf(path)\n assert_frame_equal(df, reread)\n df.to_hdf(path, 'df2', mode='a', format='table')\n\n with pytest.raises(ValueError):\n read_hdf(path)\n\n def test_read_nokey_empty(self):\n with ensure_clean_path(self.path) as path:\n store = HDFStore(path)\n store.close()\n\n with pytest.raises(ValueError):\n read_hdf(path)\n\n @td.skip_if_no('pathlib')\n def test_read_from_pathlib_path(self):\n\n # GH11773\n from pathlib import Path\n\n expected = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as filename:\n path_obj = Path(filename)\n\n expected.to_hdf(path_obj, 'df', mode='a')\n actual = read_hdf(path_obj, 'df')\n\n tm.assert_frame_equal(expected, actual)\n\n @td.skip_if_no('py.path')\n def test_read_from_py_localpath(self):\n\n # GH11773\n from py.path import local as LocalPath\n\n expected = DataFrame(np.random.rand(4, 5),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as filename:\n path_obj = LocalPath(filename)\n\n expected.to_hdf(path_obj, 'df', mode='a')\n actual = read_hdf(path_obj, 'df')\n\n tm.assert_frame_equal(expected, actual)\n\n def test_query_long_float_literal(self):\n # GH 14241\n df = pd.DataFrame({'A': [1000000000.0009,\n 1000000000.0011,\n 1000000000.0015]})\n\n with ensure_clean_store(self.path) as store:\n store.append('test', df, format='table', data_columns=True)\n\n cutoff = 1000000000.0006\n result = store.select('test', \"A < %.4f\" % cutoff)\n assert result.empty\n\n cutoff = 1000000000.0010\n result = store.select('test', \"A > %.4f\" % cutoff)\n expected = df.loc[[1, 2], :]\n tm.assert_frame_equal(expected, result)\n\n exact = 1000000000.0011\n result = store.select('test', 'A == %.4f' % exact)\n expected = df.loc[[1], :]\n tm.assert_frame_equal(expected, result)\n\n def test_query_compare_column_type(self):\n # GH 15492\n df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],\n 'real_date': date_range('2014-01-01', periods=2),\n 'float': [1.1, 1.2],\n 'int': [1, 2]},\n columns=['date', 'real_date', 'float', 'int'])\n\n with ensure_clean_store(self.path) as store:\n store.append('test', df, format='table', data_columns=True)\n\n ts = pd.Timestamp('2014-01-01') # noqa\n result = store.select('test', where='real_date > ts')\n expected = df.loc[[1], :]\n tm.assert_frame_equal(expected, result)\n\n for op in ['<', '>', '==']:\n # non strings to string column always fail\n for v in [2.1, True, pd.Timestamp('2014-01-01'),\n pd.Timedelta(1, 's')]:\n query = 'date {op} v'.format(op=op)\n with pytest.raises(TypeError):\n store.select('test', where=query)\n\n # strings to other columns must be convertible to type\n v = 'a'\n for col in ['int', 'float', 'real_date']:\n query = '{col} {op} v'.format(op=op, col=col)\n with pytest.raises(ValueError):\n store.select('test', where=query)\n\n for v, col in zip(['1', '1.1', '2014-01-01'],\n ['int', 'float', 'real_date']):\n query = '{col} {op} v'.format(op=op, col=col)\n result = store.select('test', 
where=query)\n\n if op == '==':\n expected = df.loc[[0], :]\n elif op == '>':\n expected = df.loc[[1], :]\n else:\n expected = df.loc[[], :]\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize('format', ['fixed', 'table'])\n def test_read_hdf_series_mode_r(self, format):\n # GH 16583\n # Tests that reading a Series saved to an HDF file\n # still works if a mode='r' argument is supplied\n series = tm.makeFloatSeries()\n with ensure_clean_path(self.path) as path:\n series.to_hdf(path, key='data', format=format)\n result = pd.read_hdf(path, key='data', mode='r')\n tm.assert_series_equal(result, series)\n\n @pytest.mark.skipif(not PY36, reason=\"Need python 3.6\")\n def test_fspath(self):\n with tm.ensure_clean('foo.h5') as path:\n with pd.HDFStore(path) as store:\n assert os.fspath(store) == str(path)\n\n def test_read_py2_hdf_file_in_py3(self, datapath):\n # GH 16781\n\n # tests reading a PeriodIndex DataFrame written in Python2 in Python3\n\n # the file was generated in Python 2.7 like so:\n #\n # df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(\n # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))\n # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')\n\n expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(\n ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))\n\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf',\n 'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),\n mode='r') as store:\n result = store['p']\n assert_frame_equal(result, expected)\n\n\nclass TestHDFComplexValues(Base):\n # GH10447\n\n def test_complex_fixed(self):\n df = DataFrame(np.random.rand(4, 5).astype(np.complex64),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n df = DataFrame(np.random.rand(4, 5).astype(np.complex128),\n index=list('abcd'),\n columns=list('ABCDE'))\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_table(self):\n df = DataFrame(np.random.rand(4, 5).astype(np.complex64),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n df = DataFrame(np.random.rand(4, 5).astype(np.complex128),\n index=list('abcd'),\n columns=list('ABCDE'))\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table', mode='w')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n @xfail_non_writeable\n def test_complex_mixed_fixed(self):\n complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,\n 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex64,\n 'D': complex128,\n 'E': [1.0, 2.0, 3.0, 4.0]},\n index=list('abcd'))\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_mixed_table(self):\n complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,\n 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex64,\n 'D': complex128,\n 'E': [1.0, 
2.0, 3.0, 4.0]},\n index=list('abcd'))\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['A', 'B'])\n result = store.select('df', where='A>2')\n assert_frame_equal(df.loc[df.A > 2], result)\n\n with ensure_clean_path(self.path) as path:\n df.to_hdf(path, 'df', format='table')\n reread = read_hdf(path, 'df')\n assert_frame_equal(df, reread)\n\n def test_complex_across_dimensions_fixed(self):\n with catch_warnings(record=True):\n complex128 = np.array(\n [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n df = DataFrame({'A': s, 'B': s})\n\n objs = [s, df]\n comps = [tm.assert_series_equal, tm.assert_frame_equal]\n for obj, comp in zip(objs, comps):\n with ensure_clean_path(self.path) as path:\n obj.to_hdf(path, 'obj', format='fixed')\n reread = read_hdf(path, 'obj')\n comp(obj, reread)\n\n def test_complex_across_dimensions(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n df = DataFrame({'A': s, 'B': s})\n\n with catch_warnings(record=True):\n\n objs = [df]\n comps = [tm.assert_frame_equal]\n for obj, comp in zip(objs, comps):\n with ensure_clean_path(self.path) as path:\n obj.to_hdf(path, 'obj', format='table')\n reread = read_hdf(path, 'obj')\n comp(obj, reread)\n\n def test_complex_indexing_error(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],\n dtype=np.complex128)\n df = DataFrame({'A': [1, 2, 3, 4],\n 'B': ['a', 'b', 'c', 'd'],\n 'C': complex128},\n index=list('abcd'))\n with ensure_clean_store(self.path) as store:\n with pytest.raises(TypeError):\n store.append('df', df, data_columns=['C'])\n\n def test_complex_series_error(self):\n complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])\n s = Series(complex128, index=list('abcd'))\n\n with ensure_clean_path(self.path) as path:\n with pytest.raises(TypeError):\n s.to_hdf(path, 'obj', format='t')\n\n with ensure_clean_path(self.path) as path:\n s.to_hdf(path, 'obj', format='t', index=False)\n reread = read_hdf(path, 'obj')\n tm.assert_series_equal(s, reread)\n\n def test_complex_append(self):\n df = DataFrame({'a': np.random.randn(100).astype(np.complex128),\n 'b': np.random.randn(100)})\n\n with ensure_clean_store(self.path) as store:\n store.append('df', df, data_columns=['b'])\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(pd.concat([df, df], 0), result)\n\n\nclass TestTimezones(Base):\n\n def _compare_with_tz(self, a, b):\n tm.assert_frame_equal(a, b)\n\n # compare the zones on each element\n for c in a.columns:\n for i in a.index:\n a_e = a.loc[i, c]\n b_e = b.loc[i, c]\n if not (a_e == b_e and a_e.tz == b_e.tz):\n raise AssertionError(\n \"invalid tz comparison [%s] [%s]\" % (a_e, b_e))\n\n def test_append_with_timezones_dateutil(self):\n\n from datetime import timedelta\n\n # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows\n # filename issues.\n from pandas._libs.tslibs.timezones import maybe_get_tz\n gettz = lambda x: maybe_get_tz('dateutil/' + x)\n\n # as columns\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(\n 'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))\n\n store.append('df_tz', df, data_columns=['A'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # select with tz aware\n expected = df[df.A >= df.A[3]]\n 
result = store.select('df_tz', where='A>=df.A[3]')\n self._compare_with_tz(result, expected)\n\n # ensure we include dates in DST and STD time here.\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130603',\n tz=gettz('US/Eastern'))),\n index=range(5))\n store.append('df_tz', df)\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130102', tz=gettz('EET'))),\n index=range(5))\n with pytest.raises(ValueError):\n store.append('df_tz', df)\n\n # this is ok\n _maybe_remove(store, 'df_tz')\n store.append('df_tz', df, data_columns=['A', 'B'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # can't append with diff timezone\n df = DataFrame(dict(A=Timestamp('20130102',\n tz=gettz('US/Eastern')),\n B=Timestamp('20130102', tz=gettz('CET'))),\n index=range(5))\n with pytest.raises(ValueError):\n store.append('df_tz', df)\n\n # as index\n with ensure_clean_store(self.path) as store:\n\n # GH 4098 example\n df = DataFrame(dict(A=Series(lrange(3), index=date_range(\n '2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))\n\n _maybe_remove(store, 'df')\n store.put('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n def test_append_with_timezones_pytz(self):\n\n from datetime import timedelta\n\n # as columns\n with ensure_clean_store(self.path) as store:\n\n _maybe_remove(store, 'df_tz')\n df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',\n tz='US/Eastern') +\n timedelta(hours=1) * i\n for i in range(5)]))\n store.append('df_tz', df, data_columns=['A'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # select with tz aware\n self._compare_with_tz(store.select(\n 'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])\n\n _maybe_remove(store, 'df_tz')\n # ensure we include dates in DST and STD time here.\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130603', tz='US/Eastern')),\n index=range(5))\n store.append('df_tz', df)\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130102', tz='EET')),\n index=range(5))\n with pytest.raises(ValueError):\n store.append('df_tz', df)\n\n # this is ok\n _maybe_remove(store, 'df_tz')\n store.append('df_tz', df, data_columns=['A', 'B'])\n result = store['df_tz']\n self._compare_with_tz(result, df)\n assert_frame_equal(result, df)\n\n # can't append with diff timezone\n df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130102', tz='CET')),\n index=range(5))\n with pytest.raises(ValueError):\n store.append('df_tz', df)\n\n # as index\n with ensure_clean_store(self.path) as store:\n\n # GH 4098 example\n df = DataFrame(dict(A=Series(lrange(3), index=date_range(\n '2000-1-1', periods=3, freq='H', tz='US/Eastern'))))\n\n _maybe_remove(store, 'df')\n store.put('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n _maybe_remove(store, 'df')\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n\n def test_tseries_select_index_column(self):\n # GH7777\n # selecting a UTC datetimeindex 
column did\n # not preserve UTC tzinfo set before storing\n\n # check that no tz still works\n rng = date_range('1/1/2000', '1/30/2000')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n assert rng.tz == DatetimeIndex(result.values).tz\n\n # check utc\n rng = date_range('1/1/2000', '1/30/2000', tz='UTC')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n assert rng.tz == result.dt.tz\n\n # double check non-utc\n rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store.append('frame', frame)\n result = store.select_column('frame', 'index')\n assert rng.tz == result.dt.tz\n\n def test_timezones_fixed(self):\n with ensure_clean_store(self.path) as store:\n\n # index\n rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\n store['df'] = df\n result = store['df']\n assert_frame_equal(result, df)\n\n # as data\n # GH11411\n _maybe_remove(store, 'df')\n df = DataFrame({'A': rng,\n 'B': rng.tz_convert('UTC').tz_localize(None),\n 'C': rng.tz_convert('CET'),\n 'D': range(len(rng))}, index=rng)\n store['df'] = df\n result = store['df']\n assert_frame_equal(result, df)\n\n def test_fixed_offset_tz(self):\n rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')\n frame = DataFrame(np.random.randn(len(rng), 4), index=rng)\n\n with ensure_clean_store(self.path) as store:\n store['frame'] = frame\n recons = store['frame']\n tm.assert_index_equal(recons.index, rng)\n assert rng.tz == recons.index.tz\n\n @td.skip_if_windows\n def test_store_timezone(self):\n # GH2852\n # issue storing datetime.date with a timezone as it resets when read\n # back in a new timezone\n\n # original method\n with ensure_clean_store(self.path) as store:\n\n today = datetime.date(2013, 9, 10)\n df = DataFrame([1, 2, 3], index=[today, today, today])\n store['obj1'] = df\n result = store['obj1']\n assert_frame_equal(result, df)\n\n # with tz setting\n with ensure_clean_store(self.path) as store:\n\n with set_timezone('EST5EDT'):\n today = datetime.date(2013, 9, 10)\n df = DataFrame([1, 2, 3], index=[today, today, today])\n store['obj1'] = df\n\n with set_timezone('CST6CDT'):\n result = store['obj1']\n\n assert_frame_equal(result, df)\n\n def test_legacy_datetimetz_object(self, datapath):\n # legacy from < 0.17.0\n # 8260\n expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),\n B=Timestamp('20130603', tz='CET')),\n index=range(5))\n with ensure_clean_store(\n datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'),\n mode='r') as store:\n result = store['df']\n assert_frame_equal(result, expected)\n\n def test_dst_transitions(self):\n # make sure we are not failing on transaitions\n with ensure_clean_store(self.path) as store:\n times = pd.date_range(\"2013-10-26 23:00\", \"2013-10-27 01:00\",\n tz=\"Europe/London\",\n freq=\"H\",\n ambiguous='infer')\n\n for i in [times, times + pd.Timedelta('10min')]:\n _maybe_remove(store, 'df')\n df = DataFrame({'A': range(len(i)), 'B': i}, index=i)\n store.append('df', df)\n result = store.select('df')\n assert_frame_equal(result, df)\n"
] | [
[
"pandas.util.testing.ensure_clean",
"pandas.Timestamp",
"pandas.MultiIndex.from_tuples",
"pandas.compat.numpy.np_array_datetime64_compat",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.compat.parse_date",
"pandas.DatetimeIndex",
"pandas.core.indexes.datetimes.date_range",
"pandas._libs.tslib.Timestamp",
"pandas.io.parsers._concat_date_cols",
"numpy.array",
"pandas.compat.lrange"
],
[
"pandas.io.pytables.read_hdf",
"pandas.util.testing.assert_class_equal",
"pandas.util.testing.ensure_clean",
"pandas.io.pytables.HDFStore",
"pandas.Series",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"numpy.asarray",
"pandas.PeriodIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"pandas.isna",
"pandas.util.testing.makeDataFrame",
"numpy.random.randint",
"pandas.util.testing.makeTimeDataFrame",
"pandas.offsets.CustomBusinessDay",
"pandas.io.pytables.Term",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.Index",
"pandas.util.testing.round_trip_pathlib",
"pandas.util.testing.makeDateIndex",
"pandas.Int64Index",
"pandas.util.testing.set_timezone",
"pandas.set_option",
"pandas.util.testing.reset_testing_mode",
"numpy.zeros",
"pandas.util.testing.rands_array",
"pandas.util.testing.round_trip_localpath",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.concat",
"pandas.read_hdf",
"pandas.bdate_range",
"pandas.MultiIndex",
"pandas.compat.is_platform_windows",
"pandas.Categorical",
"pandas.util.testing.assert_almost_equal",
"pandas.Timedelta",
"pandas.compat.is_platform_little_endian",
"numpy.random.rand",
"pandas.HDFStore",
"pandas.date_range",
"numpy.random.binomial",
"numpy.array",
"pandas.timedelta_range",
"pandas.util.testing.makeTimeSeries",
"numpy.random.seed",
"pandas.util.testing.makeFloatSeries",
"pandas.util.testing.makeMixedDataFrame",
"pandas.util.testing.set_testing_mode",
"numpy.datetime64",
"pandas.util.testing.makeStringSeries",
"numpy.random.normal",
"pandas.util.testing.rands",
"numpy.float64",
"pandas.compat.lrange",
"pandas.Timestamp",
"pandas.io.formats.printing.pprint_thing",
"pandas.util.testing.makePeriodIndex",
"pandas.util._test_decorators.skip_if_no"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sambuddinc/DLTK | [
"9511b0b9860118a9285c2fe730ea49dfe247cab6"
] | [
"data/IXI_HH/download_IXI_HH.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Download and extract the IXI Hammersmith Hospital 3T dataset\n\nurl: http://brain-development.org/ixi-dataset/\nref: IXI – Information eXtraction from Images (EPSRC GR/S21533/02)\n\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future.standard_library import install_aliases # py 2/3 compatability\ninstall_aliases()\n\nfrom urllib.request import FancyURLopener\n\nimport os.path\nimport tarfile\nimport pandas as pd\nimport glob\nimport SimpleITK as sitk\nimport numpy as np\n\nDOWNLOAD_IMAGES = True\nEXTRACT_IMAGES = True\nPROCESS_OTHER = True\nRESAMPLE_IMAGES = True\nCLEAN_UP = True\n\n\ndef resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):\n original_spacing = itk_image.GetSpacing()\n original_size = itk_image.GetSize()\n\n out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),\n int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),\n int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]\n\n resample = sitk.ResampleImageFilter()\n resample.SetOutputSpacing(out_spacing)\n resample.SetSize(out_size)\n resample.SetOutputDirection(itk_image.GetDirection())\n resample.SetOutputOrigin(itk_image.GetOrigin())\n resample.SetTransform(sitk.Transform())\n resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())\n\n if is_label:\n resample.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample.SetInterpolator(sitk.sitkBSpline)\n\n return resample.Execute(itk_image)\n\n\ndef reslice_image(itk_image, itk_ref, is_label=False):\n resample = sitk.ResampleImageFilter()\n resample.SetReferenceImage(itk_ref)\n\n if is_label:\n resample.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample.SetInterpolator(sitk.sitkBSpline)\n\n return resample.Execute(itk_image)\n\n\nurls = {}\nurls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'\nurls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'\nurls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'\nurls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'\nurls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'\n\nfnames = {}\nfnames['t1'] = 't1.tar'\nfnames['t2'] = 't2.tar'\nfnames['pd'] = 'pd.tar'\nfnames['mra'] = 'mra.tar'\nfnames['demographic'] = 'demographic.xls'\n\n\nif DOWNLOAD_IMAGES:\n # Download all IXI data\n for key, url in urls.items():\n\n if not os.path.isfile(fnames[key]):\n print('Downloading {} from {}'.format(fnames[key], url))\n curr_file = FancyURLopener()\n curr_file.retrieve(url, fnames[key])\n else:\n print('File {} already exists. 
Skipping download.'.format(\n fnames[key]))\n\nif EXTRACT_IMAGES:\n # Extract the HH subset of IXI\n for key, fname in fnames.items():\n\n if (fname.endswith('.tar')):\n print('Extracting IXI HH data from {}.'.format(fnames[key]))\n output_dir = os.path.join('./orig/', key)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n t = tarfile.open(fname, 'r')\n for member in t.getmembers():\n if '-HH-' in member.name:\n t.extract(member, output_dir)\n\n\nif PROCESS_OTHER:\n # Process the demographic xls data and save to csv\n xls = pd.ExcelFile('demographic.xls')\n print(xls.sheet_names)\n\n df = xls.parse('Table')\n for index, row in df.iterrows():\n IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])\n df.loc[index, 'IXI_ID'] = IXI_id\n\n t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))\n t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))\n pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))\n mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))\n\n # Check if each entry is complete and drop if not\n # if not t1_exists and not t2_exists and not pd_exists and not mra\n # exists:\n if not (t1_exists and t2_exists and pd_exists and mra_exists):\n df.drop(index, inplace=True)\n\n # Write to csv file\n df.to_csv('demographic_HH.csv', index=False)\n\nif RESAMPLE_IMAGES:\n # Resample the IXI HH T2 images to 1mm isotropic and reslice all\n # others to it\n df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,\n na_values=[]).as_matrix()\n\n for i in df:\n IXI_id = i[0]\n print('Resampling {}'.format(IXI_id))\n\n t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]\n t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]\n pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]\n mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]\n\n t1 = sitk.ReadImage(t1_fn)\n t2 = sitk.ReadImage(t2_fn)\n pd = sitk.ReadImage(pd_fn)\n mra = sitk.ReadImage(mra_fn)\n\n # Resample to 1mm isotropic resolution\n t2_1mm = resample_image(t2)\n t1_1mm = reslice_image(t1, t2_1mm)\n pd_1mm = reslice_image(pd, t2_1mm)\n mra_1mm = reslice_image(mra, t2_1mm)\n\n output_dir = os.path.join('./1mm/', IXI_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))\n print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))\n print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))\n print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))\n\n sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))\n sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))\n sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))\n sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))\n\n # Resample to 2mm isotropic resolution\n t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])\n t1_2mm = reslice_image(t1, t2_2mm)\n pd_2mm = reslice_image(pd, t2_2mm)\n mra_2mm = reslice_image(mra, t2_2mm)\n\n output_dir = os.path.join('./2mm/', IXI_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))\n print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))\n print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))\n print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))\n\n sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))\n sitk.WriteImage(t2_2mm, 
os.path.join(output_dir, 'T2_2mm.nii.gz'))\n sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))\n sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))\n\n\nif CLEAN_UP:\n # Remove the .tar files\n for key, fname in fnames.items():\n if (fname.endswith('.tar')):\n os.remove(fname)\n\n # Remove all data in original resolution\n os.system('rm -rf orig')\n"
] | [
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
klarman-cell-observatory/scCloud.py | [
"5a04a2f22574db044d018656ac4705ec83840226",
"5a04a2f22574db044d018656ac4705ec83840226",
"5a04a2f22574db044d018656ac4705ec83840226"
] | [
"sccloud/misc/misc.py",
"sccloud/__init__.py",
"sccloud/tools/visualization.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom typing import List\nfrom anndata import AnnData\n\nfrom sccloud.io import read_input\n\n\ndef search_genes(\n data: AnnData,\n gene_list: List[str],\n rec_key: str = \"de_res\",\n measure: str = \"percentage\",\n) -> pd.DataFrame:\n \"\"\"Extract and display gene expressions for each cluster from an `anndata` object.\n\n This function helps to see marker expressions in clusters via the interactive python environment.\n\n Parameters\n ----------\n\n data: ``anndata.AnnData``\n Annotated data matrix containing the expression matrix and differential expression results.\n\n gene_list: ``List[str]``\n A list of gene symbols.\n\n rec_key: ``str``, optional, default: ``\"de_res\"``\n Keyword of DE analysis result stored in ``data.varm``.\n\n measure : ``str``, optional, default: ``\"percentage\"``\n Can be either ``\"percentage\"`` or ``\"mean_logExpr\"``:\n * ``percentage`` shows the percentage of cells expressed the genes;\n * ``mean_logExpr`` shows the mean log expression.\n\n Returns\n -------\n ``pandas.DataFrame``\n A data frame containing marker expressions in each cluster.\n\n Examples\n --------\n >>> results = scc.search_genes(adata, ['CD3E', 'CD4', 'CD8'])\n \"\"\"\n\n columns = [x for x in data.varm[rec_key].dtype.names if x.startswith(measure + \":\")]\n df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)\n return df.reindex(index=gene_list)\n\n\ndef search_de_genes(\n data: AnnData,\n gene_list: List[str],\n rec_key: str = \"de_res\",\n de_test: str = \"fisher\",\n de_alpha: float = 0.05,\n thre: float = 1.5,\n) -> pd.DataFrame:\n \"\"\"Extract and display differential expression analysis results of markers for each cluster.\n\n This function helps to see if markers are up or down regulated in each cluster via the interactive python environment: \n * ``++`` indicates up-regulated and fold change >= threshold;\n * ``+`` indicates up-regulated but fold change < threshold;\n * ``--`` indicates down-regulated and fold change <= 1 / threshold; \n * ``-`` indicates down-regulated but fold change > 1 / threshold;\n * ``?`` indicates not differentially expressed.\n\n Parameters\n ----------\n data: ``anndata.Anndata``\n Annotated data matrix containing the expression matrix and differential expression results.\n\n gene_list: ``List[str]``\n A list of gene symbols.\n\n rec_key: ``str``, optional, default: ``\"de_res\"``\n Keyword of DE analysis result stored in ``data.varm``.\n\n de_test : ``str``, optional, default: ``\"fisher\"``\n Differential expression test to look at, could be either ``t``, ``fisher`` or ``mwu``.\n\n de_alpha : ``float``, optional, default: ``0.05``\n False discovery rate.\n\n thre : ``float``, optional, default: ``1.5``\n Fold change threshold to determine if the marker is a strong DE (``++`` or ``--``) or weak DE (``+`` or ``-``).\n\n Returns\n -------\n ``pandas.DataFrame``\n A data frame containing marker differential expression results for each cluster.\n\n Examples\n --------\n >>> df = sccloud.misc.search_de_genes(adata, ['CD3E', 'CD4', 'CD8'], thre = 2.0)\n \"\"\"\n\n columns = [\n x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + \"_qval:\")\n ]\n df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)\n df_de = df_de.reindex(index=gene_list)\n\n columns = [\n x\n for x in data.varm[rec_key].dtype.names\n if (\n x.startswith(\"percentage_fold_change:\")\n if de_test == \"fisher\"\n else x.startswith(\"log_fold_change:\")\n )\n ]\n df_fc = 
pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)\n df_fc = df_fc.reindex(index=gene_list)\n if de_test != \"fisher\":\n df_fc = np.exp(df_fc)\n\n results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype(\"U4\"))\n results[:] = \"?\"\n results[np.isnan(df_de)] = \"NaN\"\n results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = \"+\"\n results[(df_de <= de_alpha).values & (df_fc >= thre).values] = \"++\"\n results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = \"-\"\n results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = \"--\"\n\n clusts = [x.rpartition(\":\")[2] for x in columns]\n df = pd.DataFrame(data=results, index=gene_list, columns=clusts)\n return df\n\n\ndef show_attributes(\n input_file: str,\n show_attributes: bool,\n show_gene_attributes: bool,\n show_values_for_attributes: str,\n) -> None:\n \"\"\" Show data attributes. For command line use.\n \"\"\"\n\n data = read_input(input_file, h5ad_mode=\"r\")\n if show_attributes:\n print(\n \"Available sample attributes in input dataset: {0}\".format(\n \", \".join(data.obs.columns.values)\n )\n )\n if show_gene_attributes:\n print(\n \"Available gene attributes in input dataset: {0}\".format(\n \", \".join(data.var.columns.values)\n )\n )\n if not show_values_for_attributes is None:\n for attr in show_values_for_attributes.split(\",\"):\n print(\n \"Available values for attribute {0}: {1}.\".format(\n attr, \", \".join(np.unique(data.obs[attr]))\n )\n )\n\n\ndef perform_oneway_anova(\n data: AnnData,\n glist: List[str],\n restriction_vec: List[str],\n group_str: str,\n fdr_alpha: float = 0.05,\n res_key: str = None,\n) -> pd.DataFrame:\n \"\"\"Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.\n Parameters\n ----------\n\n data : `anndata` object\n An `anndata` object containing the expression matrix.\n glist : `list[str]`\n A list of gene symbols.\n restriction_vec : `list[str]`\n A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value\n group_str : `str`\n How to group selected cells for ANOVA analysis. If group_str is for pseudotime, it has two formats. 
1) 'pseudotime:time:n', which divides cells by equal pseudotime invertal; 2) 'pseudotime:size:n' divides cells by equal number of cells.\n fdr_alpha : `float`, optional (default: 0.05)\n False discovery rate.\n res_key : `str`, optional (default: None)\n Store results into data using res_key, the grouping information is stored in obs and the results is stored in uns.\n\n Returns\n -------\n `pandas.DataFrame`\n Results for genes that pass FDR control.\n\n Examples\n --------\n >>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')\n \"\"\"\n\n from scipy.stats import f_oneway\n from statsmodels.stats.multitest import fdrcorrection as fdr\n\n selected = np.ones(data.shape[0], dtype=bool)\n for rest_str in restriction_vec:\n attr, value_str = rest_str.split(\":\")\n values = value_str.split(\",\")\n selected = selected & np.isin(data.obs[attr], values)\n\n gene_list = np.array(glist)\n gene_list = gene_list[np.isin(gene_list, data.var_names)]\n ngene = gene_list.size\n\n newdat = data[selected, :][:, gene_list].copy()\n newdat.X = newdat.X.toarray()\n\n group_values = group_str.split(\":\")\n group_names = []\n col_names = []\n\n ngr = 0\n group_idx = None\n\n if group_values[0] == \"pseudotime\":\n assert len(group_values) == 3\n div_by = group_values[1]\n ngr = int(group_values[2])\n\n group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)\n pseudotimes = newdat.obs[\"pseudotime\"].values\n\n min_t = pseudotimes.min()\n max_t = pseudotimes.max()\n\n if div_by == \"time\":\n interval = (max_t - min_t) / ngr\n left = min_t - 1e-5\n for i in range(ngr):\n right = min_t + interval * (i + 1)\n name = \"({:.2f}, {:.2f}]\".format(left if left >= 0 else 0.0, right)\n group_names.append(name)\n group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)\n left = right\n else:\n assert div_by == \"size\"\n ords = np.argsort(pseudotimes)\n quotient = ords.size // ngr\n residule = ords.size % ngr\n\n fr = 0\n for i in range(ngr):\n to = fr + quotient + (i < residule)\n name = \"[{:.2f}, {:.2f}]\".format(\n pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]\n )\n group_names.append(name)\n group_idx[i][ords[fr:to]] = True\n fr = to\n\n else:\n assert len(group_values) == 2\n group_attr = group_values[0]\n tmp_str = group_values[1]\n groups_str = tmp_str.split(\";\")\n\n ngr = len(groups_str)\n group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)\n\n for i, gstr in enumerate(groups_str):\n name, values = gstr.split(\"~\")\n group_names.append(name)\n group_idx[i] = np.isin(newdat.obs[group_attr], values.split(\",\"))\n\n for i in range(ngr):\n print(\"Group {} has {} cells.\".format(group_names[i], group_idx[i].sum()))\n\n np.warnings.filterwarnings(\"ignore\")\n stats = np.zeros((ngene, 3 + ngr * 2))\n for i in range(ngene):\n arr_list = []\n for j in range(ngr):\n arr = newdat.X[group_idx[j], i]\n stats[i, 3 + j * 2] = arr.mean()\n stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size\n arr_list.append(arr)\n stats[i, 0], stats[i, 1] = f_oneway(*arr_list)\n if np.isnan(stats[i, 0]):\n stats[i, 0] = 0.0\n stats[i, 1] = 1.0\n passed, stats[:, 2] = fdr(stats[:, 1])\n\n cols = [\"fstat\", \"pval\", \"qval\"]\n for i in range(ngr):\n cols.extend([group_names[i] + \"_mean\", group_names[i] + \"_percent\"])\n raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)\n\n results = raw_results[raw_results[\"qval\"] <= fdr_alpha]\n results = results.sort_values(\"qval\")\n\n if res_key is not None:\n data.uns[res_key] = raw_results\n data.obs[res_key] 
= \"background\"\n for i in range(ngr):\n idx = np.zeros(data.shape[0], dtype=bool)\n idx[selected] = group_idx[i]\n data.obs.loc[idx, res_key] = group_names[i]\n\n return results\n",
"try:\n get_ipython\nexcept NameError:\n import matplotlib\n\n matplotlib.use(\"Agg\")\n\nimport sys\nimport logging\nimport warnings\n\nlogger = logging.getLogger(\"sccloud\")\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module='lightgbm')\n\nfrom .io import infer_file_format, read_input, write_output\nfrom .tools import (\n aggregate_matrices,\n qc_metrics,\n get_filter_stats,\n filter_data,\n log_norm,\n select_features,\n pca,\n highly_variable_features,\n set_group_attribute,\n correct_batch,\n neighbors,\n calc_kBET,\n calc_kSIM,\n diffmap,\n reduce_diffmap_to_3d,\n calc_pseudotime,\n louvain,\n leiden,\n spectral_louvain,\n spectral_leiden,\n tsne,\n fitsne,\n umap,\n fle,\n net_tsne,\n net_fitsne,\n net_umap,\n net_fle,\n de_analysis,\n markers,\n write_results_to_excel,\n find_markers,\n infer_path,\n)\nfrom .annotate_cluster import infer_cell_types, annotate, infer_cluster_names\nfrom .demuxEM import estimate_background_probs, demultiplex\nfrom .misc import search_genes, search_de_genes\n\nfrom scplot import (\n violin,\n heatmap,\n scatter,\n line,\n dotplot,\n scatter_matrix,\n embedding,\n composition_plot,\n variable_feature_plot,\n volcano,\n)\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n",
"import time\nimport numpy as np\nimport scipy\nimport logging\nimport umap as umap_module\nimport forceatlas2 as fa2\nimport uuid\n\nfrom anndata import AnnData\nfrom joblib import effective_n_jobs\ntry:\n from MulticoreTSNE import MulticoreTSNE as TSNE\nexcept ImportError:\n print(\"Need Multicore-TSNE!\")\n\nfrom sccloud.tools import (\n update_rep,\n X_from_rep,\n W_from_rep,\n knn_is_cached,\n neighbors,\n net_train_and_predict,\n calculate_nearest_neighbors,\n calculate_affinity_matrix,\n construct_graph,\n)\n\nlogger = logging.getLogger(\"sccloud\")\n\n\ndef calc_tsne(\n X,\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n learning_rate,\n random_state,\n init=\"random\",\n n_iter=1000,\n n_iter_early_exag=250,\n):\n \"\"\"\n TODO: Typing\n \"\"\"\n tsne = TSNE(\n n_jobs=n_jobs,\n n_components=n_components,\n perplexity=perplexity,\n early_exaggeration=early_exaggeration,\n learning_rate=learning_rate,\n random_state=random_state,\n verbose=1,\n init=init,\n n_iter=n_iter,\n n_iter_early_exag=n_iter_early_exag,\n )\n X_tsne = tsne.fit_transform(X)\n logger.info(\"Final error = {}\".format(tsne.kl_divergence_))\n return X_tsne\n\n\ndef calc_fitsne(\n X,\n nthreads,\n no_dims,\n perplexity,\n early_exag_coeff,\n learning_rate,\n rand_seed,\n initialization=None,\n max_iter=1000,\n stop_early_exag_iter=250,\n mom_switch_iter=250,\n):\n \"\"\"\n TODO: Typing\n \"\"\"\n # FItSNE will change X content\n\n # Check if fftw3 is installed.\n import ctypes.util\n\n fftw3_loc = ctypes.util.find_library(\"fftw3\")\n if fftw3_loc is None:\n raise Exception(\"Please install 'fftw3' first to use the FIt-SNE feature!\")\n\n from fitsne import FItSNE\n\n return FItSNE(\n X.astype(\"float64\"),\n nthreads=nthreads,\n no_dims=no_dims,\n perplexity=perplexity,\n early_exag_coeff=early_exag_coeff,\n learning_rate=learning_rate,\n rand_seed=rand_seed,\n initialization=initialization,\n max_iter=max_iter,\n stop_early_exag_iter=stop_early_exag_iter,\n mom_switch_iter=mom_switch_iter,\n )\n\n\n# Running umap using our own kNN indices\ndef calc_umap(\n X,\n n_components,\n n_neighbors,\n min_dist,\n spread,\n random_state,\n init=\"spectral\",\n n_epochs=None,\n learning_rate=1.0,\n knn_indices=None,\n knn_dists=None,\n):\n \"\"\"\n TODO: Typing\n \"\"\"\n umap_obj = umap_module.UMAP(\n n_components=n_components,\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n spread=spread,\n random_state=random_state,\n init=init,\n n_epochs=n_epochs,\n learning_rate=learning_rate,\n verbose=True,\n )\n\n embedding = None\n if X.shape[0] < 4096 or knn_indices is None:\n embedding = umap_obj.fit_transform(X)\n logger.info(\"using umap kNN graph {}\".format(X.shape[0]))\n else:\n assert knn_dists is not None\n # preprocessing codes adopted from UMAP's umap_.py fit function in order to use our own kNN graphs\n from sklearn.utils import check_random_state, check_array\n\n X = check_array(X, dtype=np.float32, accept_sparse=\"csr\")\n umap_obj._raw_data = X\n if umap_obj.a is None or umap_obj.b is None:\n umap_obj._a, umap_obj._b = umap_module.umap_.find_ab_params(\n umap_obj.spread, umap_obj.min_dist\n )\n else:\n umap_obj._a = umap_obj.a\n umap_obj._b = umap_obj.b\n umap_obj._metric_kwds = (\n umap_obj.metric_kwds if umap_obj.metric_kwds is not None else {}\n )\n umap_obj._target_metric_kwds = {}\n _init = (\n check_array(umap_obj.init, dtype=np.float32, accept_sparse=False)\n if isinstance(umap_obj.init, np.ndarray)\n else umap_obj.init\n )\n umap_obj._initial_alpha = umap_obj.learning_rate\n 
umap_obj._validate_parameters()\n\n if umap_obj.verbose:\n logger.info(str(umap_obj))\n\n if scipy.sparse.isspmatrix_csr(X):\n if not X.has_sorted_indices:\n X.sort_indices()\n umap_obj._sparse_data = True\n else:\n umap_obj._sparse_data = False\n\n _random_state = check_random_state(umap_obj.random_state)\n\n if umap_obj.verbose:\n logger.info(\"Construct fuzzy simplicial set\")\n\n umap_obj._small_data = False\n umap_obj.graph_ = umap_module.umap_.fuzzy_simplicial_set(\n X,\n umap_obj.n_neighbors,\n _random_state,\n umap_obj.metric,\n umap_obj._metric_kwds,\n knn_indices,\n knn_dists,\n umap_obj.angular_rp_forest,\n umap_obj.set_op_mix_ratio,\n umap_obj.local_connectivity,\n umap_obj.verbose,\n )\n\n _n_epochs = umap_obj.n_epochs if umap_obj.n_epochs is not None else 0\n if umap_obj.verbose:\n logger.info(\"Construct embedding\")\n embedding = umap_module.umap_.simplicial_set_embedding(\n X,\n umap_obj.graph_,\n umap_obj.n_components,\n umap_obj._initial_alpha,\n umap_obj._a,\n umap_obj._b,\n umap_obj.repulsion_strength,\n umap_obj.negative_sample_rate,\n _n_epochs,\n _init,\n _random_state,\n umap_obj.metric,\n umap_obj._metric_kwds,\n umap_obj.verbose,\n )\n\n return embedding\n\n\ndef calc_force_directed_layout(\n W,\n file_name,\n n_jobs,\n target_change_per_node,\n target_steps,\n is3d,\n memory,\n random_state,\n init=None,\n):\n \"\"\"\n TODO: Typing\n \"\"\"\n G = construct_graph(W)\n return fa2.forceatlas2(\n file_name,\n graph=G,\n n_jobs=n_jobs,\n target_change_per_node=target_change_per_node,\n target_steps=target_steps,\n is3d=is3d,\n memory=memory,\n random_state=random_state,\n init=init,\n )\n\n\ndef tsne(\n data: AnnData,\n rep: str = \"pca\",\n n_jobs: int = -1,\n n_components: int = 2,\n perplexity: float = 30,\n early_exaggeration: int = 12,\n learning_rate: float = 1000,\n random_state: int = 0,\n out_basis: str = \"tsne\",\n) -> None:\n \"\"\"Calculate tSNE embedding using MulticoreTSNE_ package.\n\n .. _MulticoreTSNE: https://github.com/DmitryUlyanov/Multicore-TSNE\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated tSNE coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n perplexity: ``float``, optional, default: ``30``\n The perplexity is related to the number of nearest neighbors used in other manifold learning algorithms. 
Larger datasets usually require a larger perplexity.\n\n early_exaggeration: ``int``, optional, default: ``12``\n Controls how tight natural clusters in the original space are in the embedded space, and how much space will be between them.\n\n learning_rate: ``float``, optional, default: ``1000``\n The learning rate can be a critical parameter, which should be between 100 and 1000.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n out_basis: ``str``, optional, default: ``\"tsne\"``\n Key name for calculated tSNE coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: tSNE coordinates of the data.\n\n Examples\n --------\n >>> scc.tsne(adata)\n \"\"\"\n start = time.time()\n rep = update_rep(rep)\n n_jobs = effective_n_jobs(n_jobs)\n\n data.obsm[\"X_\" + out_basis] = calc_tsne(\n X_from_rep(data, rep),\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n learning_rate,\n random_state,\n )\n\n end = time.time()\n logger.info(\"t-SNE is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef fitsne(\n data: AnnData,\n rep: str = \"pca\",\n n_jobs: int = -1,\n n_components: int = 2,\n perplexity: float = 30,\n early_exaggeration: int = 12,\n learning_rate: float = 1000,\n random_state: int = 0,\n out_basis: str = \"fitsne\",\n) -> None:\n \"\"\"Calculate FIt-SNE embedding using fitsne_ package.\n\n .. _fitsne: https://github.com/KlugerLab/FIt-SNE\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated FI-tSNE coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n perplexity: ``float``, optional, default: ``30``\n The perplexity is related to the number of nearest neighbors used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.\n\n early_exaggeration: ``int``, optional, default: ``12``\n Controls how tight natural clusters in the original space are in the embedded space, and how much space will be between them.\n\n learning_rate: ``float``, optional, default: ``1000``\n The learning rate can be a critical parameter, which should be between 100 and 1000.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n out_basis: ``str``, optional, default: ``\"fitsne\"``\n Key name for calculated FI-tSNE coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: FI-tSNE coordinates of the data.\n\n Examples\n --------\n >>> scc.fitsne(adata)\n \"\"\"\n start = time.time()\n\n rep = update_rep(rep)\n n_jobs = effective_n_jobs(n_jobs)\n\n data.obsm[\"X_\" + out_basis] = calc_fitsne(\n X_from_rep(data, rep),\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n learning_rate,\n random_state,\n )\n\n end = time.time()\n logger.info(\"FIt-SNE is calculated. 
Time spent = {:.2f}s.\".format(end - start))\n\n\ndef umap(\n data: AnnData,\n rep: str = \"pca\",\n n_components: int = 2,\n n_neighbors: int = 15,\n min_dist: float = 0.5,\n spread: float = 1.0,\n random_state: int = 0,\n out_basis: str = \"umap\",\n) -> None:\n \"\"\"Calculate UMAP embedding using umap-learn_ package.\n\n .. _umap-learn: https://github.com/lmcinnes/umap\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated UMAP coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n n_neighbors: ``int``, optional, default: ``15``\n Number of nearest neighbors considered during the computation.\n\n min_dist: ``float``, optional, default: ``0.5``\n The effective minimum distance between embedded data points.\n\n spread: ``float``, optional, default: ``1.0``\n The effective scale of embedded data points.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n out_basis: ``str``, optional, default: ``\"umap\"``\n Key name for calculated UMAP coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: UMAP coordinates of the data.\n\n Examples\n --------\n >>> scc.umap(adata)\n \"\"\"\n start = time.time()\n\n rep = update_rep(rep)\n indices_key = rep + \"_knn_indices\"\n distances_key = rep + \"_knn_distances\"\n\n X = X_from_rep(data, rep)\n if not knn_is_cached(data, indices_key, distances_key, n_neighbors):\n raise ValueError(\"Please run neighbors first!\")\n\n knn_indices = np.insert(\n data.uns[indices_key][:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis=1\n )\n knn_dists = np.insert(\n data.uns[distances_key][:, 0 : n_neighbors - 1], 0, 0.0, axis=1\n )\n data.obsm[\"X_\" + out_basis] = calc_umap(\n X,\n n_components,\n n_neighbors,\n min_dist,\n spread,\n random_state,\n knn_indices=knn_indices,\n knn_dists=knn_dists,\n )\n\n end = time.time()\n logger.info(\"UMAP is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef fle(\n data: AnnData,\n file_name: str = None,\n n_jobs: int = -1,\n rep: str = \"diffmap\",\n K: int = 50,\n full_speed: bool = False,\n target_change_per_node: float = 2.0,\n target_steps: int = 5000,\n is3d: bool = False,\n memory: int = 8,\n random_state: int = 0,\n out_basis: str = \"fle\",\n) -> None:\n \"\"\"Construct the Force-directed (FLE) graph using ForceAtlas2_ implementation, with Python wrapper as forceatlas2-python_.\n\n .. _ForceAtlas2: https://github.com/klarman-cell-observatory/forceatlas2\n .. _forceatlas2-python: https://github.com/klarman-cell-observatory/forceatlas2-python\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n file_name: ``str``, optional, default: ``None``\n Temporary file to store the coordinates as the input to forceatlas2. If ``None``, use ``tempfile.mkstemp`` to generate file name.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n rep: ``str``, optional, default: ``\"diffmap\"``\n Representation of data used for the calculation. By default, use Diffusion Map coordinates. 
If ``None``, use the count matrix ``data.X``.\n\n K: ``int``, optional, default: ``50``\n Number of nearest neighbors to be considered during the computation.\n\n full_speed: ``bool``, optional, default: ``False``\n * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.\n * Otherwise, use only one thread to make sure results are reproducible.\n\n target_change_per_node: ``float``, optional, default: ``2.0``\n Target change per node to stop ForceAtlas2.\n\n target_steps: ``int``, optional, default: ``5000``\n Maximum number of iterations before stopping the ForceAtlas2 algorithm.\n\n is3d: ``bool``, optional, default: ``False``\n If ``True``, calculate 3D force-directed layout.\n\n memory: ``int``, optional, default: ``8``\n Memory size in GB for the Java FA2 component. By default, use 8GB memory.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n out_basis: ``str``, optional, default: ``\"fle\"``\n Key name for calculated FLE coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: FLE coordinates of the data.\n\n Examples\n --------\n >>> scc.fle(adata)\n \"\"\"\n start = time.time()\n\n if file_name is None:\n import tempfile\n\n _, file_name = tempfile.mkstemp()\n\n n_jobs = effective_n_jobs(n_jobs)\n rep = update_rep(rep)\n\n if (\"W_\" + rep) not in data.uns:\n neighbors(\n data,\n K=K,\n rep=rep,\n n_jobs=n_jobs,\n random_state=random_state,\n full_speed=full_speed,\n )\n\n data.obsm[\"X_\" + out_basis] = calc_force_directed_layout(\n W_from_rep(data, rep),\n file_name,\n n_jobs,\n target_change_per_node,\n target_steps,\n is3d,\n memory,\n random_state,\n )\n\n end = time.time()\n logger.info(\n \"Force-directed layout is calculated. Time spent = {:.2f}s.\".format(end - start)\n )\n\n\ndef select_cells(distances, frac, K=25, alpha=1.0, random_state=0):\n \"\"\"\n TODO: documentation (not user API)\n \"\"\"\n\n start_time = time.time()\n\n nsample = distances.shape[0]\n\n if K > distances.shape[1]:\n logger.info(\n \"Warning: in select_cells, K = {} > the number of calculated nearest neighbors!\\nSet K to {}\".format(\n K, distances.shape[1]\n )\n )\n K = distances.shape[1]\n\n probs = np.zeros(nsample)\n if alpha == 0.0:\n probs[:] = 1.0 # uniform\n elif alpha == 1.0:\n probs[:] = distances[:, K - 1]\n else:\n probs[:] = distances[:, K - 1] ** alpha\n probs /= probs.sum()\n\n np.random.seed(random_state)\n selected = np.zeros(nsample, dtype=bool)\n selected[\n np.random.choice(nsample, size=int(nsample * frac), replace=False, p=probs)\n ] = True\n\n end_time = time.time()\n logger.info(\n \"select_cells finished. Time spent = {:.2}s.\".format(end_time - start_time)\n )\n\n return selected\n\n\ndef net_tsne(\n data: AnnData,\n rep: str = \"pca\",\n n_jobs: int = -1,\n n_components: int = 2,\n perplexity: float = 30,\n early_exaggeration: int = 12,\n learning_rate: float = 1000,\n random_state: int = 0,\n select_frac: float = 0.1,\n select_K: int = 25,\n select_alpha: float = 1.0,\n net_alpha: float = 0.1,\n polish_learning_frac: float = 0.33,\n polish_n_iter: int = 150,\n out_basis: str = \"net_tsne\",\n) -> None:\n \"\"\"Calculate approximated tSNE embedding using Deep Learning model to improve the speed.\n\n In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.\n\n .. 
_MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells (``n_obs``) and columns for genes (``n_feature``).\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated tSNE coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n perplexity: ``float``, optional, default: ``30``\n The perplexity is related to the number of nearest neighbors used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.\n\n early_exaggeration: ``int``, optional, default: ``12``\n Controls how tight natural clusters in the original space are in the embedded space, and how much space will be between them.\n\n learning_rate: ``float``, optional, default: ``1000``\n The learning rate can be a critical parameter, which should be between 100 and 1000.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n select_frac: ``float``, optional, default: ``0.1``\n Down sampling fraction on the cells.\n\n select_K: ``int``, optional, default: ``25``\n Number of neighbors to be used to estimate local density for each data point for down sampling.\n\n select_alpha: ``float``, optional, default: ``1.0``\n Weight the down sample to be proportional to ``radius ** select_alpha``.\n\n net_alpha: ``float``, optional, default: ``0.1``\n L2 penalty (regularization term) parameter of the deep regressor.\n\n polish_learning_frac: ``float``, optional, default: ``0.33``\n After running the deep regressor to predict new coordinates, use ``polish_learning_frac`` * ``n_obs`` as the learning rate to polish the coordinates.\n\n polish_n_iter: ``int``, optional, default: ``150``\n Number of iterations for polishing tSNE run.\n\n out_basis: ``str``, optional, default: ``\"net_tsne\"``\n Key name for the approximated tSNE coordinates calculated.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: Net tSNE coordinates of the data.\n\n Update ``data.obs``:\n * ``data.obs['ds_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.\n\n Examples\n --------\n >>> scc.net_tsne(adata)\n \"\"\"\n start = time.time()\n\n rep = update_rep(rep)\n indices_key = rep + \"_knn_indices\"\n distances_key = rep + \"_knn_distances\"\n\n if not knn_is_cached(data, indices_key, distances_key, select_K):\n raise ValueError(\"Please run neighbors first!\")\n\n n_jobs = effective_n_jobs(n_jobs)\n\n selected = select_cells(\n data.uns[distances_key],\n select_frac,\n K=select_K,\n alpha=select_alpha,\n random_state=random_state,\n )\n\n X_full = X_from_rep(data, rep)\n X = X_full[selected, :]\n X_tsne = calc_tsne(\n X,\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n learning_rate,\n random_state,\n )\n\n data.uns[\"X_\" + out_basis + \"_small\"] = X_tsne\n data.obs[\"ds_selected\"] = selected\n\n Y_init = np.zeros((data.shape[0], 2), dtype=np.float64)\n Y_init[selected, :] = X_tsne\n Y_init[~selected, :] = net_train_and_predict(\n X, X_tsne, X_full[~selected, :], net_alpha, random_state, verbose=True\n )\n\n 
data.obsm[\"X_\" + out_basis + \"_pred\"] = Y_init\n\n polish_learning_rate = polish_learning_frac * data.shape[0]\n data.obsm[\"X_\" + out_basis] = calc_tsne(\n X_full,\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n polish_learning_rate,\n random_state,\n init=Y_init,\n n_iter=polish_n_iter,\n n_iter_early_exag=0,\n )\n\n end = time.time()\n logger.info(\"Net tSNE is calculated. Time spent = {:.2f}s.\".format(end - start))\n\n\ndef net_fitsne(\n data: AnnData,\n rep: str = \"pca\",\n n_jobs: int = -1,\n n_components: int = 2,\n perplexity: float = 30,\n early_exaggeration: int = 12,\n learning_rate: float = 1000,\n random_state: int = 0,\n select_frac: float = 0.1,\n select_K: int = 25,\n select_alpha: float = 1.0,\n net_alpha: float = 0.1,\n polish_learning_frac: float = 0.5,\n polish_n_iter: int = 150,\n out_basis: \"str\" = \"net_fitsne\",\n) -> None:\n \"\"\"Calculate approximated FI-tSNE embedding using Deep Learning model to improve the speed.\n\n In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.\n\n .. _MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells (``n_obs``) and columns for genes (``n_feature``).\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated tSNE coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n perplexity: ``float``, optional, default: ``30``\n The perplexity is related to the number of nearest neighbors used in other manifold learning algorithms. 
Larger datasets usually require a larger perplexity.\n\n early_exaggeration: ``int``, optional, default: ``12``\n Controls how tight natural clusters in the original space are in the embedded space, and how much space will be between them.\n\n learning_rate: ``float``, optional, default: ``1000``\n The learning rate can be a critical parameter, which should be between 100 and 1000.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n select_frac: ``float``, optional, default: ``0.1``\n Down sampling fraction on the cells.\n\n select_K: ``int``, optional, default: ``25``\n Number of neighbors to be used to estimate local density for each data point for down sampling.\n\n select_alpha: ``float``, optional, default: ``1.0``\n Weight the down sample to be proportional to ``radius ** select_alpha``.\n\n net_alpha: ``float``, optional, default: ``0.1``\n L2 penalty (regularization term) parameter of the deep regressor.\n\n polish_learning_frac: ``float``, optional, default: ``0.5``\n After running the deep regressor to predict new coordinates, use ``polish_learning_frac`` * ``n_obs`` as the learning rate to polish the coordinates.\n\n polish_n_iter: ``int``, optional, default: ``150``\n Number of iterations for polishing FI-tSNE run.\n\n out_basis: ``str``, optional, default: ``\"net_fitsne\"``\n Key name for the approximated FI-tSNE coordinates calculated.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: Net FI-tSNE coordinates of the data.\n\n Update ``data.obs``:\n * ``data.obs['ds_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.\n\n Examples\n --------\n >>> scc.net_fitsne(adata)\n \"\"\"\n start = time.time()\n\n rep = update_rep(rep)\n indices_key = rep + \"_knn_indices\"\n distances_key = rep + \"_knn_distances\"\n\n if not knn_is_cached(data, indices_key, distances_key, select_K):\n raise ValueError(\"Please run neighbors first!\")\n\n n_jobs = effective_n_jobs(n_jobs)\n\n selected = select_cells(\n data.uns[distances_key],\n select_frac,\n K=select_K,\n alpha=select_alpha,\n random_state=random_state,\n )\n X_full = X_from_rep(data, rep)\n X = X_full[selected, :]\n X_fitsne = calc_fitsne(\n X,\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n learning_rate,\n random_state,\n )\n\n data.uns[\"X_\" + out_basis + \"_small\"] = X_fitsne\n data.obs[\"ds_selected\"] = selected\n\n Y_init = np.zeros((data.shape[0], 2), dtype=np.float64)\n Y_init[selected, :] = X_fitsne\n Y_init[~selected, :] = net_train_and_predict(\n X, X_fitsne, X_full[~selected, :], net_alpha, random_state, verbose=True\n )\n\n data.obsm[\"X_\" + out_basis + \"_pred\"] = Y_init\n\n polish_learning_rate = polish_learning_frac * data.shape[0]\n data.obsm[\"X_\" + out_basis] = calc_fitsne(\n X_full,\n n_jobs,\n n_components,\n perplexity,\n early_exaggeration,\n polish_learning_rate,\n random_state,\n initialization=Y_init,\n max_iter=polish_n_iter,\n stop_early_exag_iter=0,\n mom_switch_iter=0,\n )\n\n end = time.time()\n logger.info(\"Net FItSNE is calculated. 
Time spent = {:.2f}s.\".format(end - start))\n\n\ndef net_umap(\n data: AnnData,\n rep: str = \"pca\",\n n_jobs: int = -1,\n n_components: int = 2,\n n_neighbors: int = 15,\n min_dist: float = 0.5,\n spread: float = 1.0,\n random_state: int = 0,\n select_frac: float = 0.1,\n select_K: int = 25,\n select_alpha: float = 1.0,\n full_speed: bool = False,\n net_alpha: float = 0.1,\n polish_learning_rate: float = 10.0,\n polish_n_epochs: int = 30,\n out_basis: str = \"net_umap\",\n) -> None:\n \"\"\"Calculate approximated UMAP embedding using Deep Learning model to improve the speed.\n\n In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.\n\n .. _MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n rep: ``str``, optional, default: ``\"pca\"``\n Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.\n\n n_components: ``int``, optional, default: ``2``\n Dimension of calculated UMAP coordinates. By default, generate 2-dimensional data for 2D visualization.\n\n n_neighbors: ``int``, optional, default: ``15``\n Number of nearest neighbors considered during the computation.\n\n min_dist: ``float``, optional, default: ``0.5``\n The effective minimum distance between embedded data points.\n\n spread: ``float``, optional, default: ``1.0``\n The effective scale of embedded data points.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n select_frac: ``float``, optional, default: ``0.1``\n Down sampling fraction on the cells.\n\n select_K: ``int``, optional, default: ``25``\n Number of neighbors to be used to estimate local density for each data point for down sampling.\n\n select_alpha: ``float``, optional, default: ``1.0``\n Weight the down sample to be proportional to ``radius ** select_alpha``.\n\n full_speed: ``bool``, optional, default: ``False``\n * If ``True``, use multiple threads in constructing ``hnsw`` index. 
However, the kNN results are not reproducible.\n * Otherwise, use only one thread to make sure results are reproducible.\n\n net_alpha: ``float``, optional, default: ``0.1``\n L2 penalty (regularization term) parameter of the deep regressor.\n\n polish_learning_frac: ``float``, optional, default: ``10.0``\n After running the deep regressor to predict new coordinates, use ``polish_learning_frac`` * ``n_obs`` as the learning rate to polish the coordinates.\n\n polish_n_iter: ``int``, optional, default: ``30``\n Number of iterations for polishing UMAP run.\n\n out_basis: ``str``, optional, default: ``\"net_umap\"``\n Key name for calculated UMAP coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: Net UMAP coordinates of the data.\n\n Update ``data.obs``:\n * ``data.obs['ds_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.\n\n Examples\n --------\n >>> scc.net_umap(adata)\n \"\"\"\n start = time.time()\n\n rep = update_rep(rep)\n indices_key = rep + \"_knn_indices\"\n distances_key = rep + \"_knn_distances\"\n\n if not knn_is_cached(data, indices_key, distances_key, select_K):\n raise ValueError(\"Please run neighbors first!\")\n\n n_jobs = effective_n_jobs(n_jobs)\n\n selected = select_cells(\n data.uns[distances_key],\n select_frac,\n K=select_K,\n alpha=select_alpha,\n random_state=random_state,\n )\n X_full = X_from_rep(data, rep)\n X = X_full[selected, :]\n\n ds_indices_key = \"ds_\" + rep + \"_knn_indices\" # ds refers to down-sampling\n ds_distances_key = \"ds_\" + rep + \"_knn_distances\"\n indices, distances = calculate_nearest_neighbors(\n X,\n K=n_neighbors,\n n_jobs=n_jobs,\n random_state=random_state,\n full_speed=full_speed,\n )\n data.uns[ds_indices_key] = indices\n data.uns[ds_distances_key] = distances\n\n knn_indices = np.insert(\n data.uns[ds_indices_key][:, 0 : n_neighbors - 1], 0, range(X.shape[0]), axis=1\n )\n knn_dists = np.insert(\n data.uns[ds_distances_key][:, 0 : n_neighbors - 1], 0, 0.0, axis=1\n )\n\n X_umap = calc_umap(\n X,\n n_components,\n n_neighbors,\n min_dist,\n spread,\n random_state,\n knn_indices=knn_indices,\n knn_dists=knn_dists,\n )\n\n data.uns[\"X_\" + out_basis + \"_small\"] = X_umap\n data.obs[\"ds_selected\"] = selected\n\n Y_init = np.zeros((data.shape[0], 2), dtype=np.float64)\n Y_init[selected, :] = X_umap\n Y_init[~selected, :] = net_train_and_predict(\n X, X_umap, X_full[~selected, :], net_alpha, random_state, verbose=True\n )\n\n data.obsm[\"X_\" + out_basis + \"_pred\"] = Y_init\n\n knn_indices = np.insert(\n data.uns[indices_key][:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis=1\n )\n knn_dists = np.insert(\n data.uns[distances_key][:, 0 : n_neighbors - 1], 0, 0.0, axis=1\n )\n\n data.obsm[\"X_\" + out_basis] = calc_umap(\n X_full,\n n_components,\n n_neighbors,\n min_dist,\n spread,\n random_state,\n init=Y_init,\n n_epochs=polish_n_epochs,\n learning_rate=polish_learning_rate,\n knn_indices=knn_indices,\n knn_dists=knn_dists,\n )\n\n end = time.time()\n logger.info(\"Net UMAP is calculated. 
Time spent = {:.2f}s.\".format(end - start))\n\n\ndef net_fle(\n data: AnnData,\n file_name: str = None,\n n_jobs: int = -1,\n rep: str = \"diffmap\",\n K: int = 50,\n full_speed: bool = False,\n target_change_per_node: float = 2.0,\n target_steps: int = 5000,\n is3d: bool = False,\n memory: int = 8,\n random_state: int = 0,\n select_frac: float = 0.1,\n select_K: int = 25,\n select_alpha: float = 1.0,\n net_alpha: float = 0.1,\n polish_target_steps: int = 1500,\n out_basis: str = \"net_fle\",\n) -> None:\n \"\"\"Construct the approximated Force-directed (FLE) graph using Deep Learning model to improve the speed.\n\n In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.\n\n .. _MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html\n\n Parameters\n ----------\n data: ``anndata.AnnData``\n Annotated data matrix with rows for cells and columns for genes.\n\n file_name: ``str``, optional, default: ``None``\n Temporary file to store the coordinates as the input to forceatlas2. If ``None``, use ``tempfile.mkstemp`` to generate file name.\n\n n_jobs: ``int``, optional, default: ``-1``\n Number of threads to use. If ``-1``, use all available threads.\n\n rep: ``str``, optional, default: ``\"diffmap\"``\n Representation of data used for the calculation. By default, use Diffusion Map coordinates. If ``None``, use the count matrix ``data.X``.\n\n K: ``int``, optional, default: ``50``\n Number of nearest neighbors to be considered during the computation.\n\n full_speed: ``bool``, optional, default: ``False``\n * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.\n * Otherwise, use only one thread to make sure results are reproducible.\n\n target_change_per_node: ``float``, optional, default: ``2.0``\n Target change per node to stop ForceAtlas2.\n\n target_steps: ``int``, optional, default: ``5000``\n Maximum number of iterations before stopping the ForceAtlas2 algorithm.\n\n is3d: ``bool``, optional, default: ``False``\n If ``True``, calculate 3D force-directed layout.\n\n memory: ``int``, optional, default: ``8``\n Memory size in GB for the Java FA2 component. 
By default, use 8GB memory.\n\n random_state: ``int``, optional, default: ``0``\n Random seed set for reproducing results.\n\n select_frac: ``float``, optional, default: ``0.1``\n Down sampling fraction on the cells.\n\n select_K: ``int``, optional, default: ``25``\n Number of neighbors to be used to estimate local density for each data point for down sampling.\n\n select_alpha: ``float``, optional, default: ``1.0``\n Weight the down sample to be proportional to ``radius ** select_alpha``.\n\n net_alpha: ``float``, optional, default: ``0.1``\n L2 penalty (regularization term) parameter of the deep regressor.\n\n polish_target_steps: ``int``, optional, default: ``1500``\n After running the deep regressor to predict new coordinate, Number of ForceAtlas2 iterations.\n\n out_basis: ``str``, optional, default: ``\"net_fle\"``\n Key name for calculated FLE coordinates to store.\n\n Returns\n -------\n ``None``\n\n Update ``data.obsm``:\n * ``data.obsm['X_' + out_basis]``: Net FLE coordinates of the data.\n\n Update ``data.obs``:\n * ``data.obs['ds_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.\n\n Examples\n --------\n >>> scc.net_fle(adata)\n \"\"\"\n start = time.time()\n\n if file_name is None:\n if file_name is None:\n import tempfile\n\n _, file_name = tempfile.mkstemp()\n\n n_jobs = effective_n_jobs(n_jobs)\n rep = update_rep(rep)\n\n if (\"W_\" + rep) not in data.uns:\n neighbors(\n data,\n K=K,\n rep=rep,\n n_jobs=n_jobs,\n random_state=random_state,\n full_speed=full_speed,\n )\n\n indices_key = rep + \"_knn_indices\"\n distances_key = rep + \"_knn_distances\"\n\n if not knn_is_cached(data, indices_key, distances_key, select_K):\n raise ValueError(\"Please run neighbors first!\")\n\n selected = select_cells(\n data.uns[distances_key],\n select_frac,\n K=select_K,\n alpha=select_alpha,\n random_state=random_state,\n )\n\n X_full = X_from_rep(data, rep)\n X = X_full[selected, :]\n\n ds_indices_key = \"ds_\" + rep + \"_knn_indices\"\n ds_distances_key = \"ds_\" + rep + \"_knn_distances\"\n indices, distances = calculate_nearest_neighbors(\n X, K=K, n_jobs=n_jobs, random_state=random_state, full_speed=full_speed\n )\n data.uns[ds_indices_key] = indices\n data.uns[ds_distances_key] = distances\n\n W = calculate_affinity_matrix(indices, distances)\n\n X_fle = calc_force_directed_layout(\n W,\n file_name + \".small\",\n n_jobs,\n target_change_per_node,\n target_steps,\n is3d,\n memory,\n random_state,\n )\n\n data.uns[\"X_\" + out_basis + \"_small\"] = X_fle\n data.obs[\"ds_diffmap_selected\"] = selected\n\n Y_init = np.zeros((data.shape[0], 2), dtype=np.float64)\n Y_init[selected, :] = X_fle\n Y_init[~selected, :] = net_train_and_predict(\n X, X_fle, X_full[~selected, :], net_alpha, random_state, verbose=True\n )\n\n data.obsm[\"X_\" + out_basis + \"_pred\"] = Y_init\n\n data.obsm[\"X_\" + out_basis] = calc_force_directed_layout(\n W_from_rep(data, rep),\n file_name,\n n_jobs,\n target_change_per_node,\n polish_target_steps,\n is3d,\n memory,\n random_state,\n init=Y_init,\n )\n\n end = time.time()\n logger.info(\"Net FLE is calculated. Time spent = {:.2f}s.\".format(end - start))\n"
] | [
[
"scipy.stats.f_oneway",
"numpy.unique",
"numpy.isnan",
"pandas.DataFrame",
"numpy.ones",
"numpy.dtype",
"numpy.warnings.filterwarnings",
"numpy.exp",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.isin"
],
[
"matplotlib.use"
],
[
"numpy.random.seed",
"sklearn.utils.check_array",
"numpy.insert",
"scipy.sparse.isspmatrix_csr",
"numpy.zeros",
"sklearn.utils.check_random_state"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
waldo2590/thunder | [
"967ff8f3e7c2fabe1705743d95eb2746d4329786",
"967ff8f3e7c2fabe1705743d95eb2746d4329786"
] | [
"test/test_series_io.py",
"thunder/series/series.py"
] | [
"import pytest\nimport os\nimport glob\nimport json\nfrom numpy import arange, array, allclose, save, savetxt\n\nfrom bolt import array as barray\nfrom thunder.series.readers import fromarray, fromtext, frombinary, fromexample\n\npytestmark = pytest.mark.usefixtures(\"eng\")\n\n\ndef test_from_array(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_bolt(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n if eng is not None:\n b = barray(a, context=eng)\n else:\n b = barray(a)\n data = fromarray(b, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_vector(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_index(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, index=[2, 3], engine=eng)\n assert allclose(data.index, [2, 3])\n\n\ndef test_from_text(tmpdir, eng):\n v = [[0, i] for i in range(10)]\n f = os.path.join(str(tmpdir), 'data.txt')\n savetxt(f, v, fmt='%.02g')\n data = fromtext(f, engine=eng)\n assert allclose(data.shape, (10, 2))\n assert data.dtype == 'float64'\n assert allclose(data.toarray(), v)\n\n\ndef test_from_text_skip(tmpdir):\n k = [[i] for i in range(10)]\n v = [[0, i] for i in range(10)]\n a = [kv[0] + kv[1] for kv in zip(k, v)]\n f = os.path.join(str(tmpdir), 'data.txt')\n savetxt(f, a, fmt='%.02g')\n data = fromtext(f, skip=1)\n assert allclose(data.shape, (10, 2))\n assert data.dtype == 'float64'\n assert allclose(data.toarray(), v)\n\n\ndef test_from_binary(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = os.path.join(str(tmpdir), 'data.bin')\n a.tofile(p)\n data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)\n assert allclose(data.shape, (4, 2))\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_binary_skip(tmpdir, eng):\n k = [[i] for i in range(10)]\n v = [[0, i] for i in range(10)]\n a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')\n p = os.path.join(str(tmpdir), 'data.bin')\n a.tofile(p)\n data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)\n assert allclose(data.shape, (10, 2))\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), v)\n\n\ndef test_to_binary(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n fromarray(a, npartitions=1, engine=eng).tobinary(p)\n files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]\n assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']\n with open(str(tmpdir) + '/data/conf.json', 'r') as f:\n conf = json.load(f)\n assert conf['shape'] == [4, 2]\n assert conf['dtype'] == 'int16'\n\n\ndef test_to_binary_roundtrip(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n data = fromarray(a, npartitions=1, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_to_binary_roundtrip_partitioned(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n data = fromarray([a, a], 
npartitions=4, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_to_binary_roundtrip_3d(tmpdir, eng):\n a = arange(16, dtype='int16').reshape((4, 2, 2))\n p = str(tmpdir) + '/data'\n data = fromarray(a, npartitions=1, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p, engine=eng)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_from_example(eng):\n return\n data = fromexample('fish', engine=eng)\n assert allclose(data.toarray().shape, (76, 87, 2, 20))\n data = fromexample('mouse', engine=eng)\n assert allclose(data.toarray().shape, (64, 64, 20))\n data = fromexample('iris', engine=eng)\n assert allclose(data.toarray().shape, (150, 4))\n",
"from numpy import array, mean, median, std, size, arange, percentile,\\\n asarray, zeros, corrcoef, where, unique, array_equal, delete, \\\n ravel, logical_not, unravel_index, prod, random, shape, \\\n dot, outer, expand_dims, ScalarType, ndarray, sqrt, pi, angle, fft, \\\n roll, polyfit, polyval, ceil, float64, fix, floor\nimport logging\nfrom itertools import product\nfrom bolt.utils import tupleize\nfrom six import string_types\nfrom ..utils import check_options\n\n\nfrom ..base import Data\n\n\nclass Series(Data):\n \"\"\"\n Collection of indexed 1d array data.\n\n Backed by an array-like object, including a numpy array\n (for local computation) or a bolt array (for spark computation).\n\n Attributes\n ----------\n values : array-like\n numpy array or bolt array\n\n index : array-like or one-dimensional list\n Values must be unique, same length as the arrays in the input data.\n Defaults to arange(len(data)) if not provided.\n\n labels : array-like\n A set of labels, one per series record.\n \"\"\"\n _metadata = Data._metadata\n _attributes = Data._attributes + ['index']\n\n def __init__(self, values, index=None, labels=None, mode='local'):\n super(Series, self).__init__(values, mode=mode)\n self.labels = labels\n self._index = None\n if index is not None:\n self._index = index\n\n @property\n def index(self):\n if self._index is None:\n self._index = arange(self.shape[-1])\n return self._index\n\n @index.setter\n def index(self, value):\n lenself = len(self.index)\n if type(value) is str:\n value = [value]\n try:\n value[0]\n except:\n value = [value]\n try:\n lenvalue = len(value)\n except:\n raise TypeError('Index must be an object with a length')\n if lenvalue != lenself:\n raise ValueError(\"Length of new index '%g' must match length of original index '%g'\"\n .format(lenvalue, lenself))\n self._index = value\n\n @property\n def length(self):\n return len(self.index)\n\n @property\n def baseaxes(self):\n return tuple(range(0, len(self.shape)-1))\n\n @property\n def _constructor(self):\n return Series\n\n def flatten(self):\n \"\"\"\n Reshape all dimensions but the last into a single dimension\n \"\"\"\n size = prod(self.shape[:-1])\n return self.reshape(size, self.shape[-1])\n\n def count(self):\n \"\"\"\n Count the number of records.\n\n For lazy or distributed data, will force a computation.\n \"\"\"\n if self.mode == 'local':\n return prod(self.shape[:-1])\n\n if self.mode == 'spark':\n return self.tordd().count()\n\n def first(self):\n \"\"\"\n Return the first element.\n \"\"\"\n if self.mode == 'local':\n return self.values[tuple(zeros(len(self.baseaxes))) + (slice(None, None),)]\n\n if self.mode == 'spark':\n return self.values.first().toarray()\n\n def tolocal(self):\n \"\"\"\n Convert to local mode.\n \"\"\"\n from thunder.series.readers import fromarray\n\n if self.mode == 'local':\n logging.getLogger('thunder').warn('images already in local mode')\n pass\n\n return fromarray(self.toarray(), index=self.index, labels=self.labels)\n\n def tospark(self, engine=None):\n \"\"\"\n Convert to spark mode.\n \"\"\"\n from thunder.series.readers import fromarray\n\n if self.mode == 'spark':\n logging.getLogger('thunder').warn('images already in local mode')\n pass\n\n if engine is None:\n raise ValueError('Must provide SparkContext')\n\n return fromarray(self.toarray(), index=self.index, labels=self.labels, engine=engine)\n\n def sample(self, n=100, seed=None):\n \"\"\"\n Extract random sample of records.\n\n Parameters\n ----------\n n : int, optional, default = 100\n The number 
of data points to sample.\n\n seed : int, optional, default = None\n Random seed.\n \"\"\"\n if n < 1:\n raise ValueError(\"Number of samples must be larger than 0, got '%g'\" % n)\n\n if seed is None:\n seed = random.randint(0, 2 ** 32)\n\n if self.mode == 'spark':\n result = asarray(self.values.tordd().values().takeSample(False, n, seed))\n\n else:\n basedims = [self.shape[d] for d in self.baseaxes]\n inds = [unravel_index(int(k), basedims) for k in random.rand(n) * prod(basedims)]\n result = asarray([self.values[tupleize(i) + (slice(None, None),)] for i in inds])\n\n return self._constructor(result, index=self.index)\n\n def map(self, func, index=None, value_shape=None, dtype=None, with_keys=False):\n \"\"\"\n Map an array -> array function over each record.\n\n Parameters\n ----------\n func : function\n A function of a single record.\n\n index : array-like, optional, default = None\n If known, the index to be used following function evaluation.\n\n value_shape : int, optional, default=None\n Known shape of values resulting from operation. Only\n valid in spark mode.\n\n dtype : numpy.dtype, optional, default = None\n If known, the type of the data following function evaluation.\n\n with_keys : boolean, optional, default = False\n If true, function should be of both tuple indices and series values.\n \"\"\"\n # if new index is given, can infer missing value_shape\n if value_shape is None and index is not None:\n value_shape = len(index)\n\n if isinstance(value_shape, int):\n values_shape = (value_shape, )\n new = super(Series, self).map(func, value_shape=value_shape, dtype=dtype, with_keys=with_keys)\n\n if index is not None:\n new.index = index\n # if series shape did not change and no index was supplied, propagate original index\n else:\n if len(new.index) == len(self.index):\n new.index = self.index\n\n return new\n\n def reduce(self, func):\n \"\"\"\n Reduce a function over records.\n\n Parameters\n ----------\n func : function\n A function of two records.\n \"\"\"\n return self._reduce(func, axis=self.baseaxes)\n\n def mean(self):\n \"\"\"\n Compute the mean across records\n \"\"\"\n return self._constructor(self.values.mean(axis=self.baseaxes, keepdims=True))\n\n def var(self):\n \"\"\"\n Compute the variance across records\n \"\"\"\n return self._constructor(self.values.var(axis=self.baseaxes, keepdims=True))\n\n def std(self):\n \"\"\"\n Compute the standard deviation across records.\n \"\"\"\n return self._constructor(self.values.std(axis=self.baseaxes, keepdims=True))\n\n def sum(self):\n \"\"\"\n Compute the sum across records.\n \"\"\"\n return self._constructor(self.values.sum(axis=self.baseaxes, keepdims=True))\n\n def max(self):\n \"\"\"\n Compute the max across records.\n \"\"\"\n return self._constructor(self.values.max(axis=self.baseaxes, keepdims=True))\n\n def min(self):\n \"\"\"\n Compute the min across records.\n \"\"\"\n return self._constructor(self.values.min(axis=self.baseaxes, keepdims=True))\n\n def reshape(self, *shape):\n \"\"\"\n Reshape the Series object\n\n Cannot change the last dimension.\n\n Parameters\n ----------\n shape: one or more ints\n New shape\n \"\"\"\n if prod(self.shape) != prod(shape):\n raise ValueError(\"Reshaping must leave the number of elements unchanged\")\n\n if self.shape[-1] != shape[-1]:\n raise ValueError(\"Reshaping cannot change the size of the constituent series (last dimension)\")\n\n if self.labels is not None:\n newlabels = self.labels.reshape(*shape[:-1])\n else:\n newlabels = None\n\n return 
self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',))\n\n def between(self, left, right):\n \"\"\"\n Select subset of values within the given index range.\n\n Inclusive on the left; exclusive on the right.\n\n Parameters\n ----------\n left : int\n Left-most index in the desired range.\n\n right: int\n Right-most index in the desired range.\n \"\"\"\n crit = lambda x: left <= x < right\n return self.select(crit)\n\n def select(self, crit):\n \"\"\"\n Select subset of values that match a given index criterion.\n\n Parameters\n ----------\n crit : function, list, str, int\n Criterion function to map to indices, specific index value,\n or list of indices.\n \"\"\"\n import types\n\n # handle lists, strings, and ints\n if not isinstance(crit, types.FunctionType):\n # set(\"foo\") -> {\"f\", \"o\"}; wrap in list to prevent:\n if isinstance(crit, string_types):\n critlist = set([crit])\n else:\n try:\n critlist = set(crit)\n except TypeError:\n # typically means crit is not an iterable type; for instance, crit is an int\n critlist = set([crit])\n crit = lambda x: x in critlist\n\n # if only one index, return it directly or throw an error\n index = self.index\n if size(index) == 1:\n if crit(index[0]):\n return self\n else:\n raise Exception('No indices found matching criterion')\n\n # determine new index and check the result\n newindex = [i for i in index if crit(i)]\n if len(newindex) == 0:\n raise Exception('No indices found matching criterion')\n if array(newindex == index).all():\n return self\n\n # use fast logical indexing to get the new values\n subinds = where([crit(i) for i in index])\n new = self.map(lambda x: x[subinds], index=newindex)\n\n # if singleton, need to check whether it's an array or a scalar/int\n # if array, recompute a new set of indices\n if len(newindex) == 1:\n new = new.map(lambda x: x[0], index=newindex)\n val = new.first()\n if size(val) == 1:\n newindex = [newindex[0]]\n else:\n newindex = arange(0, size(val))\n\n new._index = newindex\n\n return new\n\n def center(self, axis=1):\n \"\"\"\n Subtract the mean either within or across records.\n\n Parameters\n ----------\n axis : int, optional, default = 1\n Which axis to center along, within (1) or across (0) records.\n \"\"\"\n if axis == 1:\n return self.map(lambda x: x - mean(x))\n elif axis == 0:\n meanval = self.mean().toarray()\n return self.map(lambda x: x - meanval)\n else:\n raise Exception('Axis must be 0 or 1')\n\n def standardize(self, axis=1):\n \"\"\"\n Divide by standard deviation either within or across records.\n\n Parameters\n ----------\n axis : int, optional, default = 0\n Which axis to standardize along, within (1) or across (0) records\n \"\"\"\n if axis == 1:\n return self.map(lambda x: x / std(x))\n elif axis == 0:\n stdval = self.std().toarray()\n return self.map(lambda x: x / stdval)\n else:\n raise Exception('Axis must be 0 or 1')\n\n def zscore(self, axis=1):\n \"\"\"\n Subtract the mean and divide by standard deviation within or across records.\n\n Parameters\n ----------\n axis : int, optional, default = 0\n Which axis to zscore along, within (1) or across (0) records\n \"\"\"\n if axis == 1:\n return self.map(lambda x: (x - mean(x)) / std(x))\n elif axis == 0:\n meanval = self.mean().toarray()\n stdval = self.std().toarray()\n return self.map(lambda x: (x - meanval) / stdval)\n else:\n raise Exception('Axis must be 0 or 1')\n\n def squelch(self, threshold):\n \"\"\"\n Set all records that do not exceed the given threhsold to 0.\n\n 
Parameters\n ----------\n threshold : scalar\n Level below which to set records to zero\n \"\"\"\n func = lambda x: zeros(x.shape) if max(x) < threshold else x\n return self.map(func)\n\n def correlate(self, signal):\n \"\"\"\n Correlate records against one or many one-dimensional arrays.\n\n Parameters\n ----------\n signal : array-like\n One or more signals to correlate against.\n \"\"\"\n s = asarray(signal)\n\n if s.ndim == 1:\n if size(s) != self.shape[-1]:\n raise ValueError(\"Length of signal '%g' does not match record length '%g'\"\n % (size(s), self.shape[-1]))\n\n return self.map(lambda x: corrcoef(x, s)[0, 1], index=[1])\n\n elif s.ndim == 2:\n if s.shape[1] != self.shape[-1]:\n raise ValueError(\"Length of signal '%g' does not match record length '%g'\"\n % (s.shape[1], self.shape[-1]))\n newindex = arange(0, s.shape[0])\n return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in s]), index=newindex)\n\n else:\n raise Exception('Signal to correlate with must have 1 or 2 dimensions')\n\n def _check_panel(self, length):\n \"\"\"\n Check that given fixed panel length evenly divides index.\n\n Parameters\n ----------\n length : int\n Fixed length with which to subdivide index\n \"\"\"\n n = len(self.index)\n if divmod(n, length)[1] != 0:\n raise ValueError(\"Panel length '%g' must evenly divide length of series '%g'\"\n % (length, n))\n if n == length:\n raise ValueError(\"Panel length '%g' cannot be length of series '%g'\"\n % (length, n))\n\n def mean_by_panel(self, length):\n \"\"\"\n Compute the mean across fixed sized panels of each record.\n\n Splits each record into panels of size `length`,\n and then computes the mean across panels.\n Panel length must subdivide record exactly.\n\n Parameters\n ----------\n length : int\n Fixed length with which to subdivide.\n \"\"\"\n self._check_panel(length)\n func = lambda v: v.reshape(-1, length).mean(axis=0)\n newindex = arange(length)\n return self.map(func, index=newindex)\n\n def _makemasks(self, index=None, level=0):\n \"\"\"\n Internal function for generating masks for selecting values based on multi-index values.\n\n As all other multi-index functions will call this function, basic type-checking is also\n performed at this stage.\n \"\"\"\n if index is None:\n index = self.index\n\n try:\n dims = len(array(index).shape)\n if dims == 1:\n index = array(index, ndmin=2).T\n except:\n raise TypeError('A multi-index must be convertible to a numpy ndarray')\n\n try:\n index = index[:, level]\n except:\n raise ValueError(\"Levels must be indices into individual elements of the index\")\n\n lenIdx = index.shape[0]\n nlevels = index.shape[1]\n\n combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])\n combs = array([l for l in combs])\n\n masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])\n\n return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()])\n\n def _map_by_index(self, function, level=0):\n \"\"\"\n An internal function for maping a function to groups of values based on a multi-index\n\n Elements of each record are grouped according to unique value combinations of the multi-\n index across the given levels of the multi-index. Then the given function is applied\n to to each of these groups separately. 
If this function is many-to-one, the result\n can be recast as a Series indexed by the unique index values used for grouping.\n \"\"\"\n\n if type(level) is int:\n level = [level]\n\n masks, ind = self._makemasks(index=self.index, level=level)\n nMasks = len(masks)\n newindex = array(ind)\n if len(newindex[0]) == 1:\n newindex = ravel(newindex)\n return self.map(lambda v: asarray([array(function(v[masks[x]])) for x in range(nMasks)]),\n index=newindex)\n\n def select_by_index(self, val, level=0, squeeze=False, filter=False, return_mask=False):\n \"\"\"\n Select or filter elements of the Series by index values (across levels, if multi-index).\n\n The index is a property of a Series object that assigns a value to each position within\n the arrays stored in the records of the Series. This function returns a new Series where,\n within each record, only the elements indexed by a given value(s) are retained. An index\n where each value is a list of a fixed length is referred to as a 'multi-index',\n as it provides multiple labels for each index location. Each of the dimensions in these\n sublists is a 'level' of the multi-index. If the index of the Series is a multi-index, then\n the selection can proceed by first selecting one or more levels, and then selecting one\n or more values at each level.\n\n Parameters\n ----------\n val : list of lists\n Specifies the selected index values. List must contain one list for each level of the\n multi-index used in the selection. For any singleton lists, the list may be replaced\n with just the integer.\n\n level : list of ints, optional, default=0\n Specifies which levels in the multi-index to use when performing selection. If a single\n level is selected, the list can be replaced with an integer. Must be the same length\n as val.\n\n squeeze : bool, optional, default=False\n If True, the multi-index of the resulting Series will drop any levels that contain\n only a single value because of the selection. 
Useful if indices are used as unique\n identifiers.\n\n filter : bool, optional, default=False\n If True, selection process is reversed and all index values EXCEPT those specified\n are selected.\n\n return_mask : bool, optional, default=False\n If True, return the mask used to implement the selection.\n \"\"\"\n try:\n level[0]\n except:\n level = [level]\n try:\n val[0]\n except:\n val = [val]\n\n remove = []\n if len(level) == 1:\n try:\n val[0][0]\n except:\n val = [val]\n if squeeze and not filter and len(val) == 1:\n remove.append(level[0])\n else:\n for i in range(len(val)):\n try:\n val[i][0]\n except:\n val[i] = [val[i]]\n if squeeze and not filter and len(val[i]) == 1:\n remove.append(level[i])\n\n if len(level) != len(val):\n raise ValueError(\"List of levels must be same length as list of corresponding values\")\n\n p = product(*val)\n selected = set([x for x in p])\n\n masks, ind = self._makemasks(index=self.index, level=level)\n nmasks = len(masks)\n masks = array([masks[x] for x in range(nmasks) if tuple(ind[x]) in selected])\n\n final_mask = masks.any(axis=0)\n if filter:\n final_mask = logical_not(final_mask)\n\n indFinal = array(self.index)\n if len(indFinal.shape) == 1:\n indFinal = array(indFinal, ndmin=2).T\n indFinal = indFinal[final_mask]\n\n if squeeze:\n indFinal = delete(indFinal, remove, axis=1)\n\n if len(indFinal[0]) == 1:\n indFinal = ravel(indFinal)\n\n elif len(indFinal[1]) == 0:\n indFinal = arange(sum(final_mask))\n\n result = self.map(lambda v: v[final_mask], index=indFinal)\n\n if return_mask:\n return result, final_mask\n else:\n return result\n\n def aggregate_by_index(self, function, level=0):\n \"\"\"\n Aggregrate data in each record, grouping by index values.\n\n For each unique value of the index, applies a function to the group\n indexed by that value. Returns a Series indexed by those unique values.\n For the result to be a valid Series object, the aggregating function should\n return a simple numeric type. Also allows selection of levels within a\n multi-index. See select_by_index for more info on indices and multi-indices.\n\n Parameters\n ----------\n function : function\n Aggregating function to map to Series values. 
Should take a list or ndarray\n as input and return a simple numeric value.\n\n level : list of ints, optional, default=0\n Specifies the levels of the multi-index to use when determining unique index values.\n If only a single level is desired, can be an int.\n \"\"\"\n result = self._map_by_index(function, level=level)\n return result.map(lambda v: array(v), index=result.index)\n\n def stat_by_index(self, stat, level=0):\n \"\"\"\n Compute the desired statistic for each uniue index values (across levels, if multi-index)\n\n Parameters\n ----------\n stat : string\n Statistic to be computed: sum, mean, median, stdev, max, min, count\n\n level : list of ints, optional, default=0\n Specifies the levels of the multi-index to use when determining unique index values.\n If only a single level is desired, can be an int.\n \"\"\"\n from numpy import sum, min, max\n\n STATS = {\n 'sum': sum,\n 'mean': mean,\n 'median': median,\n 'stdev': std,\n 'max': max,\n 'min': min,\n 'count': size\n }\n func = STATS[stat.lower()]\n return self.aggregate_by_index(level=level, function=func)\n\n def sum_by_index(self, level=0):\n \"\"\"\n Compute sums for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='sum')\n\n def mean_by_index(self, level=0):\n \"\"\"\n Compute means for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='mean')\n\n def median_by_index(self, level=0):\n \"\"\"\n Compute medians for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='median')\n\n def std_by_index(self, level=0):\n \"\"\"\n Compute means for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='stdev')\n\n def max_by_index(self, level=0):\n \"\"\"\n Compute maximum values for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='max')\n\n def min_by_index(self, level=0):\n \"\"\"\n Compute minimum values for each unique index value (across level, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='min')\n\n def count_by_index(self, level=0):\n \"\"\"\n Count the number for each unique index value (across levels, if multi-index)\n \"\"\"\n return self.stat_by_index(level=level, stat='count')\n\n def cov(self):\n \"\"\"\n Compute covariance of a distributed matrix.\n\n Parameters\n ----------\n axis : int, optional, default = None\n Axis for performing mean subtraction, None (no subtraction), 0 (rows) or 1 (columns)\n \"\"\"\n return self.center(axis=0).gramian().times(1.0 / (self.shape[0] - 1))\n\n def gramian(self):\n \"\"\"\n Compute gramian of a distributed matrix.\n\n The gramian is defined as the product of the matrix\n with its transpose, i.e. 
A^T * A.\n \"\"\"\n if self.mode == 'spark':\n rdd = self.values.tordd()\n\n from pyspark.accumulators import AccumulatorParam\n\n class MatrixAccumulator(AccumulatorParam):\n def zero(self, value):\n return zeros(shape(value))\n\n def addInPlace(self, val1, val2):\n val1 += val2\n return val1\n\n global mat\n init = zeros((self.shape[1], self.shape[1]))\n mat = rdd.context.accumulator(init, MatrixAccumulator())\n\n def outer_sum(x):\n global mat\n mat += outer(x, x)\n\n rdd.values().foreach(outer_sum)\n return self._constructor(mat.value, index=self.index)\n\n if self.mode == 'local':\n return self._constructor(dot(self.values.T, self.values), index=self.index)\n\n def times(self, other):\n \"\"\"\n Multiply a matrix by another one.\n\n Other matrix must be a numpy array, a scalar,\n or another matrix in local mode.\n\n Parameters\n ----------\n other : Matrix, scalar, or numpy array\n A matrix to multiply with\n \"\"\"\n if isinstance(other, ScalarType):\n other = asarray(other)\n index = self.index\n else:\n if isinstance(other, list):\n other = asarray(other)\n if isinstance(other, ndarray) and other.ndim < 2:\n other = expand_dims(other, 1)\n if not self.shape[1] == other.shape[0]:\n raise ValueError('shapes %s and %s are not aligned' % (self.shape, other.shape))\n index = arange(other.shape[1])\n\n if self.mode == 'local' and isinstance(other, Series) and other.mode == 'spark':\n raise NotImplementedError\n\n if self.mode == 'spark' and isinstance(other, Series) and other.mode == 'spark':\n raise NotImplementedError\n\n if self.mode == 'local' and isinstance(other, (ndarray, ScalarType)):\n return self._constructor(dot(self.values, other), index=index)\n\n if self.mode == 'local' and isinstance(other, Series):\n return self._constructor(dot(self.values, other.values), index=index)\n\n if self.mode == 'spark' and isinstance(other, (ndarray, ScalarType)):\n return self.map(lambda x: dot(x, other), index=index)\n\n if self.mode == 'spark' and isinstance(other, Series):\n return self.map(lambda x: dot(x, other.values), index=index)\n\n def _makewindows(self, indices, window):\n \"\"\"\n Make masks used by windowing functions\n\n Given a list of indices specifying window centers,\n and a window size, construct a list of index arrays,\n one per window, that index into the target array\n\n Parameters\n ----------\n indices : array-like\n List of times specifying window centers\n\n window : int\n Window size\n \"\"\"\n div = divmod(window, 2)\n before = div[0]\n after = div[0] + div[1]\n index = asarray(self.index)\n indices = asarray(indices)\n if where(index == max(indices))[0][0] + after > len(index):\n raise ValueError(\"Maximum requested index %g, with window %g, exceeds length %g\"\n % (max(indices), window, len(index)))\n if where(index == min(indices))[0][0] - before < 0:\n raise ValueError(\"Minimum requested index %g, with window %g, is less than 0\"\n % (min(indices), window))\n masks = [arange(where(index == i)[0][0]-before, where(index == i)[0][0]+after, dtype='int') for i in indices]\n return masks\n\n def mean_by_window(self, indices, window):\n \"\"\"\n Average series across multiple windows specified by their centers.\n\n Parameters\n ----------\n indices : array-like\n List of times specifying window centers\n\n window : int\n Window size\n \"\"\"\n masks = self._makewindows(indices, window)\n newindex = arange(0, len(masks[0]))\n return self.map(lambda x: mean([x[m] for m in masks], axis=0), index=newindex)\n\n def subsample(self, sample_factor=2):\n \"\"\"\n Subsample 
series by an integer factor.\n\n Parameters\n ----------\n sample_factor : positive integer, optional, default=2\n Factor for downsampling.\n \"\"\"\n if sample_factor < 0:\n raise Exception('Factor for subsampling must be postive, got %g' % sample_factor)\n s = slice(0, len(self.index), sample_factor)\n newindex = self.index[s]\n return self.map(lambda v: v[s], index=newindex)\n\n def downsample(self, sample_factor=2):\n \"\"\"\n Downsample series by an integer factor by averaging.\n\n Parameters\n ----------\n sample_factor : positive integer, optional, default=2\n Factor for downsampling.\n \"\"\"\n if sample_factor < 0:\n raise Exception('Factor for subsampling must be postive, got %g' % sample_factor)\n newlength = floor(len(self.index) / sample_factor)\n func = lambda v: v[0:int(newlength * sample_factor)].reshape(-1, sample_factor).mean(axis=1)\n newindex = arange(newlength)\n return self.map(func, index=newindex)\n\n def fourier(self, freq=None):\n \"\"\"\n Compute statistics of a Fourier decomposition on series data.\n\n Parameters\n ----------\n freq : int\n Digital frequency at which to compute coherence and phase\n \"\"\"\n def get(y, freq):\n y = y - mean(y)\n nframes = len(y)\n ft = fft.fft(y)\n ft = ft[0:int(fix(nframes/2))]\n ampFt = 2*abs(ft)/nframes\n amp = ampFt[freq]\n ampSum = sqrt(sum(ampFt**2))\n co = amp / ampSum\n ph = -(pi/2) - angle(ft[freq])\n if ph < 0:\n ph += pi * 2\n return array([co, ph])\n\n if freq >= int(fix(size(self.index)/2)):\n raise Exception('Requested frequency, %g, is too high, '\n 'must be less than half the series duration' % freq)\n\n index = ['coherence', 'phase']\n return self.map(lambda x: get(x, freq), index=index)\n\n def convolve(self, signal, mode='full'):\n \"\"\"\n Convolve series data against another signal.\n\n Parameters\n ----------\n signal : array\n Signal to convolve with (must be 1D)\n\n mode : str, optional, default='full'\n Mode of convolution, options are 'full', 'same', and 'valid'\n \"\"\"\n\n from numpy import convolve\n\n s = asarray(signal)\n\n n = size(self.index)\n m = size(s)\n\n # use expected lengths to make a new index\n if mode == 'same':\n newmax = max(n, m)\n elif mode == 'valid':\n newmax = max(m, n) - min(m, n) + 1\n else:\n newmax = n+m-1\n newindex = arange(0, newmax)\n\n return self.map(lambda x: convolve(x, signal, mode), index=newindex)\n\n def crosscorr(self, signal, lag=0):\n \"\"\"\n Cross correlate series data against another signal.\n\n Parameters\n ----------\n signal : array\n Signal to correlate against (must be 1D).\n\n lag : int\n Range of lags to consider, will cover (-lag, +lag).\n \"\"\"\n from scipy.linalg import norm\n\n s = asarray(signal)\n s = s - mean(s)\n s = s / norm(s)\n\n if size(s) != size(self.index):\n raise Exception('Size of signal to cross correlate with, %g, '\n 'does not match size of series' % size(s))\n\n # created a matrix with lagged signals\n if lag is not 0:\n shifts = range(-lag, lag+1)\n d = len(s)\n m = len(shifts)\n sshifted = zeros((m, d))\n for i in range(0, len(shifts)):\n tmp = roll(s, shifts[i])\n if shifts[i] < 0:\n tmp[(d+shifts[i]):] = 0\n if shifts[i] > 0:\n tmp[:shifts[i]] = 0\n sshifted[i, :] = tmp\n s = sshifted\n else:\n shifts = [0]\n\n def get(y, s):\n y = y - mean(y)\n n = norm(y)\n if n == 0:\n b = zeros((s.shape[0],))\n else:\n y /= n\n b = dot(s, y)\n return b\n\n return self.map(lambda x: get(x, s), index=shifts)\n\n def detrend(self, method='linear', order=5):\n \"\"\"\n Detrend series data with linear or nonlinear detrending.\n\n Preserve 
intercept so that subsequent operations can adjust the baseline.\n\n Parameters\n ----------\n method : str, optional, default = 'linear'\n Detrending method\n\n order : int, optional, default = 5\n Order of polynomial, for non-linear detrending only\n \"\"\"\n check_options(method, ['linear', 'nonlinear'])\n\n if method == 'linear':\n order = 1\n\n def func(y):\n x = arange(len(y))\n p = polyfit(x, y, order)\n p[-1] = 0\n yy = polyval(p, x)\n return y - yy\n\n return self.map(func)\n\n def normalize(self, method='percentile', window=None, perc=20, offset=0.1):\n \"\"\"\n Normalize by subtracting and dividing by a baseline.\n\n Baseline can be derived from a global mean or percentile,\n or a smoothed percentile estimated within a rolling window.\n Windowed baselines may only be well-defined for\n temporal series data.\n\n Parameters\n ----------\n baseline : str, optional, default = 'percentile'\n Quantity to use as the baseline, options are 'mean', 'percentile',\n 'window', or 'window-exact'.\n\n window : int, optional, default = 6\n Size of window for baseline estimation,\n for 'window' and 'window-exact' baseline only.\n\n perc : int, optional, default = 20\n Percentile value to use, for 'percentile',\n 'window', or 'window-exact' baseline only.\n\n offset : float, optional, default = 0.1\n Scalar added to baseline during division to avoid division by 0.\n \"\"\"\n\n check_options(method, ['mean', 'percentile', 'window', 'window-exact'])\n\n from warnings import warn\n if not (method == 'window' or method == 'window-exact') and window is not None:\n warn('Setting window without using method \"window\" has no effect')\n\n if method == 'mean':\n baseFunc = mean\n\n if method == 'percentile':\n baseFunc = lambda x: percentile(x, perc)\n\n if method == 'window':\n from scipy.ndimage.filters import percentile_filter\n baseFunc = lambda x: percentile_filter(x.astype(float64), perc, window, mode='nearest')\n\n if method == 'window-exact':\n if window & 0x1:\n left, right = (ceil(window/2), ceil(window/2) + 1)\n else:\n left, right = (window/2, window/2)\n\n n = len(self.index)\n baseFunc = lambda x: asarray([percentile(x[max(ix-left, 0):min(ix+right+1, n)], perc)\n for ix in arange(0, n)])\n\n def get(y):\n b = baseFunc(y)\n return (y - b) / (b + offset)\n\n return self.map(get)\n\n def toimages(self, chunk_size='auto'):\n \"\"\"\n Converts to images data.\n\n This method is equivalent to series.toblocks(size).toimages().\n\n Parameters\n ----------\n chunk_size : str or tuple, size of series chunk used during conversion, default = 'auto'\n String interpreted as memory size (in kilobytes, e.g. '64').\n The exception is the string 'auto', which will choose a chunk size to make the\n resulting blocks ~100 MB in size. 
Int interpreted as 'number of elements'.\n Only valid in spark mode.\n \"\"\"\n from thunder.images.images import Images\n\n if chunk_size is 'auto':\n chunk_size = str(max([int(1e5/prod(self.baseshape)), 1]))\n\n n = len(self.shape) - 1\n\n if self.mode == 'spark':\n return Images(self.values.swap(tuple(range(n)), (0,), size=chunk_size))\n\n if self.mode == 'local':\n return Images(self.values.transpose((n,) + tuple(range(0, n))))\n\n def tobinary(self, path, prefix='series', overwrite=False, credentials=None):\n \"\"\"\n Write data to binary files.\n\n Parameters\n ----------\n path : string path or URI to directory to be created\n Output files will be written underneath path.\n Directory will be created as a result of this call.\n\n prefix : str, optional, default = 'series'\n String prefix for files.\n\n overwrite : bool\n If true, path and all its contents will be deleted and\n recreated as partof this call.\n \"\"\"\n from thunder.series.writers import tobinary\n tobinary(self, path, prefix=prefix, overwrite=overwrite, credentials=credentials)\n"
] | [
[
"numpy.savetxt",
"numpy.arange",
"numpy.allclose"
],
[
"numpy.dot",
"numpy.polyfit",
"numpy.expand_dims",
"numpy.asarray",
"numpy.max",
"numpy.mean",
"numpy.fix",
"numpy.polyval",
"numpy.roll",
"numpy.where",
"numpy.random.randint",
"numpy.unique",
"numpy.arange",
"numpy.ceil",
"numpy.std",
"scipy.linalg.norm",
"numpy.outer",
"numpy.ravel",
"numpy.zeros",
"numpy.logical_not",
"numpy.min",
"numpy.delete",
"numpy.random.rand",
"numpy.corrcoef",
"numpy.array",
"numpy.sum",
"numpy.convolve",
"numpy.fft.fft",
"numpy.array_equal",
"numpy.percentile",
"numpy.shape",
"numpy.prod",
"numpy.angle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
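The thunder `Series` code in the record above computes covariance as `center(axis=0).gramian().times(1.0 / (self.shape[0] - 1))`, i.e. column-centering followed by the Gramian `A^T * A` and a `1/(n-1)` scaling. A minimal local-mode sketch (not part of the dataset; the array `X` and its shape are illustrative) showing that this composition reproduces numpy's covariance:

```python
# Illustrative sketch of Series.cov in local mode: center across records,
# form the Gramian A^T * A, and scale by 1/(n-1). Shapes are toy values.
import numpy as np

X = np.random.rand(50, 4)          # 50 records, 4 samples per record
Xc = X - X.mean(axis=0)            # center(axis=0): subtract per-column mean
gram = Xc.T.dot(Xc)                # gramian(): A^T * A (local-mode branch)
cov = gram / (X.shape[0] - 1)      # times(1.0 / (shape[0] - 1))

# matches numpy's covariance with observations in rows
assert np.allclose(cov, np.cov(X, rowvar=False))
```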
hx-Tang/GANet | [
"8935c9d3d82189fa6f940c2a877534a398a041e4",
"8935c9d3d82189fa6f940c2a877534a398a041e4"
] | [
"libs/sync_bn/src/__init__.py",
"view.py"
] | [
"import os\nimport torch\nfrom torch.utils.cpp_extension import load\n\ncwd = os.path.dirname(os.path.realpath(__file__))\ncpu_path = os.path.join(cwd, 'cpu')\ngpu_path = os.path.join(cwd, 'gpu')\n\ncpu = load('sync_bn_cpu', [\n os.path.join(cpu_path, 'operator.cpp'),\n os.path.join(cpu_path, 'sync_bn.cpp'),\n], build_directory=cpu_path, verbose=False)\n\nif torch.cuda.is_available():\n gpu = load('sync_bn_gpu', [\n os.path.join(gpu_path, 'operator.cpp'),\n os.path.join(gpu_path, 'sync_bn_cuda.cu'),\n ], build_directory=gpu_path, verbose=False)\n",
"# # from models.MyGANet4 import GANet\n# #\n# # model = GANet()\n# # for name, module in model.named_children():\n# # print(name)\n#\n# import torch\n# import torch.nn as nn\n#\n# a = torch.randn(2, 3, 2, 2) # 右图\n# b = torch.ones(2, 1, 2, 2) # disp\n# print(a)\n#\n# def warp(x, disp):\n# \"\"\"\n# warp an image/tensor (im2) back to im1, according to the optical flow\n# x: [B, C, H, W] (im2)\n# flo: [B, 2, H, W] flow\n# \"\"\"\n# B, C, H, W = x.size()\n# # mesh grid\n# xx = torch.arange(0, W).view(1, -1).repeat(H, 1)\n# yy = torch.arange(0, H).view(-1, 1).repeat(1, W)\n# xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)\n# yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)\n# vgrid = torch.cat((xx, yy), 1).float()\n#\n# # vgrid = Variable(grid)\n# vgrid[:, :1, :, :] = vgrid[:, :1, :, :] + disp\n#\n# # scale grid to [-1,1]\n# vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0\n# vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0\n#\n# vgrid = vgrid.permute(0, 2, 3, 1)\n# output = nn.functional.grid_sample(x, vgrid,align_corners=True)\n# return output\n#\n# o = warp(a,b)\n#\n# print(o)\n\nfrom models.CasGANet10 import GANet\nfrom models.MyGANet9 import GANet\nfrom models.GANet11 import GANet\nimport numpy as np\nimport datetime\nimport torch\n\nmodel = GANet()\n\nprint('parameters:{}'.format(np.sum([p.numel() for p in model.parameters()]).item()))\n\nmodel = torch.nn.DataParallel(model).cuda()\nmodel.eval()\ninput1 = torch.randn(1, 3, 384, 768).cuda()\ninput2 = torch.randn(1, 3, 384, 768).cuda()\n\nt = 0.\nfor i in range(10):\n with torch.no_grad():\n start = datetime.datetime.now()\n out1 = model(input1, input2)\n end = datetime.datetime.now()\n t += (end - start).total_seconds()\nprint(t/10)\n\n"
] | [
[
"torch.cuda.is_available"
],
[
"torch.randn",
"torch.nn.DataParallel",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
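The `view.py` entry in the record above contains a commented-out `warp` helper that shifts a sampling grid along x by a disparity map and resamples the source tensor with `grid_sample`. A small CPU-runnable sketch of that idea (tensor sizes are toy values; the GANet models and the CUDA timing loop from the record are omitted):

```python
# Sketch of the disparity-based warp from the commented-out snippet:
# build an (x, y) grid, add the disparity to x, normalize to [-1, 1],
# and bilinearly resample the input with grid_sample.
import torch
import torch.nn.functional as F

def warp(x, disp):
    # x: [B, C, H, W] source image, disp: [B, 1, H, W] disparity
    B, C, H, W = x.size()
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1).float()
    yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1).float()
    grid = torch.cat((xx, yy), 1)
    grid[:, :1] = grid[:, :1] + disp                      # shift x by disparity
    grid[:, 0] = 2.0 * grid[:, 0] / max(W - 1, 1) - 1.0   # normalize x to [-1, 1]
    grid[:, 1] = 2.0 * grid[:, 1] / max(H - 1, 1) - 1.0   # normalize y to [-1, 1]
    return F.grid_sample(x, grid.permute(0, 2, 3, 1), align_corners=True)

out = warp(torch.randn(2, 3, 4, 4), torch.ones(2, 1, 4, 4))
print(out.shape)  # torch.Size([2, 3, 4, 4])
```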
scottfredericks/PyXtal_Old | [
"3fa39b2f188197b42576087c6f4c3bca14b2e8f3"
] | [
"examples/LJ_38_Oh.py"
] | [
"from pyxtal.crystal import random_cluster\nfrom copy import deepcopy\nfrom optparse import OptionParser\nfrom random import randint, choice\nfrom scipy.optimize import minimize\nfrom scipy.spatial.distance import pdist, cdist\nfrom pyxtal.molecule import PointGroupAnalyzer\nfrom pymatgen import Molecule\nfrom pyxtal.database.collection import Collection\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nplt.style.use(\"bmh\")\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\"\nThis is a script to \n1, generate random clusters\n2, perform optimization\n\"\"\"\ndef LJ(pos, dim, mu=0.1):\n \"\"\"\n Calculate the total energy\n Args:\n pos: 1D array with N*dim numbers representing the atomic positions\n dim: dimension of the hyper/normal space\n output\n E: the total energy with punishing function\n \"\"\"\n N_atom = int(len(pos)/dim)\n pos = np.reshape(pos, (N_atom, dim))\n \n distance = pdist(pos) \n r6 = np.power(distance, 6)\n r12 = np.multiply(r6, r6)\n Eng = np.sum(4*(1/r12 - 1/r6))\n\n if dim > 3:\n norm = 0\n for i in range(3,dim):\n #diff = pos[:, i] - np.mean(pos[:, i])\n diff = pos[:, i] \n norm += np.sum(np.multiply(diff, diff))\n Eng += 0.5*mu*norm\n return Eng\n\ndef LJ_force(pos, dim, mu=0.1):\n N_atom = int(len(pos)/dim)\n pos = np.reshape(pos,[N_atom, dim])\n force = np.zeros([N_atom, dim])\n for i, pos0 in enumerate(pos):\n pos1 = deepcopy(pos)\n pos1 = np.delete(pos1, i, 0)\n distance = cdist([pos0], pos1)\n r = pos1 - pos0\n r2 = np.power(distance, 2)\n r6 = np.power(r2, 3)\n r12 = np.power(r6, 2)\n force[i] = np.dot((48/r12-24/r6)/r2, r)\n # force from the punish function mu*sum([x-mean(x)]^2)\n if dim > 3:\n for j in range(3,dim):\n #force[i, j] += mu*(pos[i, j] - np.mean(pos[:, j]))\n force[i, j] += mu*pos[i, j] #- np.mean(pos[:, j]))\n return force.flatten()\n\ndef single_optimize(pos, dim=3, kt=0.5, mu=0.1):\n \"\"\"\n perform optimization for a given cluster\n Args: \n pos: N*dim0 array representing the atomic positions\n dim: dimension of the hyper/normal space\n kt: perturbation factors\n\n output:\n energy: optmized energy\n pos: optimized positions\n \"\"\"\n N_atom = len(pos)\n diff = dim - np.shape(pos)[1]\n # if the input pos has less dimensions, we insert a random array for the extra dimension\n # if the input pos has more dimensions, we delete the array for the extra dimension\n if diff > 0:\n pos = np.hstack((pos, 0.5*(np.random.random([N_atom, diff])-0.5) ))\n elif diff < 0:\n pos = pos[:, :dim]\n\n pos = pos.flatten()\n res = minimize(LJ, pos, args=(dim, mu), jac=LJ_force, method='CG', tol=1e-3)\n pos = np.reshape(res.x, (N_atom, dim))\n energy = res.fun\n return energy, pos\n\n\ndef parse_symmetry(pos):\n mol = Molecule(['C']*len(pos), pos)\n try:\n symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol\n except:\n symbol = 'N/A'\n return symbol\n\n\nclass LJ_prediction():\n \"\"\"\n A class to perform global optimization on LJ clusters\n Args:\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, numIons):\n self.numIons = numIons\n ref = Collection('clusters')[str(numIons)]\n print('\\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\\\n format(numIons, ref['energy'], ref['pointgroup']))\n self.reference = ref\n self.time0 = time()\n\n def generate_cluster(self, pgs = range(2, 33)):\n run = True\n while run:\n pg = choice(pgs)\n cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)\n if cluster.valid:\n run = False\n return cluster.cart_coords\n \n def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 
33)):\n\n print('\\nPerforming random search at {0:d}D space\\n'.format(dim))\n cycle = range(maxN)\n if ncpu > 1:\n from multiprocessing import Pool\n from functools import partial\n\n with Pool(ncpu) as p:\n func = partial(self.relaxation, dim, pgs)\n res = p.map(func, cycle)\n p.close()\n p.join()\n else:\n res=[]\n for i in cycle:\n res.append(self.relaxation(dim, pgs, i))\n \n N_success = 0\n for dct in res:\n if dct['ground']:\n N_success +=1\n print('\\nHit the ground state {0:4d} times out of {1:4d} attempts\\n'.\\\n format(N_success, maxN))\n return res\n\n def relaxation(self, dim, pgs, ind):\n pos = self.generate_cluster(pgs)\n pg1 = parse_symmetry(pos)\n if dim == 3:\n [energy, pos] = single_optimize(pos, 3)\n else:\n do = True\n while do:\n [energy1, pos1] = single_optimize(pos, 3)\n [energy2, pos2] = single_optimize(pos1, dim)\n [energy3, pos3] = single_optimize(pos2, 3)\n #print(energy1, energy2, energy3)\n if abs(energy3-energy1) < 1e-3 or energy3 > energy1:\n pos = pos1\n energy = energy1\n do = False\n #print('stop')\n else:\n pos = pos3\n if abs(energy-self.reference['energy']) <1e-3:\n ground = True\n elif energy < self.reference['energy']:\n ground = True\n print(\" --- ENERGY LOWER THAN REFERENCE FOUND ---\")\n else:\n ground = False\n\n pg2 = parse_symmetry(pos)\n res = {'pos': pos,\n 'energy': energy,\n 'pg_init': pg1,\n 'pg_finial': pg2,\n 'ground': ground,\n 'id': ind,\n }\n if ground:\n print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\\\n format(ind, pg1, pg2, energy, (time()-self.time0)/60))\n elif ind%10 == 0:\n print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\\\n format(ind, pg1, pg2, energy, (time()-self.time0)/60))\n return res\n\nif __name__ == \"__main__\":\n #-------------------------------- Options -------------------------\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dimension\", dest=\"dim\", metavar='dim', default=3, type=int,\n help=\"dimension, 3 or higher\")\n parser.add_option(\"-n\", \"--numIons\", dest=\"numIons\", default=16, type=int,\n help=\"desired numbers of atoms: 16\")\n parser.add_option(\"-m\", \"--max\", dest=\"max\", default=100, type=int,\n help=\"maximum number of attempts\")\n parser.add_option(\"-p\", \"--proc\", dest=\"proc\", default=1, type=int,\n help=\"number of processors, default 1\")\n\n (options, args) = parser.parse_args()\n\n N = options.numIons #38\n maxN = options.max #1000\n dim = options.dim #4\n ncpu = options.proc\n\n lj_run = LJ_prediction(N)\n eng_min = lj_run.reference['energy']\n t0 = time()\n print(\"---No symmetry---\")\n results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Random symmetry---\")\n results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Oh only---\")\n results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Random symmetry (not Oh)---\")\n results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))\n print('time: {0:6.2f} seconds'.format(time()-t0))\n eng1 = []\n eng2 = []\n eng3 = []\n eng4 = []\n ground1 = 0\n ground2 = 0\n ground3 = 0\n ground4 = 0\n for dct in results1:\n if dct['ground']:\n ground1 += 1\n eng1.append(dct['energy']) \n for dct in results2:\n if dct['ground']:\n ground2 += 1\n eng2.append(dct['energy']) \n for dct in 
results3:\n if dct['ground']:\n ground3 += 1\n eng3.append(dct['energy']) \n for dct in results4:\n if dct['ground']:\n ground4 += 1\n eng4.append(dct['energy']) \n eng1 = np.array(eng1)\n eng2 = np.array(eng2)\n eng3 = np.array(eng3)\n eng4 = np.array(eng4)\n\n eng_max = max([max(eng1), max(eng2)])\n bins = np.linspace(eng_min-0.1, 0.1, 100)\n plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))\n plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))\n plt.xlabel('Energy (eV)')\n plt.ylabel('Counts')\n plt.legend(loc=1)\n plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))\n plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')\n plt.close()\n\n eng_max = max([max(eng3), max(eng4)])\n bins = np.linspace(eng_min-0.1, 0.1, 100)\n plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))\n plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))\n plt.xlabel('Energy (eV)')\n plt.ylabel('Counts')\n plt.legend(loc=1)\n plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))\n plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')\n plt.close()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.dot",
"numpy.linspace",
"numpy.reshape",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.style.use",
"numpy.multiply",
"numpy.power",
"scipy.spatial.distance.cdist",
"numpy.delete",
"scipy.optimize.minimize",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"scipy.spatial.distance.pdist",
"numpy.shape",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
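The `LJ` function in `LJ_38_Oh.py` above evaluates the pairwise Lennard-Jones energy from the condensed distance vector returned by `scipy.spatial.distance.pdist`. A minimal sketch (a made-up 4-atom 3D configuration, without the extra-dimension punishing term) cross-checked against an explicit pair loop:

```python
# Pairwise Lennard-Jones energy E = sum over unique pairs of 4*(r^-12 - r^-6),
# computed from pdist as in the record, then verified with a double loop.
import numpy as np
from scipy.spatial.distance import pdist

pos = np.array([[0.0, 0.0, 0.0],
                [1.1, 0.0, 0.0],
                [0.0, 1.2, 0.0],
                [0.0, 0.0, 1.3]])   # toy positions, 4 atoms in 3D

r = pdist(pos)                      # unique pairwise distances
r6 = np.power(r, 6)
energy = np.sum(4.0 * (1.0 / (r6 * r6) - 1.0 / r6))

# explicit loop over unique pairs as a reference
ref = 0.0
for i in range(len(pos)):
    for j in range(i + 1, len(pos)):
        d = np.linalg.norm(pos[i] - pos[j])
        ref += 4.0 * (d ** -12 - d ** -6)

assert np.isclose(energy, ref)
print(energy)
```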
quantapix/qnarre.com | [
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70",
"f51d5945c20ef8182c4aa11f1b407d064c190c70"
] | [
"qnarre/models/ibert_quant_modules.py",
"qnarre/prep/convert/roberta.py",
"qnarre/prep/convert/gpt_neo.py",
"std/pytorch/01-low/76.py",
"qnarre/prep/convert/rag.py",
"qnarre/prep/convert/segformer.py",
"std/pytorch/02-mid/05.py",
"std/huggingface/xlate.py",
"qnarre/old/bert/run_squad.py"
] | [
"import decimal\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\n\nfrom ...utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass QuantEmbedding(qc.Module):\n def __init__(\n self,\n num_embeddings,\n embedding_dim,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n _weight=None,\n weight_bit=8,\n momentum=0.95,\n quant_mode=False,\n ):\n super().__init__()\n self.num_ = num_embeddings\n self.dim = embedding_dim\n self.padding_idx = padding_idx\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.sparse = sparse\n\n self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))\n self.register_buffer(\"weight_scaling_factor\", torch.zeros(1))\n self.register_buffer(\"weight_integer\", torch.zeros_like(self.weight))\n\n self.weight_bit = weight_bit\n self.momentum = momentum\n self.quant_mode = quant_mode\n self.percentile_mode = False\n self.weight_function = SymmetricQuantFunction.apply\n\n def forward(self, x, positions=None, incremental_state=None):\n if not self.quant_mode:\n return (\n F.embedding(\n x,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n ),\n None,\n )\n\n w = self.weight\n w_transform = w.data.detach()\n w_min = w_transform.min().expand(1)\n w_max = w_transform.max().expand(1)\n\n self.weight_scaling_factor = symmetric_linear_quantization_params(\n self.weight_bit, w_min, w_max, False\n )\n self.weight_integer = self.weight_function(\n self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor\n )\n\n emb_int = F.embedding(\n x,\n self.weight_integer,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n return emb_int * self.weight_scaling_factor, self.weight_scaling_factor\n\n\nclass QuantAct(qc.Module):\n def __init__(\n self,\n activation_bit,\n act_range_momentum=0.95,\n per_channel=False,\n channel_len=None,\n quant_mode=False,\n ):\n super().__init__()\n\n self.activation_bit = activation_bit\n self.act_range_momentum = act_range_momentum\n self.quant_mode = quant_mode\n self.per_channel = per_channel\n self.percentile = False\n self.act_function = SymmetricQuantFunction.apply\n\n if not self.per_channel:\n self.register_buffer(\"x_min\", torch.zeros(1))\n self.register_buffer(\"x_max\", torch.zeros(1))\n self.register_buffer(\"act_scaling_factor\", torch.zeros(1))\n self.x_min -= 1e-5\n self.x_max += 1e-5\n else:\n raise NotImplementedError(\"per-channel mode is not currently supported for activation.\")\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(activation_bit={self.activation_bit}, \"\n f\"quant_mode: {self.activation_bit}, Act_min: {self.x_min.item():.2f}, \"\n f\"Act_max: {self.x_max.item():.2f})\"\n )\n\n def forward(\n self,\n x,\n pre_act_scaling_factor=None,\n identity=None,\n identity_scaling_factor=None,\n specified_min=None,\n specified_max=None,\n ):\n\n x_act = x if identity is None else identity + x\n # collect running stats if training\n if self.training:\n assert not self.percentile, \"percentile mode is not currently supported for activation.\"\n assert (\n not self.per_channel\n ), \"per-channel mode is not currently supported for activation.\"\n x_min = x_act.data.min()\n x_max = x_act.data.max()\n\n assert (\n x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0\n ), \"NaN detected when computing min/max of 
the activation\"\n\n # Initialization\n if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:\n self.x_min = self.x_min + x_min\n self.x_max = self.x_max + x_max\n\n # exponential moving average (EMA)\n # use momentum to prevent the quantized values change greatly every iteration\n elif self.act_range_momentum == -1:\n self.x_min = torch.min(self.x_min, x_min)\n self.x_max = torch.max(self.x_max, x_max)\n else:\n self.x_min = self.x_min * self.act_range_momentum + x_min * (\n 1 - self.act_range_momentum\n )\n self.x_max = self.x_max * self.act_range_momentum + x_max * (\n 1 - self.act_range_momentum\n )\n\n if not self.quant_mode:\n return x_act, None\n\n x_min = self.x_min if specified_min is None else specified_min\n x_max = self.x_max if specified_max is None else specified_max\n\n self.act_scaling_factor = symmetric_linear_quantization_params(\n self.activation_bit, x_min, x_max, per_channel=self.per_channel\n )\n\n if pre_act_scaling_factor is None:\n # this is for the input quantization\n quant_act_int = self.act_function(\n x, self.activation_bit, self.percentile, self.act_scaling_factor\n )\n else:\n quant_act_int = FixedPointMul.apply(\n x,\n pre_act_scaling_factor,\n self.activation_bit,\n self.act_scaling_factor,\n identity,\n identity_scaling_factor,\n )\n\n correct_output_scale = self.act_scaling_factor.view(-1)\n\n return quant_act_int * correct_output_scale, self.act_scaling_factor\n\n\nclass QuantLinear(qc.Module):\n def __init__(\n self,\n in_features,\n out_features,\n bias=True,\n weight_bit=8,\n bias_bit=32,\n per_channel=False,\n quant_mode=False,\n ):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n self.weight = nn.Parameter(torch.zeros([out_features, in_features]))\n self.register_buffer(\"weight_integer\", torch.zeros_like(self.weight))\n self.register_buffer(\"fc_scaling_factor\", torch.zeros(self.out_features))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_features))\n self.register_buffer(\"bias_integer\", torch.zeros_like(self.bias))\n\n self.weight_bit = weight_bit\n self.quant_mode = quant_mode\n self.per_channel = per_channel\n self.bias_bit = bias_bit\n self.quant_mode = quant_mode\n self.percentile_mode = False\n self.weight_function = SymmetricQuantFunction.apply\n\n def __repr__(self):\n s = super().__repr__()\n s = f\"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})\"\n return s\n\n def forward(self, x, prev_act_scaling_factor=None):\n if not self.quant_mode:\n return F.linear(x, weight=self.weight, bias=self.bias), None\n\n # assert that prev_act_scaling_factor is a scalar tensor\n assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (\n \"Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. 
\"\n \"Please add a QuantAct layer with `per_channel = True` before this QuantAct layer\"\n )\n\n w = self.weight\n w_transform = w.data.detach()\n if self.per_channel:\n w_min, _ = torch.min(w_transform, dim=1, out=None)\n w_max, _ = torch.max(w_transform, dim=1, out=None)\n else:\n w_min = w_transform.min().expand(1)\n w_max = w_transform.max().expand(1)\n\n self.fc_scaling_factor = symmetric_linear_quantization_params(\n self.weight_bit, w_min, w_max, self.per_channel\n )\n self.weight_integer = self.weight_function(\n self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor\n )\n\n bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor\n\n if self.bias is not None:\n self.bias_integer = self.weight_function(\n self.bias, self.bias_bit, False, bias_scaling_factor\n )\n\n prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)\n x_int = x / prev_act_scaling_factor\n\n return (\n F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)\n * bias_scaling_factor,\n bias_scaling_factor,\n )\n\n\nclass IntGELU(qc.Module):\n def __init__(self, quant_mode=True, force_dequant=\"none\"):\n super().__init__()\n self.quant_mode = quant_mode\n\n if force_dequant in [\"nonlinear\", \"gelu\"]:\n logger.info(\"Force dequantize gelu\")\n self.quant_mode = False\n\n if not self.quant_mode:\n self.activation_fn = nn.GELU()\n\n self.k = 1.4142\n self.const = 14 # dummy integer constant\n self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c\n self.coeff[2] /= self.coeff[0]\n\n def int_erf(self, x_int, scaling_factor):\n b_int = torch.floor(self.coeff[1] / scaling_factor)\n c_int = torch.floor(self.coeff[2] / scaling_factor**2)\n sign = torch.sign(x_int)\n\n abs_int = torch.min(torch.abs(x_int), -b_int)\n y_int = sign * ((abs_int + b_int) ** 2 + c_int)\n scaling_factor = scaling_factor**2 * self.coeff[0]\n\n # avoid overflow\n y_int = floor_ste.apply(y_int / 2**self.const)\n scaling_factor = scaling_factor * 2**self.const\n\n return y_int, scaling_factor\n\n def forward(self, x, scaling_factor=None):\n if not self.quant_mode:\n return self.activation_fn(x), None\n\n x_int = x / scaling_factor\n sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)\n\n shift_int = 1.0 // sigmoid_scaling_factor\n\n x_int = x_int * (sigmoid_int + shift_int)\n scaling_factor = scaling_factor * sigmoid_scaling_factor / 2\n\n return x_int * scaling_factor, scaling_factor\n\n\nclass IntSoftmax(qc.Module):\n def __init__(self, output_bit, quant_mode=False, force_dequant=\"none\"):\n super().__init__()\n self.output_bit = output_bit\n self.max_bit = 32\n self.quant_mode = quant_mode\n\n if force_dequant in [\"nonlinear\", \"softmax\"]:\n logger.info(\"Force dequantize softmax\")\n self.quant_mode = False\n\n self.act = QuantAct(16, quant_mode=self.quant_mode)\n self.x0 = -0.6931 # -ln2\n self.const = 30 # dummy integer constant\n self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c\n self.coef[1] /= self.coef[0]\n self.coef[2] /= self.coef[0]\n\n def int_polynomial(self, x_int, scaling_factor):\n with torch.no_grad():\n b_int = torch.floor(self.coef[1] / scaling_factor)\n c_int = torch.floor(self.coef[2] / scaling_factor**2)\n z = (x_int + b_int) * x_int + c_int\n scaling_factor = self.coef[0] * scaling_factor**2\n return z, scaling_factor\n\n def int_exp(self, x_int, scaling_factor):\n with torch.no_grad():\n x0_int = torch.floor(self.x0 / scaling_factor)\n x_int = torch.max(x_int, self.const * x0_int)\n\n q = floor_ste.apply(x_int / x0_int)\n r = 
x_int - x0_int * q\n exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)\n exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)\n scaling_factor = exp_scaling_factor / 2**self.const\n return exp_int, scaling_factor\n\n def forward(self, x, scaling_factor):\n if not self.quant_mode:\n return F.softmax(x, dim=-1), None\n\n x_int = x / scaling_factor\n\n x_int_max, _ = x_int.max(dim=-1, keepdim=True)\n x_int = x_int - x_int_max\n exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)\n\n # Avoid overflow\n exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)\n exp_int = exp / exp_scaling_factor\n\n exp_int_sum = exp_int.sum(dim=-1, keepdim=True)\n factor = floor_ste.apply(2**self.max_bit / exp_int_sum)\n exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))\n scaling_factor = 1 / 2**self.output_bit\n return exp_int * scaling_factor, scaling_factor\n\n\nclass IntLayerNorm(qc.Module):\n def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant=\"none\"):\n super().__init__()\n self.normalized_shape = normalized_shape\n self.eps = eps\n\n self.weight = nn.Parameter(torch.zeros(normalized_shape))\n self.bias = nn.Parameter(torch.zeros(normalized_shape))\n\n self.quant_mode = quant_mode\n if force_dequant in [\"nonlinear\", \"layernorm\"]:\n logger.info(\"Force dequantize layernorm\")\n self.quant_mode = False\n\n self.register_buffer(\"shift\", torch.zeros(1))\n self.output_bit = output_bit\n self.max_bit = 32\n self.dim_sqrt = None\n self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)\n\n def set_shift(self, y_int):\n with torch.no_grad():\n y_sq_int = y_int**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()\n shift_old = self.shift\n self.shift = torch.max(self.shift, shift)\n logger.info(f\"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}\")\n\n def overflow_fallback(self, y_int):\n self.set_shift(y_int) # adjusts `self.shift`\n y_int_shifted = floor_ste.apply(y_int / 2**self.shift)\n y_sq_int = y_int_shifted**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n return var_int\n\n def forward(self, x, scaling_factor=None):\n if not self.quant_mode:\n mean = x.mean(axis=2, keepdim=True)\n y = x - mean\n var = torch.mean(y**2, axis=2, keepdim=True)\n x = y / torch.sqrt(self.eps + var)\n x = x * self.weight + self.bias\n return x, None\n\n # compute sqrt of the feature dimension if it is the first run\n if self.dim_sqrt is None:\n n = torch.tensor(x.shape[2], dtype=torch.float)\n self.dim_sqrt = torch.sqrt(n).to(x.device)\n\n # Normalization: computes mean and variance(std)\n x_int = x / scaling_factor\n mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))\n y_int = x_int - mean_int\n y_int_shifted = floor_ste.apply(y_int / 2**self.shift)\n y_sq_int = y_int_shifted**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n\n # overflow handling in training time\n if self.training:\n # if overflow is detected\n if var_int.max() >= 2**self.max_bit:\n var_int = self.overflow_fallback(y_int)\n assert var_int.max() < 2**self.max_bit + 0.1, (\n \"Error detected in overflow handling: \"\n \"`var_int` exceeds `self.max_bit` (the maximum possible bit width)\"\n )\n\n # To be replaced with integer-sqrt kernel that produces the same output\n std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift\n factor = floor_ste.apply(2**31 / std_int)\n y_int = 
floor_ste.apply(y_int * factor / 2)\n scaling_factor = self.dim_sqrt / 2**30\n\n # scaling and shifting\n bias = self.bias.data.detach() / (self.weight.data.detach())\n bias_int = floor_ste.apply(bias / scaling_factor)\n\n y_int = y_int + bias_int\n scaling_factor = scaling_factor * self.weight\n x = y_int * scaling_factor\n\n return x, scaling_factor\n\n\ndef get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):\n input_length = input.shape[0]\n\n lower_index = round(input_length * (1 - lower_percentile * 0.01))\n upper_index = round(input_length * upper_percentile * 0.01)\n\n upper_bound = torch.kthvalue(input, k=upper_index).values\n\n if lower_percentile == 0:\n lower_bound = upper_bound * 0\n # lower_index += 1\n else:\n lower_bound = -torch.kthvalue(-input, k=lower_index).values\n\n if not output_tensor:\n lower_bound = lower_bound.item()\n upper_bound = upper_bound.item()\n return lower_bound, upper_bound\n\n\ndef linear_quantize(input, scale, zero_point, inplace=False):\n if len(input.shape) == 4:\n scale = scale.view(-1, 1, 1, 1)\n zero_point = zero_point.view(-1, 1, 1, 1)\n # reshape scale and zeropoint for linear weights\n elif len(input.shape) == 2:\n scale = scale.view(-1, 1)\n zero_point = zero_point.view(-1, 1)\n else:\n scale = scale.view(-1)\n zero_point = zero_point.view(-1)\n # quantized = float / scale + zero_point\n if inplace:\n input.mul_(1.0 / scale).add_(zero_point).round_()\n return input\n return torch.round(1.0 / scale * input + zero_point)\n\n\ndef symmetric_linear_quantization_params(\n num_bits, saturation_min, saturation_max, per_channel=False\n):\n with torch.no_grad():\n n = 2 ** (num_bits - 1) - 1\n\n if per_channel:\n scale, _ = torch.max(\n torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1\n )\n scale = torch.clamp(scale, min=1e-8) / n\n\n else:\n scale = max(saturation_min.abs(), saturation_max.abs())\n scale = torch.clamp(scale, min=1e-8) / n\n\n return scale\n\n\nclass SymmetricQuantFunction(Function):\n @staticmethod\n def forward(ctx, x, k, percentile_mode, scale):\n zero_point = torch.tensor(0.0).to(scale.device)\n\n n = 2 ** (k - 1) - 1\n new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)\n new_quant_x = torch.clamp(new_quant_x, -n, n - 1)\n\n ctx.scale = scale\n return new_quant_x\n\n @staticmethod\n def backward(ctx, grad_output):\n\n scale = ctx.scale\n if len(grad_output.shape) == 4:\n scale = scale.view(-1, 1, 1, 1)\n # reshape scale and zeropoint for linear weights\n elif len(grad_output.shape) == 2:\n scale = scale.view(-1, 1)\n else:\n scale = scale.view(-1)\n\n return grad_output.clone() / scale, None, None, None, None\n\n\nclass floor_ste(Function):\n @staticmethod\n def forward(ctx, x):\n return torch.floor(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output.clone()\n\n\nclass round_ste(Function):\n @staticmethod\n def forward(ctx, x):\n return torch.round(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output.clone()\n\n\ndef batch_frexp(inputs, max_bit=31):\n shape_of_input = inputs.size()\n\n # trans the input to be a 1-d tensor\n inputs = inputs.view(-1)\n\n output_m, output_e = np.frexp(inputs.cpu().numpy())\n tmp_m = []\n for m in output_m:\n int_m_shifted = int(\n decimal.Decimal(m * (2**max_bit)).quantize(\n decimal.Decimal(\"1\"), rounding=decimal.ROUND_HALF_UP\n )\n )\n tmp_m.append(int_m_shifted)\n output_m = np.array(tmp_m)\n\n output_e = float(max_bit) - output_e\n\n return (\n 
torch.from_numpy(output_m).to(inputs.device).view(shape_of_input),\n torch.from_numpy(output_e).to(inputs.device).view(shape_of_input),\n )\n\n\nclass FixedPointMul(Function):\n @staticmethod\n def forward(\n ctx,\n pre_act,\n pre_act_scaling_factor,\n bit_num,\n z_scaling_factor,\n identity=None,\n identity_scaling_factor=None,\n ):\n\n if len(pre_act_scaling_factor.shape) == 3:\n reshape = lambda x: x # noqa: E731\n else:\n reshape = lambda x: x.view(1, 1, -1) # noqa: E731\n ctx.identity = identity\n\n n = 2 ** (bit_num - 1) - 1\n\n with torch.no_grad():\n pre_act_scaling_factor = reshape(pre_act_scaling_factor)\n if identity is not None:\n identity_scaling_factor = reshape(identity_scaling_factor)\n\n ctx.z_scaling_factor = z_scaling_factor\n\n z_int = torch.round(pre_act / pre_act_scaling_factor)\n _A = pre_act_scaling_factor.type(torch.double)\n _B = (z_scaling_factor.type(torch.float)).type(torch.double)\n new_scale = _A / _B\n new_scale = reshape(new_scale)\n\n m, e = batch_frexp(new_scale)\n\n output = z_int.type(torch.double) * m.type(torch.double)\n output = torch.round(output / (2.0**e))\n\n if identity is not None:\n # needs addition of identity activation\n wx_int = torch.round(identity / identity_scaling_factor)\n\n _A = identity_scaling_factor.type(torch.double)\n _B = (z_scaling_factor.type(torch.float)).type(torch.double)\n new_scale = _A / _B\n new_scale = reshape(new_scale)\n\n m1, e1 = batch_frexp(new_scale)\n output1 = wx_int.type(torch.double) * m1.type(torch.double)\n output1 = torch.round(output1 / (2.0**e1))\n\n output = output1 + output\n\n return torch.clamp(output.type(torch.float), -n - 1, n)\n\n @staticmethod\n def backward(ctx, grad_output):\n identity_grad = None\n if ctx.identity is not None:\n identity_grad = grad_output.clone() / ctx.z_scaling_factor\n return (\n grad_output.clone() / ctx.z_scaling_factor,\n None,\n None,\n None,\n None,\n identity_grad,\n None,\n )\n",
"# Copyright 2022 Quantapix Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport pathlib\nimport torch\n\nfrom argparse import ArgumentParser\nfrom fairseq.models.roberta import RobertaModel as FairseqRobertaModel\nfrom fairseq.modules import TransformerSentenceEncLayer\n\nfrom transformers.utils import logging\n\nfrom ..config.bert import PreTrained\nfrom ...models.bert import ForMasked, ForSeqClassifier\n\n\nlogging.set_verbosity_info()\n\nlog = logging.get_logger(__name__)\n\nSAMPLE_TEXT = \"Hello world! cécé herlolip\"\n\n\ndef to_pytorch(src_path, save_path, classification_head):\n roberta = FairseqRobertaModel.from_pretrained(src_path)\n roberta.eval() # disable drop\n roberta_sent_encoder = roberta.model.encoder.sentence_encoder\n cfg = PreTrained(\n s_vocab=roberta_sent_encoder.embed_tokens.num_embeddings,\n d_hidden=roberta.args.encoder_embed_dim,\n n_lays=roberta.args.n_enc_lays,\n n_heads=roberta.args.n_enc_heads,\n d_ffnet=roberta.args.encoder_ffn_embed_dim,\n n_pos=514,\n n_typ=1,\n norm_eps=1e-5,\n )\n if classification_head:\n cfg.n_labels = roberta.model.classification_heads[\"mnli\"].out_proj.weight.shape[0]\n print(\"Our BERT config:\", cfg)\n m = ForSeqClassifier(cfg) if classification_head else ForMasked(cfg)\n m.eval()\n m.roberta.embeddings.tok_embed.weight = roberta_sent_encoder.embed_tokens.weight\n m.roberta.embeddings.pos_embed.weight = roberta_sent_encoder.embed_positions.weight\n m.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(\n m.roberta.embeddings.token_type_embeddings.weight\n )\n m.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight\n m.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias\n for i in range(cfg.n_lays):\n layer = m.roberta.encoder.layer[i]\n roberta_layer: TransformerSentenceEncLayer = roberta_sent_encoder.layers[i]\n self_attn = layer.attention.self\n assert (\n roberta_layer.self_attn.k_proj.weight.data.shape\n == roberta_layer.self_attn.q_proj.weight.data.shape\n == roberta_layer.self_attn.v_proj.weight.data.shape\n == torch.Size((cfg.d_hidden, cfg.d_hidden))\n )\n self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight\n self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias\n self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight\n self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias\n self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight\n self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias\n self_output = layer.attention.output\n assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape\n self_output.dense.weight = roberta_layer.self_attn.out_proj.weight\n self_output.dense.bias = roberta_layer.self_attn.out_proj.bias\n self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight\n self_output.LayerNorm.bias = 
roberta_layer.self_attn_layer_norm.bias\n intermediate = layer.intermediate\n assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape\n intermediate.dense.weight = roberta_layer.fc1.weight\n intermediate.dense.bias = roberta_layer.fc1.bias\n bert_output = layer.output\n assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape\n bert_output.dense.weight = roberta_layer.fc2.weight\n bert_output.dense.bias = roberta_layer.fc2.bias\n bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight\n bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias\n if classification_head:\n m.classifier.dense.weight = roberta.model.classification_heads[\"mnli\"].dense.weight\n m.classifier.dense.bias = roberta.model.classification_heads[\"mnli\"].dense.bias\n m.classifier.out_proj.weight = roberta.model.classification_heads[\"mnli\"].out_proj.weight\n m.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n m.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight\n m.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias\n m.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight\n m.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias\n m.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight\n m.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias\n input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n our_output = m(input_ids)[0]\n if classification_head:\n their_output = roberta.model.classification_heads[\"mnli\"](\n roberta.extract_features(input_ids)\n )\n else:\n their_output = roberta.model(input_ids)[0]\n print(our_output.shape, their_output.shape)\n max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()\n print(f\"max_absolute_diff = {max_absolute_diff}\") # ~ 1e-7\n success = torch.allclose(our_output, their_output, atol=1e-3)\n print(\"Do both models output the same tensors?\", \"🔥\" if success else \"💩\")\n if not success:\n raise Exception(\"Something went wRoNg\")\n pathlib.Path(save_path).mkdir(parents=True, exist_ok=True)\n print(f\"Saving model to {save_path}\")\n m.save_pretrained(save_path)\n\n\nif __name__ == \"__main__\":\n x = ArgumentParser()\n x.add_argument(\"--roberta_checkpoint_path\", default=None, type=str, required=True)\n x.add_argument(\"--save_path\", default=None, type=str, required=True)\n x.add_argument(\"--classification_head\", action=\"store_true\")\n y = x.parse_args()\n to_pytorch(y.roberta_checkpoint_path, y.save_path, y.classification_head)\n",
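The conversion script above follows a copy-and-verify pattern: assign source parameters to the target module with a shape check, then compare outputs. A minimal sketch of the same idea for a single linear layer; the module names here are illustrative, not taken from the script.

    import torch
    from torch import nn

    src = nn.Linear(16, 16)   # stands in for a fairseq submodule
    dst = nn.Linear(16, 16)   # stands in for the target submodule

    # copy with an explicit shape check, as the script does per layer
    assert dst.weight.shape == src.weight.shape
    dst.weight = src.weight
    dst.bias = src.bias

    x = torch.randn(2, 16)
    # after copying, both modules must produce identical outputs
    assert torch.allclose(dst(x), src(x), atol=1e-6)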
"# Copyright 2022 Quantapix Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport json\nimport re\nimport tensorflow as tf\nimport torch\n\nfrom argparse import ArgumentParser\nfrom os.path import abspath\nfrom transformers.utils import logging\n\nfrom ..config.gpt_neo import PreTrained\nfrom ...models.gpt_neo import ForCausal\n\n\nlogging.set_verbosity_info()\n\nlog = logging.get_logger(__name__)\n\n\ndef load_src_weights(model, config, gpt_neo_checkpoint_path):\n tf_path = abspath(gpt_neo_checkpoint_path)\n log.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n if \"global_step\" not in name and \"adam\" not in name:\n array = tf.train.load_variable(tf_path, name)\n array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()\n name = name.replace(\"attn/q\", \"attn/attention/q_proj/w\")\n name = name.replace(\"attn/k\", \"attn/attention/k_proj/w\")\n name = name.replace(\"attn/v\", \"attn/attention/v_proj/w\")\n name = name.replace(\"attn/o\", \"attn/attention/out_proj/w\")\n name = name.replace(\"norm_1\", \"ln_1\")\n name = name.replace(\"norm_2\", \"ln_2\")\n name = name.replace(\"attn/compute_output_bias/o_b\", \"attn/attention/out_proj/b\")\n name = name.replace(\"conv1d_main/c_fc/kernel\", \"c_fc/w\")\n name = name.replace(\"conv1d_main/c_fc/bias\", \"c_fc/b\")\n name = name.replace(\"conv1d_main/c_proj/kernel\", \"c_proj/w\")\n name = name.replace(\"conv1d_main/c_proj/bias\", \"c_proj/b\")\n names.append(name)\n arrays.append(array)\n for name, array in zip(names, arrays):\n name = name[5:] # skip \"gpt2/\"\n name = name.split(\"/\")\n pointer = model.transformer\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if name[-1] == \"w\" and name[-2] in [\n \"out_proj\",\n \"k_proj\",\n \"q_proj\",\n \"v_proj\",\n \"c_proj\",\n \"c_fc\",\n ]:\n array = array.transpose()\n if name == [\"wte\"]:\n array = array[: config.s_vocab]\n if pointer.shape != array.shape:\n raise ValueError(\n f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}\"\n )\n print(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n embs = model.transformer.wte.weight\n lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False)\n lin.weight = embs\n model.set_output_embeddings(lin)\n return 
model\n\n\ndef to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):\n config_json = json.load(open(config_file, \"r\"))\n cfg = GPTNeoConfig(\n d_hidden=config_json[\"n_embd\"],\n n_lays=config_json[\"n_lays\"],\n n_heads=config_json[\"n_heads\"],\n attention_types=config_json[\"attention_types\"],\n n_pos=config_json[\"n_pos\"],\n drop_resid=config_json[\"res_dropout\"],\n drop_embed=config_json[\"drop_embed\"],\n drop_attn=config_json[\"attn_dropout\"],\n )\n print(f\"Building from config: {cfg}\")\n m = ForCausal(cfg)\n load_src_weights(m, cfg, tf_checkpoint_path)\n print(f\"Saving to: {pytorch_dump_path}\")\n m.save_pretrained(pytorch_dump_path)\n\n\nif __name__ == \"__main__\":\n x = ArgumentParser()\n x.add_argument(\"--src_path\", default=None, type=str, required=True)\n x.add_argument(\"--cfg_path\", default=None, type=str, required=True)\n x.add_argument(\"--save_path\", default=None, type=str, required=True)\n y = x.parse_args()\n to_pytorch(y.src_path, y.cfg_path, y.save_path)\n",
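The GPT-Neo converter above rebuilds the output projection from the token embedding matrix (weight tying). A self-contained sketch of that step, with the torch.nn import spelled out, since the snippet refers to nn without importing it; sizes are illustrative.

    import torch
    from torch import nn

    vocab_size, hidden = 1000, 64
    wte = nn.Embedding(vocab_size, hidden)        # input token embeddings

    lm_head = nn.Linear(hidden, vocab_size, bias=False)
    lm_head.weight = wte.weight                   # tie: logits reuse the embedding matrix

    h = torch.randn(2, 5, hidden)                 # pretend hidden states
    logits = lm_head(h)
    assert logits.shape == (2, 5, vocab_size)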
"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nplt.ion()\n\ndata_transforms = {\n \"train\": transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n ),\n \"val\": transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n ),\n}\n\ndata_dir = \"data/hymenoptera_data\"\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in [\"train\", \"val\"]}\ndataloaders = {\n x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4)\n for x in [\"train\", \"val\"]\n}\ndataset_sizes = {x: len(image_datasets[x]) for x in [\"train\", \"val\"]}\nclass_names = image_datasets[\"train\"].classes\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef imshow(inp, title=None):\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001)\n\n\ninputs, classes = next(iter(dataloaders[\"train\"]))\nout = torchvision.utils.make_grid(inputs)\n\nimshow(out, title=[class_names[x] for x in classes])\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print(\"Epoch {}/{}\".format(epoch, num_epochs - 1))\n print(\"-\" * 10)\n for phase in [\"train\", \"val\"]:\n if phase == \"train\":\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == \"train\"):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n if phase == \"train\":\n loss.backward()\n optimizer.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == \"train\":\n scheduler.step()\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n print(\"{} Loss: {:.4f} Acc: {:.4f}\".format(phase, epoch_loss, epoch_acc))\n if phase == \"val\" and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n print()\n\n time_elapsed = time.time() - since\n print(\"Training complete in {:.0f}m {:.0f}s\".format(time_elapsed // 60, time_elapsed % 60))\n print(\"Best val Acc: {:4f}\".format(best_acc))\n\n model.load_state_dict(best_model_wts)\n return model\n\n\ndef visualize_model(model, num_images=6):\n was_training = model.training\n model.eval()\n images_so_far = 0\n fig = plt.figure()\n\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(dataloaders[\"val\"]):\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = 
plt.subplot(num_images // 2, 2, images_so_far)\n ax.axis(\"off\")\n ax.set_title(\"predicted: {}\".format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)\n\n\nmodel_ft = models.resnet18(pretrained=True)\nnum_ftrs = model_ft.fc.in_features\nmodel_ft.fc = nn.Linear(num_ftrs, 2)\nmodel_ft = model_ft.to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)\n\nmodel_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)\nvisualize_model(model_ft)\n\nmodel_conv = torchvision.models.resnet18(pretrained=True)\nfor param in model_conv.parameters():\n param.requires_grad = False\n\nnum_ftrs = model_conv.fc.in_features\nmodel_conv.fc = nn.Linear(num_ftrs, 2)\n\nmodel_conv = model_conv.to(device)\n\ncriterion = nn.CrossEntropyLoss()\n\noptimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)\n\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)\n\n\nmodel_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25)\n\nvisualize_model(model_conv)\n\nplt.ioff()\nplt.show()\n",
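A small sketch of the feature-extraction setup used in the second half of the tutorial above: freeze the backbone, replace the head, and confirm that only the new head remains trainable (the resnet18 weights are downloaded on first use).

    import torch
    from torch import nn
    from torchvision import models

    model = models.resnet18(pretrained=True)
    for p in model.parameters():
        p.requires_grad = False                      # freeze the backbone
    model.fc = nn.Linear(model.fc.in_features, 2)    # new, trainable head

    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(f"trainable {trainable} / total {total}")  # only fc.weight and fc.bias train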
"# Copyright 2022 Quantapix Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport os\nimport pickle\nimport time\n\nimport numpy as np\n\nfrom ...tokenization_utils import PreTrainedTokenizer\nfrom ...tokenization_utils_base import BatchEncoding\nfrom ...utils import (\n cached_path,\n is_datasets_available,\n is_faiss_available,\n is_remote_url,\n logging,\n requires_backends,\n)\nfrom .configuration_rag import RagConfig\nfrom .tokenization_rag import RagTokenizer\n\n\nif is_datasets_available():\n from datasets import Dataset, load_dataset, load_from_disk\n\nif is_faiss_available():\n import faiss\n\n\nlogger = logging.get_logger(__name__)\n\n\nLEGACY_INDEX_PATH = \"https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/\"\n\n\nclass Index:\n def get_doc_dicts(self, doc_ids: np.ndarray):\n raise NotImplementedError\n\n def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):\n raise NotImplementedError\n\n def is_initialized(self):\n raise NotImplementedError\n\n def init_index(self):\n raise NotImplementedError\n\n\nclass LegacyIndex(Index):\n INDEX_FILENAME = \"hf_bert_base.hnswSQ8_correct_phi_128.c_index\"\n PASSAGE_FILENAME = \"psgs_w100.tsv.pkl\"\n\n def __init__(self, vector_size, index_path):\n self.index_id_to_db_id = []\n self.index_path = index_path\n self.passages = self._load_passages()\n self.vector_size = vector_size\n self.index = None\n self._index_initialized = False\n\n def _resolve_path(self, index_path, filename):\n assert os.path.isdir(index_path) or is_remote_url(\n index_path\n ), \"Please specify a valid `index_path`.\"\n archive_file = os.path.join(index_path, filename)\n try:\n # Load from URL or cache if already cached\n resolved_archive_file = cached_path(archive_file)\n except EnvironmentError:\n msg = (\n f\"Can't load '{archive_file}'. 
Make sure that:\\n\\n\"\n f\"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\\n\\n\"\n f\"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\\n\\n\"\n )\n raise EnvironmentError(msg)\n if resolved_archive_file == archive_file:\n logger.info(f\"loading file {archive_file}\")\n else:\n logger.info(f\"loading file {archive_file} from cache at {resolved_archive_file}\")\n return resolved_archive_file\n\n def _load_passages(self):\n logger.info(f\"Loading passages from {self.index_path}\")\n passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)\n with open(passages_path, \"rb\") as passages_file:\n passages = pickle.load(passages_file)\n return passages\n\n def _deserialize_index(self):\n logger.info(f\"Loading index from {self.index_path}\")\n resolved_index_path = self._resolve_path(\n self.index_path, self.INDEX_FILENAME + \".index.dpr\"\n )\n self.index = faiss.read_index(resolved_index_path)\n resolved_meta_path = self._resolve_path(\n self.index_path, self.INDEX_FILENAME + \".index_meta.dpr\"\n )\n with open(resolved_meta_path, \"rb\") as metadata_file:\n self.index_id_to_db_id = pickle.load(metadata_file)\n assert (\n len(self.index_id_to_db_id) == self.index.ntotal\n ), \"Deserialized index_id_to_db_id should match faiss index size\"\n\n def is_initialized(self):\n return self._index_initialized\n\n def init_index(self):\n index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)\n index.hnsw.efSearch = 128\n index.hnsw.efConstruction = 200\n self.index = index\n self._deserialize_index()\n self._index_initialized = True\n\n def get_doc_dicts(self, doc_ids: np.array):\n doc_list = []\n for doc_ids_i in doc_ids:\n ids = [str(int(doc_id)) for doc_id in doc_ids_i]\n docs = [self.passages[doc_id] for doc_id in ids]\n doc_list.append(docs)\n doc_dicts = []\n for docs in doc_list:\n doc_dict = {}\n doc_dict[\"title\"] = [doc[1] for doc in docs]\n doc_dict[\"text\"] = [doc[0] for doc in docs]\n doc_dicts.append(doc_dict)\n return doc_dicts\n\n def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):\n aux_dim = np.zeros(len(question_hidden_states), dtype=\"float32\").reshape(-1, 1)\n query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim))\n _, docs_ids = self.index.search(query_nhsw_vectors, n_docs)\n vectors = [\n [self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids\n ]\n ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]\n return np.array(ids), np.array(vectors)\n\n\nclass HFIndexBase(Index):\n def __init__(self, vector_size, dataset, index_initialized=False):\n self.vector_size = vector_size\n self.dataset = dataset\n self._index_initialized = index_initialized\n self._check_dataset_format(with_index=index_initialized)\n dataset.set_format(\n \"numpy\", columns=[\"embeddings\"], output_all_columns=True, dtype=\"float32\"\n )\n\n def _check_dataset_format(self, with_index):\n if not isinstance(self.dataset, Dataset):\n raise ValueError(\n f\"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}\"\n )\n if len({\"title\", \"text\", \"embeddings\"} - set(self.dataset.column_names)) > 0:\n raise ValueError(\n \"Dataset should be a dataset with the following columns: \"\n \"title (str), text (str) and embeddings (arrays of dimension vector_size), \"\n f\"but got columns {self.dataset.column_names}\"\n )\n if with_index and \"embeddings\" not in self.dataset.list_indexes():\n 
raise ValueError(\n \"Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it \"\n \"or `dataset.load_faiss_index` to load one from the disk.\"\n )\n\n def init_index(self):\n raise NotImplementedError()\n\n def is_initialized(self):\n return self._index_initialized\n\n def get_doc_dicts(self, doc_ids: np.ndarray):\n return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]\n\n def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):\n _, ids = self.dataset.search_batch(\"embeddings\", question_hidden_states, n_docs)\n docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]\n vectors = [doc[\"embeddings\"] for doc in docs]\n for i in range(len(vectors)):\n if len(vectors[i]) < n_docs:\n vectors[i] = np.vstack(\n [vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]\n )\n return np.array(ids), np.array(\n vectors\n ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)\n\n\nclass CanonicalHFIndex(HFIndexBase):\n def __init__(\n self,\n vector_size,\n dataset_name=\"wiki_dpr\",\n dataset_split=\"train\",\n index_name=None,\n index_path=None,\n use_dummy_dataset=False,\n ):\n if int(index_path is None) + int(index_name is None) != 1:\n raise ValueError(\"Please provide `index_name` or `index_path`.\")\n self.dataset_name = dataset_name\n self.dataset_split = dataset_split\n self.index_name = index_name\n self.index_path = index_path\n self.use_dummy_dataset = use_dummy_dataset\n logger.info(f\"Loading passages from {self.dataset_name}\")\n dataset = load_dataset(\n self.dataset_name,\n with_index=False,\n split=self.dataset_split,\n dummy=self.use_dummy_dataset,\n )\n super().__init__(vector_size, dataset, index_initialized=False)\n\n def init_index(self):\n if self.index_path is not None:\n logger.info(f\"Loading index from {self.index_path}\")\n self.dataset.load_faiss_index(\"embeddings\", file=self.index_path)\n else:\n logger.info(f\"Loading index from {self.dataset_name} with index name {self.index_name}\")\n self.dataset = load_dataset(\n self.dataset_name,\n with_embeddings=True,\n with_index=True,\n split=self.dataset_split,\n index_name=self.index_name,\n dummy=self.use_dummy_dataset,\n )\n self.dataset.set_format(\"numpy\", columns=[\"embeddings\"], output_all_columns=True)\n self._index_initialized = True\n\n\nclass CustomHFIndex(HFIndexBase):\n def __init__(self, vector_size, dataset, index_path=None):\n super().__init__(vector_size, dataset, index_initialized=index_path is None)\n self.index_path = index_path\n\n @classmethod\n def load_from_disk(cls, vector_size, dataset_path, index_path):\n logger.info(f\"Loading passages from {dataset_path}\")\n if dataset_path is None or index_path is None:\n raise ValueError(\n \"Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` \"\n \"and `dataset.get_index('embeddings').save(index_path)`.\"\n )\n dataset = load_from_disk(dataset_path)\n return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)\n\n def init_index(self):\n if not self.is_initialized():\n logger.info(f\"Loading index from {self.index_path}\")\n self.dataset.load_faiss_index(\"embeddings\", file=self.index_path)\n self._index_initialized = True\n\n\nclass RagRetriever:\n def __init__(\n self,\n config,\n question_encoder_tokenizer,\n generator_tokenizer,\n index=None,\n init_retrieval=True,\n ):\n self._init_retrieval = init_retrieval\n requires_backends(self, [\"datasets\", \"faiss\"])\n 
super().__init__()\n self.index = index or self._build_index(config)\n self.generator_tokenizer = generator_tokenizer\n self.question_encoder_tokenizer = question_encoder_tokenizer\n\n self.n_docs = config.n_docs\n self.batch_size = config.retrieval_batch_size\n\n self.config = config\n if self._init_retrieval:\n self.init_retrieval()\n\n self.ctx_encoder_tokenizer = None\n self.return_tokenized_docs = False\n\n @staticmethod\n def _build_index(config):\n if config.index_name == \"legacy\":\n return LegacyIndex(\n config.retrieval_vector_size,\n config.index_path or LEGACY_INDEX_PATH,\n )\n elif config.index_name == \"custom\":\n return CustomHFIndex.load_from_disk(\n vector_size=config.retrieval_vector_size,\n dataset_path=config.passages_path,\n index_path=config.index_path,\n )\n else:\n return CanonicalHFIndex(\n vector_size=config.retrieval_vector_size,\n dataset_name=config.dataset,\n dataset_split=config.dataset_split,\n index_name=config.index_name,\n index_path=config.index_path,\n use_dummy_dataset=config.use_dummy_dataset,\n )\n\n @classmethod\n def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kw):\n requires_backends(cls, [\"datasets\", \"faiss\"])\n config = kw.pop(\"config\", None) or RagConfig.from_pretrained(retriever_name_or_path, **kw)\n rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)\n question_encoder_tokenizer = rag_tokenizer.question_encoder\n generator_tokenizer = rag_tokenizer.generator\n if indexed_dataset is not None:\n config.index_name = \"custom\"\n index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)\n else:\n index = cls._build_index(config)\n return cls(\n config,\n question_encoder_tokenizer=question_encoder_tokenizer,\n generator_tokenizer=generator_tokenizer,\n index=index,\n )\n\n def save_pretrained(self, save_directory):\n if isinstance(self.index, CustomHFIndex):\n if self.config.index_path is None:\n index_path = os.path.join(save_directory, \"hf_dataset_index.faiss\")\n self.index.dataset.get_index(\"embeddings\").save(index_path)\n self.config.index_path = index_path\n if self.config.passages_path is None:\n passages_path = os.path.join(save_directory, \"hf_dataset\")\n # datasets don't support save_to_disk with indexes right now\n faiss_index = self.index.dataset._indexes.pop(\"embeddings\")\n self.index.dataset.save_to_disk(passages_path)\n self.index.dataset._indexes[\"embeddings\"] = faiss_index\n self.config.passages_path = passages_path\n self.config.save_pretrained(save_directory)\n rag_tokenizer = RagTokenizer(\n question_encoder=self.question_encoder_tokenizer,\n generator=self.generator_tokenizer,\n )\n rag_tokenizer.save_pretrained(save_directory)\n\n def init_retrieval(self):\n logger.info(\"initializing retrieval\")\n self.index.init_index()\n\n def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):\n def cat_input_and_doc(doc_title, doc_text, input_string, prefix):\n if doc_title.startswith('\"'):\n doc_title = doc_title[1:]\n if doc_title.endswith('\"'):\n doc_title = doc_title[:-1]\n if prefix is None:\n prefix = \"\"\n out = (\n prefix\n + doc_title\n + self.config.title_sep\n + doc_text\n + self.config.doc_sep\n + input_string\n ).replace(\" \", \" \")\n return out\n\n rag_input_strings = [\n cat_input_and_doc(\n docs[i][\"title\"][j],\n docs[i][\"text\"][j],\n input_strings[i],\n prefix,\n )\n for i in range(len(docs))\n for j in range(n_docs)\n ]\n\n contextualized_inputs = self.generator_tokenizer.batch_encode_plus(\n 
rag_input_strings,\n max_length=self.config.max_combined_length,\n return_tensors=return_tensors,\n padding=\"max_length\",\n truncation=True,\n )\n\n return contextualized_inputs[\"input_ids\"], contextualized_inputs[\"attention_mask\"]\n\n def _chunk_tensor(self, t, chunk_size):\n return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]\n\n def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs):\n question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)\n ids_batched = []\n vectors_batched = []\n for question_hidden_states in question_hidden_states_batched:\n start_time = time.time()\n ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)\n logger.debug(\n f\"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}\"\n )\n ids_batched.extend(ids)\n vectors_batched.extend(vectors)\n return (\n np.array(ids_batched),\n np.array(vectors_batched),\n ) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)\n\n def retrieve(self, question_hidden_states: np.ndarray, n_docs):\n\n doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)\n return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)\n\n def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):\n # used in end2end retriever training\n self.ctx_encoder_tokenizer = ctx_encoder_tokenizer\n self.return_tokenized_docs = True\n\n def __call__(\n self,\n question_input_ids,\n question_hidden_states,\n prefix=None,\n n_docs=None,\n return_tensors=None,\n ):\n n_docs = n_docs if n_docs is not None else self.n_docs\n prefix = prefix if prefix is not None else self.config.generator.prefix\n retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)\n\n input_strings = self.question_encoder_tokenizer.batch_decode(\n question_input_ids, skip_special_tokens=True\n )\n context_input_ids, context_attention_mask = self.postprocess_docs(\n docs, input_strings, prefix, n_docs, return_tensors=return_tensors\n )\n\n if self.return_tokenized_docs:\n retrived_doc_text = []\n retrived_doc_title = []\n\n for b_idx in range(len(docs)):\n for doc_idx in range(n_docs):\n retrived_doc_text.append(docs[b_idx][\"text\"][doc_idx])\n retrived_doc_title.append(docs[b_idx][\"title\"][doc_idx])\n\n tokenized_docs = self.ctx_encoder_tokenizer(\n retrived_doc_title,\n retrived_doc_text,\n truncation=True,\n padding=\"longest\",\n return_tensors=return_tensors,\n )\n\n return BatchEncoding(\n {\n \"context_input_ids\": context_input_ids,\n \"context_attention_mask\": context_attention_mask,\n \"retrieved_doc_embeds\": retrieved_doc_embeds,\n \"doc_ids\": doc_ids,\n \"tokenized_doc_ids\": tokenized_docs[\"input_ids\"],\n \"tokenized_doc_attention_mask\": tokenized_docs[\"attention_mask\"],\n },\n tensor_type=return_tensors,\n )\n\n else:\n return BatchEncoding(\n {\n \"context_input_ids\": context_input_ids,\n \"context_attention_mask\": context_attention_mask,\n \"retrieved_doc_embeds\": retrieved_doc_embeds,\n \"doc_ids\": doc_ids,\n },\n tensor_type=return_tensors,\n )\n",
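The retriever above delegates nearest-neighbour search either to a datasets index or to raw faiss. A minimal faiss inner-product search, independent of the RAG classes; dimensions and counts here are arbitrary.

    import numpy as np
    import faiss

    d, n_passages, n_docs = 64, 1000, 5
    passages = np.random.rand(n_passages, d).astype("float32")  # document embeddings
    queries = np.random.rand(2, d).astype("float32")            # question embeddings

    index = faiss.IndexFlatIP(d)       # exact inner-product index
    index.add(passages)
    scores, ids = index.search(queries, n_docs)
    print(ids.shape)                   # (2, 5): top-5 passage ids per question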
"# Copyright 2022 Quantapix Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport argparse\nimport json\nimport torch\nimport requests\n\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom PIL import Image\n\nfrom huggingface_hub import hf_hub_download\nfrom transformers import (\n SegformerConfig,\n SegformerFeatureExtractor,\n SegformerForImageClassification,\n SegformerForSemanticSegmentation,\n)\nfrom transformers.utils import logging\n\n\nlogging.set_verbosity_info()\nlogger = logging.get_logger(__name__)\n\n\ndef rename_keys(state_dict, encoder_only=False):\n new_state_dict = OrderedDict()\n for key, value in state_dict.items():\n if encoder_only and not key.startswith(\"head\"):\n key = \"segformer.encoder.\" + key\n if key.startswith(\"backbone\"):\n key = key.replace(\"backbone\", \"segformer.encoder\")\n if \"patch_embed\" in key:\n # replace for example patch_embed1 by patch_embeddings.0\n idx = key[key.find(\"patch_embed\") + len(\"patch_embed\")]\n key = key.replace(f\"patch_embed{idx}\", f\"patch_embeddings.{int(idx)-1}\")\n if \"norm\" in key:\n key = key.replace(\"norm\", \"layer_norm\")\n if \"segformer.encoder.layer_norm\" in key:\n # replace for example layer_norm1 by layer_norm.0\n idx = key[\n key.find(\"segformer.encoder.layer_norm\") + len(\"segformer.encoder.layer_norm\")\n ]\n key = key.replace(f\"layer_norm{idx}\", f\"layer_norm.{int(idx)-1}\")\n if \"layer_norm1\" in key:\n key = key.replace(\"layer_norm1\", \"layer_norm_1\")\n if \"layer_norm2\" in key:\n key = key.replace(\"layer_norm2\", \"layer_norm_2\")\n if \"block\" in key:\n # replace for example block1 by block.0\n idx = key[key.find(\"block\") + len(\"block\")]\n key = key.replace(f\"block{idx}\", f\"block.{int(idx)-1}\")\n if \"attn.q\" in key:\n key = key.replace(\"attn.q\", \"attention.self.query\")\n if \"attn.proj\" in key:\n key = key.replace(\"attn.proj\", \"attention.output.dense\")\n if \"attn\" in key:\n key = key.replace(\"attn\", \"attention.self\")\n if \"fc1\" in key:\n key = key.replace(\"fc1\", \"dense1\")\n if \"fc2\" in key:\n key = key.replace(\"fc2\", \"dense2\")\n if \"linear_pred\" in key:\n key = key.replace(\"linear_pred\", \"classifier\")\n if \"linear_fuse\" in key:\n key = key.replace(\"linear_fuse.conv\", \"linear_fuse\")\n key = key.replace(\"linear_fuse.bn\", \"batch_norm\")\n if \"linear_c\" in key:\n # replace for example linear_c4 by linear_c.3\n idx = key[key.find(\"linear_c\") + len(\"linear_c\")]\n key = key.replace(f\"linear_c{idx}\", f\"linear_c.{int(idx)-1}\")\n if key.startswith(\"head\"):\n key = key.replace(\"head\", \"classifier\")\n new_state_dict[key] = value\n\n return new_state_dict\n\n\ndef read_in_k_v(state_dict, config):\n for i in range(config.num_encoder_blocks):\n for j in range(config.depths[i]):\n kv_weight = state_dict.pop(f\"segformer.encoder.block.{i}.{j}.attention.self.kv.weight\")\n kv_bias = 
state_dict.pop(f\"segformer.encoder.block.{i}.{j}.attention.self.kv.bias\")\n state_dict[f\"segformer.encoder.block.{i}.{j}.attention.self.key.weight\"] = kv_weight[\n : config.hidden_sizes[i], :\n ]\n state_dict[f\"segformer.encoder.block.{i}.{j}.attention.self.key.bias\"] = kv_bias[\n : config.hidden_sizes[i]\n ]\n state_dict[f\"segformer.encoder.block.{i}.{j}.attention.self.value.weight\"] = kv_weight[\n config.hidden_sizes[i] :, :\n ]\n state_dict[f\"segformer.encoder.block.{i}.{j}.attention.self.value.bias\"] = kv_bias[\n config.hidden_sizes[i] :\n ]\n\n\ndef prepare_img():\n url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n image = Image.open(requests.get(url, stream=True).raw)\n return image\n\n\[email protected]_grad()\ndef convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):\n config = SegformerConfig()\n encoder_only = False\n repo_id = \"datasets/huggingface/label-files\"\n if \"segformer\" in model_name:\n size = model_name[len(\"segformer.\") : len(\"segformer.\") + 2]\n if \"ade\" in model_name:\n config.num_labels = 150\n filename = \"ade20k-id2label.json\"\n expected_shape = (1, 150, 128, 128)\n elif \"city\" in model_name:\n config.num_labels = 19\n filename = \"cityscapes-id2label.json\"\n expected_shape = (1, 19, 128, 128)\n else:\n raise ValueError(f\"Model {model_name} not supported\")\n elif \"mit\" in model_name:\n encoder_only = True\n size = model_name[4:6]\n config.num_labels = 1000\n filename = \"imagenet-1k-id2label.json\"\n expected_shape = (1, 1000)\n else:\n raise ValueError(f\"Model {model_name} not supported\")\n id2label = json.load(open(hf_hub_download(repo_id, filename), \"r\"))\n id2label = {int(k): v for k, v in id2label.items()}\n config.id2label = id2label\n config.label2id = {v: k for k, v in id2label.items()}\n if size == \"b0\":\n pass\n elif size == \"b1\":\n config.hidden_sizes = [64, 128, 320, 512]\n config.decoder_hidden_size = 256\n elif size == \"b2\":\n config.hidden_sizes = [64, 128, 320, 512]\n config.decoder_hidden_size = 768\n config.depths = [3, 4, 6, 3]\n elif size == \"b3\":\n config.hidden_sizes = [64, 128, 320, 512]\n config.decoder_hidden_size = 768\n config.depths = [3, 4, 18, 3]\n elif size == \"b4\":\n config.hidden_sizes = [64, 128, 320, 512]\n config.decoder_hidden_size = 768\n config.depths = [3, 8, 27, 3]\n elif size == \"b5\":\n config.hidden_sizes = [64, 128, 320, 512]\n config.decoder_hidden_size = 768\n config.depths = [3, 6, 40, 3]\n else:\n raise ValueError(f\"Size {size} not supported\")\n feature_extractor = SegformerFeatureExtractor(\n image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False\n )\n image = prepare_img()\n pixel_values = feature_extractor(images=image, return_tensors=\"pt\").pixel_values\n\n logger.info(f\"Converting model {model_name}...\")\n if encoder_only:\n state_dict = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n else:\n state_dict = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))[\"state_dict\"]\n state_dict = rename_keys(state_dict, encoder_only=encoder_only)\n if not encoder_only:\n del state_dict[\"decode_head.conv_seg.weight\"]\n del state_dict[\"decode_head.conv_seg.bias\"]\n read_in_k_v(state_dict, config)\n if encoder_only:\n config.reshape_last_stage = False\n model = SegformerForImageClassification(config)\n else:\n model = SegformerForSemanticSegmentation(config)\n model.load_state_dict(state_dict)\n model.eval()\n outputs = model(pixel_values)\n logits = outputs.logits\n if 
model_name == \"segformer.b0.512x512.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-4.6310, -5.5232, -6.2356],\n [-5.1921, -6.1444, -6.5996],\n [-5.4424, -6.2790, -6.7574],\n ],\n [\n [-12.1391, -13.3122, -13.9554],\n [-12.8732, -13.9352, -14.3563],\n [-12.9438, -13.8226, -14.2513],\n ],\n [\n [-12.5134, -13.4686, -14.4915],\n [-12.8669, -14.4343, -14.7758],\n [-13.2523, -14.5819, -15.0694],\n ],\n ]\n )\n elif model_name == \"segformer.b1.512x512.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-7.5820, -8.7231, -8.3215],\n [-8.0600, -10.3529, -10.0304],\n [-7.5208, -9.4103, -9.6239],\n ],\n [\n [-12.6918, -13.8994, -13.7137],\n [-13.3196, -15.7523, -15.4789],\n [-12.9343, -14.8757, -14.9689],\n ],\n [\n [-11.1911, -11.9421, -11.3243],\n [-11.3342, -13.6839, -13.3581],\n [-10.3909, -12.1832, -12.4858],\n ],\n ]\n )\n elif model_name == \"segformer.b2.512x512.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-11.8173, -14.3850, -16.3128],\n [-14.5648, -16.5804, -18.6568],\n [-14.7223, -15.7387, -18.4218],\n ],\n [\n [-15.7290, -17.9171, -19.4423],\n [-18.3105, -19.9448, -21.4661],\n [-17.9296, -18.6497, -20.7910],\n ],\n [\n [-15.0783, -17.0336, -18.2789],\n [-16.8771, -18.6870, -20.1612],\n [-16.2454, -17.1426, -19.5055],\n ],\n ]\n )\n elif model_name == \"segformer.b3.512x512.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-9.0878, -10.2081, -10.1891],\n [-9.3144, -10.7941, -10.9843],\n [-9.2294, -10.3855, -10.5704],\n ],\n [\n [-12.2316, -13.9068, -13.6102],\n [-12.9161, -14.3702, -14.3235],\n [-12.5233, -13.7174, -13.7932],\n ],\n [\n [-14.6275, -15.2490, -14.9727],\n [-14.3400, -15.9687, -16.2827],\n [-14.1484, -15.4033, -15.8937],\n ],\n ]\n )\n elif model_name == \"segformer.b4.512x512.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-12.3144, -13.2447, -14.0802],\n [-13.3614, -14.5816, -15.6117],\n [-13.3340, -14.4433, -16.2219],\n ],\n [\n [-19.2781, -20.4128, -20.7506],\n [-20.6153, -21.6566, -22.0998],\n [-19.9800, -21.0430, -22.1494],\n ],\n [\n [-18.8739, -19.7804, -21.1834],\n [-20.1233, -21.6765, -23.2944],\n [-20.0315, -21.2641, -23.6944],\n ],\n ]\n )\n elif model_name == \"segformer.b5.640x640.ade.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-9.5524, -12.0835, -11.7348],\n [-10.5229, -13.6446, -14.5662],\n [-9.5842, -12.8851, -13.9414],\n ],\n [\n [-15.3432, -17.5323, -17.0818],\n [-16.3330, -18.9255, -19.2101],\n [-15.1340, -17.7848, -18.3971],\n ],\n [\n [-12.6072, -14.9486, -14.6631],\n [-13.7629, -17.0907, -17.7745],\n [-12.7899, -16.1695, -17.1671],\n ],\n ]\n )\n # Cityscapes checkpoints\n elif model_name == \"segformer.b0.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-11.9295, -13.4057, -14.8106],\n [-13.3431, -14.8179, -15.3781],\n [-14.2836, -15.5942, -16.1588],\n ],\n [\n [-11.4906, -12.8067, -13.6564],\n [-13.1189, -14.0500, -14.1543],\n [-13.8748, -14.5136, -14.8789],\n ],\n [\n [0.5374, 0.1067, -0.4742],\n [0.1141, -0.2255, -0.7099],\n [-0.3000, -0.5924, -1.3105],\n ],\n ]\n )\n elif model_name == \"segformer.b0.512x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-7.8217, -9.8767, -10.1717],\n [-9.4438, -10.9058, -11.4047],\n [-9.7939, -12.3495, -12.1079],\n ],\n [\n [-7.1514, -9.5336, -10.0860],\n [-9.7776, -11.6822, -11.8439],\n [-10.1411, -12.7655, -12.8972],\n ],\n [\n [0.3021, 0.0805, -0.2310],\n [-0.0328, -0.1605, -0.2714],\n [-0.1408, -0.5477, -0.6976],\n ],\n ]\n )\n elif model_name == \"segformer.b0.640x1280.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n 
[-1.1372e01, -1.2787e01, -1.3477e01],\n [-1.2536e01, -1.4194e01, -1.4409e01],\n [-1.3217e01, -1.4888e01, -1.5327e01],\n ],\n [\n [-1.4791e01, -1.7122e01, -1.8277e01],\n [-1.7163e01, -1.9192e01, -1.9533e01],\n [-1.7897e01, -1.9991e01, -2.0315e01],\n ],\n [\n [7.6723e-01, 4.1921e-01, -7.7878e-02],\n [4.7772e-01, 9.5557e-03, -2.8082e-01],\n [3.6032e-01, -2.4826e-01, -5.1168e-01],\n ],\n ]\n )\n elif model_name == \"segformer.b0.768x768.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-9.4959, -11.3087, -11.7479],\n [-11.0025, -12.6540, -12.3319],\n [-11.4064, -13.0487, -12.9905],\n ],\n [\n [-9.8905, -11.3084, -12.0854],\n [-11.1726, -12.7698, -12.9583],\n [-11.5985, -13.3278, -14.1774],\n ],\n [\n [0.2213, 0.0192, -0.2466],\n [-0.1731, -0.4213, -0.4874],\n [-0.3126, -0.6541, -1.1389],\n ],\n ]\n )\n elif model_name == \"segformer.b1.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-13.5748, -13.9111, -12.6500],\n [-14.3500, -15.3683, -14.2328],\n [-14.7532, -16.0424, -15.6087],\n ],\n [\n [-17.1651, -15.8725, -12.9653],\n [-17.2580, -17.3718, -14.8223],\n [-16.6058, -16.8783, -16.7452],\n ],\n [\n [-3.6456, -3.0209, -1.4203],\n [-3.0797, -3.1959, -2.0000],\n [-1.8757, -1.9217, -1.6997],\n ],\n ]\n )\n elif model_name == \"segformer.b2.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-16.0976, -16.4856, -17.3962],\n [-16.6234, -19.0342, -19.7685],\n [-16.0900, -18.0661, -19.1180],\n ],\n [\n [-18.4750, -18.8488, -19.5074],\n [-19.4030, -22.1570, -22.5977],\n [-19.1191, -20.8486, -22.3783],\n ],\n [\n [-4.5178, -5.5037, -6.5109],\n [-5.0884, -7.2174, -8.0334],\n [-4.4156, -5.8117, -7.2970],\n ],\n ]\n )\n elif model_name == \"segformer.b3.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-14.2081, -14.4732, -14.1977],\n [-14.5867, -16.4423, -16.6356],\n [-13.4441, -14.9685, -16.8696],\n ],\n [\n [-14.4576, -14.7073, -15.0451],\n [-15.0816, -17.6237, -17.9873],\n [-14.4213, -16.0199, -18.5992],\n ],\n [\n [-4.7349, -4.9588, -5.0966],\n [-4.3210, -6.9325, -7.2591],\n [-3.4312, -4.7484, -7.1917],\n ],\n ]\n )\n elif model_name == \"segformer.b4.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-11.7737, -11.9526, -11.3273],\n [-13.6692, -14.4574, -13.8878],\n [-13.8937, -14.6924, -15.9345],\n ],\n [\n [-14.6706, -14.5330, -14.1306],\n [-16.1502, -16.8180, -16.4269],\n [-16.8338, -17.8939, -20.1746],\n ],\n [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],\n ]\n )\n elif model_name == \"segformer.b5.1024x1024.city.160k\":\n expected_slice = torch.tensor(\n [\n [\n [-12.5641, -13.4777, -13.0684],\n [-13.9587, -15.8983, -16.6557],\n [-13.3109, -15.7350, -16.3141],\n ],\n [\n [-14.7074, -15.4352, -14.5944],\n [-16.6353, -18.1663, -18.6120],\n [-15.1702, -18.0329, -18.1547],\n ],\n [\n [-1.7990, -2.0951, -1.7784],\n [-2.6397, -3.8245, -3.9686],\n [-1.5264, -2.8126, -2.9316],\n ],\n ]\n )\n else:\n predicted_class_idx = logits.argmax(-1).item()\n print(\"Predicted class:\", model.config.id2label[predicted_class_idx])\n if not encoder_only:\n assert logits.shape == expected_shape\n assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)\n logger.info(f\"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...\")\n Path(pytorch_dump_folder_path).mkdir(exist_ok=True)\n model.save_pretrained(pytorch_dump_folder_path)\n feature_extractor.save_pretrained(pytorch_dump_folder_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\n \"--model_name\",\n default=\"segformer.b0.512x512.ade.160k\",\n type=str,\n help=\"Name of the model you'd like to convert.\",\n )\n parser.add_argument(\n \"--checkpoint_path\",\n default=None,\n type=str,\n help=\"Path to the original PyTorch checkpoint (.pth file).\",\n )\n parser.add_argument(\n \"--pytorch_dump_folder_path\",\n default=None,\n type=str,\n help=\"Path to the folder to output PyTorch model.\",\n )\n args = parser.parse_args()\n convert_segformer_checkpoint(\n args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path\n )\n",
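read_in_k_v in the SegFormer converter above splits a fused key/value projection into separate key and value tensors. A standalone sketch of that split; the hidden size is illustrative.

    import torch

    hidden = 8
    kv_weight = torch.randn(2 * hidden, hidden)   # fused [key; value] projection
    kv_bias = torch.randn(2 * hidden)

    key_weight, value_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
    key_bias, value_bias = kv_bias[:hidden], kv_bias[hidden:]

    assert key_weight.shape == value_weight.shape == (hidden, hidden)
    assert key_bias.shape == value_bias.shape == (hidden,)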
"import gym\nimport math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple, deque\nfrom itertools import count\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.transforms as T\n\n\nenv = gym.make(\"CartPole-v0\").unwrapped\n\nis_ipython = \"inline\" in matplotlib.get_backend()\nif is_ipython:\n from IPython import display\n\nplt.ion()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nTransition = namedtuple(\"Transition\", (\"state\", \"action\", \"next_state\", \"reward\"))\n\n\nclass ReplayMemory(object):\n def __init__(self, capacity):\n self.memory = deque([], maxlen=capacity)\n\n def push(self, *args):\n \"\"\"Save a transition\"\"\"\n self.memory.append(Transition(*args))\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass DQN(nn.Module):\n def __init__(self, h, w, outputs):\n super(DQN, self).__init__()\n self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)\n self.bn1 = nn.BatchNorm2d(16)\n self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)\n self.bn2 = nn.BatchNorm2d(32)\n self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)\n self.bn3 = nn.BatchNorm2d(32)\n\n def conv2d_size_out(size, kernel_size=5, stride=2):\n return (size - (kernel_size - 1) - 1) // stride + 1\n\n convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))\n convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))\n linear_input_size = convw * convh * 32\n self.head = nn.Linear(linear_input_size, outputs)\n\n def forward(self, x):\n x = x.to(device)\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n return self.head(x.view(x.size(0), -1))\n\n\nresize = T.Compose([T.ToPILImage(), T.Resize(40, interpolation=Image.CUBIC), T.ToTensor()])\n\n\ndef get_cart_location(screen_width):\n world_width = env.x_threshold * 2\n scale = screen_width / world_width\n return int(env.state[0] * scale + screen_width / 2.0)\n\n\ndef get_screen():\n screen = env.render(mode=\"rgb_array\").transpose((2, 0, 1))\n _, screen_height, screen_width = screen.shape\n screen = screen[:, int(screen_height * 0.4) : int(screen_height * 0.8)]\n view_width = int(screen_width * 0.6)\n cart_location = get_cart_location(screen_width)\n if cart_location < view_width // 2:\n slice_range = slice(view_width)\n elif cart_location > (screen_width - view_width // 2):\n slice_range = slice(-view_width, None)\n else:\n slice_range = slice(cart_location - view_width // 2, cart_location + view_width // 2)\n screen = screen[:, :, slice_range]\n screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n screen = torch.from_numpy(screen)\n return resize(screen).unsqueeze(0)\n\n\nenv.reset()\nplt.figure()\nplt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(), interpolation=\"none\")\nplt.title(\"Example extracted screen\")\nplt.show()\n\n\nBATCH_SIZE = 128\nGAMMA = 0.999\nEPS_START = 0.9\nEPS_END = 0.05\nEPS_DECAY = 200\nTARGET_UPDATE = 10\n\ninit_screen = get_screen()\n_, _, screen_height, screen_width = init_screen.shape\n\nn_actions = env.action_space.n\n\npolicy_net = DQN(screen_height, screen_width, n_actions).to(device)\ntarget_net = DQN(screen_height, screen_width, n_actions).to(device)\ntarget_net.load_state_dict(policy_net.state_dict())\ntarget_net.eval()\n\noptimizer = 
optim.RMSprop(policy_net.parameters())\nmemory = ReplayMemory(10000)\n\n\nsteps_done = 0\n\n\ndef select_action(state):\n global steps_done\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1.0 * steps_done / EPS_DECAY)\n steps_done += 1\n if sample > eps_threshold:\n with torch.no_grad():\n return policy_net(state).max(1)[1].view(1, 1)\n else:\n return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)\n\n\nepisode_durations = []\n\n\ndef plot_durations():\n plt.figure(2)\n plt.clf()\n durations_t = torch.tensor(episode_durations, dtype=torch.float)\n plt.title(\"Training...\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Duration\")\n plt.plot(durations_t.numpy())\n if len(durations_t) >= 100:\n means = durations_t.unfold(0, 100, 1).mean(1).view(-1)\n means = torch.cat((torch.zeros(99), means))\n plt.plot(means.numpy())\n\n plt.pause(0.001)\n if is_ipython:\n display.clear_output(wait=True)\n display.display(plt.gcf())\n\n\ndef optimize_model():\n if len(memory) < BATCH_SIZE:\n return\n transitions = memory.sample(BATCH_SIZE)\n batch = Transition(*zip(*transitions))\n\n non_final_mask = torch.tensor(\n tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool\n )\n non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n\n state_action_values = policy_net(state_batch).gather(1, action_batch)\n\n next_state_values = torch.zeros(BATCH_SIZE, device=device)\n next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()\n\n expected_state_action_values = (next_state_values * GAMMA) + reward_batch\n\n criterion = nn.SmoothL1Loss()\n loss = criterion(state_action_values, expected_state_action_values.unsqueeze(1))\n\n optimizer.zero_grad()\n loss.backward()\n for param in policy_net.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n\n\nnum_episodes = 50\nfor i_episode in range(num_episodes):\n env.reset()\n last_screen = get_screen()\n current_screen = get_screen()\n state = current_screen - last_screen\n for t in count():\n action = select_action(state)\n _, reward, done, _ = env.step(action.item())\n reward = torch.tensor([reward], device=device)\n\n last_screen = current_screen\n current_screen = get_screen()\n if not done:\n next_state = current_screen - last_screen\n else:\n next_state = None\n\n memory.push(state, action, next_state, reward)\n\n state = next_state\n\n optimize_model()\n if done:\n episode_durations.append(t + 1)\n plot_durations()\n break\n if i_episode % TARGET_UPDATE == 0:\n target_net.load_state_dict(policy_net.state_dict())\n\nprint(\"Complete\")\nenv.render()\nenv.close()\nplt.ioff()\nplt.show()\n",
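The exploration schedule in select_action above decays exponentially from EPS_START to EPS_END. Evaluating it at a few step counts makes the shape of the schedule concrete; this sketch reuses the same constants as the script.

    import math

    EPS_START, EPS_END, EPS_DECAY = 0.9, 0.05, 200

    def eps_threshold(steps_done: int) -> float:
        return EPS_END + (EPS_START - EPS_END) * math.exp(-1.0 * steps_done / EPS_DECAY)

    for s in (0, 200, 400, 1000):
        print(s, round(eps_threshold(s), 3))   # 0.9, ~0.363, ~0.165, ~0.056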
"# Copyright 2021 Quantapix Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n# fine-tune on text translation\n\nimport logging\nimport numpy as np\nimport random\nimport torch\n\nfrom datasets import load_metric\nfrom torch.utils.data import DataLoader\nfrom transformers import (\n AutoModelForSeq2SeqLM,\n DataCollatorForSeq2Seq,\n MBartTokenizer,\n MBartTokenizerFast,\n default_data_collator,\n)\n\nfrom .params import TRAIN, EVAL, ALL\nfrom .runner import Runner as Base\n\nlog = logging.getLogger(__name__)\n\n\ndef postproc(xs, ls):\n xs = [x.strip() for x in xs]\n ls = [[x.strip()] for x in ls]\n return xs, ls\n\n\nclass Runner(Base):\n AutoModel = AutoModelForSeq2SeqLM\n\n @property\n def tokenizer(self):\n if self._tokenizer is None:\n ps = self.params\n t = super().tokenizer\n if isinstance(t, (MBartTokenizer, MBartTokenizerFast)):\n if ps.source_lang is not None:\n t.src_lang = ps.source_lang\n if ps.target_lang is not None:\n t.tgt_lang = ps.target_lang\n self.source_lang = ps.source_lang.split(\"_\")[0]\n self.target_lang = ps.target_lang.split(\"_\")[0]\n self.prefix = ps.source_prefix if ps.source_prefix is not None else \"\"\n return self._tokenizer\n\n @property\n def model(self):\n if self._model is None:\n ps = self.params\n t, m = self.tokenizer, super().model\n if m.config.decoder_start_token_id is None and isinstance(t, (MBartTokenizer, MBartTokenizerFast)):\n assert (\n ps.target_lang is not None and ps.source_lang is not None\n ), \"mBart needs --target_lang and --source_lang\"\n if isinstance(t, MBartTokenizer):\n m.config.decoder_start_token_id = t.lang_code_to_id[ps.target_lang]\n else:\n m.config.decoder_start_token_id = t.convert_tokens_to_ids(ps.target_lang)\n if m.config.decoder_start_token_id is None:\n raise ValueError(\"Needs `config.decoder_start_token_id`\")\n\n @property\n def train_ds(self):\n if self._train_ds is None:\n ps, mgr, ds = self.params, self.mgr, self.dataset\n with mgr.main_process_first():\n self._dataset = y = ds.map(\n self.prep_for_train,\n batched=True,\n remove_columns=self.cols[ALL],\n load_from_cache_file=not ps.overwrite_cache,\n desc=\"Running tokenizer on dataset\",\n )\n y = y[TRAIN]\n if ps.max_train_samples is not None:\n y = y.select(range(ps.max_train_samples))\n for i in random.sample(range(len(y)), 3):\n log.info(f\"Sample {i} of the training set: {y[i]}\")\n self._train_ds = y\n return self._train_ds\n\n def prep_for_train(self, xs):\n ps, t = self.params, self.tokenizer\n ins = [x[self.source_lang] for x in xs[\"translation\"]]\n targets = [x[self.target_lang] for x in xs[\"translation\"]]\n ins = [self.prefix + x for x in ins]\n ys = t(ins, max_length=ps.max_source_length, padding=ps.padding, truncation=True)\n with t.as_target_tokenizer():\n ls = t(targets, max_length=ps.max_target_length, padding=ps.padding, truncation=True)\n if self.padding == \"max_length\" and ps.ignore_pad_token_for_loss:\n ls[\"input_ids\"] 
= [[(y if y != t.pad_token_id else -100) for y in x] for x in ls[\"input_ids\"]]\n ys[\"labels\"] = ls[\"input_ids\"]\n return ys\n\n @property\n def loaders(self):\n if self._loaders is None:\n ps, t = self.params, self.tokenizer\n if ps.pad_to_max_length:\n c = default_data_collator\n else:\n c = DataCollatorForSeq2Seq(\n t,\n model=self.model,\n label_pad_token_id=-100 if ps.ignore_pad_token_for_loss else t.pad_token_id,\n pad_to_multiple_of=8 if self.mgr.use_fp16 else None,\n )\n t = DataLoader(self.train_ds, shuffle=True, collate_fn=c, batch_size=ps.per_device_train_batch_size)\n e = DataLoader(self.eval_ds, collate_fn=c, batch_size=ps.per_device_eval_batch_size)\n self._loaders = {TRAIN: t, EVAL: e}\n return self._loaders\n\n @property\n def metric(self):\n if self._metric is None:\n self._metric = load_metric(\"sacrebleu\")\n return self._metric\n\n def eval_epoch(self, e):\n ps, t, m, mgr = self.params, self.tokenizer, self.model, self.mgr\n m.eval()\n if ps.val_max_target_length is None:\n ps.val_max_target_length = ps.max_target_length\n kw = {\n \"max_length\": ps.val_max_target_length if ps is not None else self.config.max_length,\n \"num_beams\": ps.num_beams,\n }\n for xs in self.loaders[EVAL]:\n with torch.no_grad():\n ys = mgr.unwrap_model(m).generate(xs[\"input_ids\"], attention_mask=xs[\"attention_mask\"], **kw)\n ys = mgr.pad_across_processes(ys, dim=1, pad_index=t.pad_token_id)\n ls = xs[\"labels\"]\n if not ps.pad_to_max_length:\n ls = mgr.pad_across_processes(xs[\"labels\"], dim=1, pad_index=t.pad_token_id)\n ys = mgr.gather(ys).cpu().numpy()\n ls = mgr.gather(ls).cpu().numpy()\n if ps.ignore_pad_token_for_loss:\n ls = np.where(ls != -100, ls, t.pad_token_id)\n ys = t.batch_decode(ys, skip_special_tokens=True)\n ls = t.batch_decode(ls, skip_special_tokens=True)\n ys, ls = postproc(ys, ls)\n self.metric.add_batch(predictions=ys, references=ls)\n y = self.metric.compute()[\"score\"]\n mgr.print(f\"epoch {e}: bleu: {y}\")\n\n\ndef main():\n x = Runner()\n x.dataset\n x.config\n x.tokenizer\n x.model\n x.model.resize_token_embeddings(len(x.tokenizer))\n x.loaders\n x.prepare()\n x.train()\n x.save()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\"\"\"\npython xlate.py \\\n --model_name_or_path Helsinki-NLP/opus-mt-en-ro \\\n --source_lang en \\\n --target_lang ro \\\n --dataset_name wmt16 \\\n --dataset_config ro-en \\\n --output_dir ~/tmp/tst-translation\n\naccelerate launch xlate.py \\\n --model_name_or_path Helsinki-NLP/opus-mt-en-ro \\\n --source_lang en \\\n --target_lang ro \\\n --dataset_name wmt16 \\\n --dataset_config ro-en \\\n --output_dir ~/tmp/tst-translation\n\"\"\"\n",
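The translation runner above masks pad positions in the labels with -100 (the index ignored by the cross-entropy loss) and undoes the masking before decoding. A minimal round-trip of that convention; the pad id 0 is assumed here for illustration.

    import numpy as np

    pad_token_id = 0                       # illustrative pad id
    labels = np.array([[12, 7, 0, 0], [5, 9, 4, 0]])

    # replace padding with -100 so the loss ignores those positions
    masked = np.where(labels != pad_token_id, labels, -100)

    # restore the pad id before calling tokenizer.batch_decode
    restored = np.where(masked != -100, masked, pad_token_id)
    assert (restored == labels).all()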
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\",\n None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\",\n)\n\nflags.DEFINE_string(\"vocab_file\", None, \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None, \"The output directory where the model checkpoints will be written.\"\n)\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", None, \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", None, \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\"\n)\n\nflags.DEFINE_string(\n \"init_checkpoint\", None, \"Initial checkpoint (usually from a pre-trained BERT model).\"\n)\n\nflags.DEFINE_bool(\n \"do_lower_case\",\n True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\",\n)\n\nflags.DEFINE_integer(\n \"max_seq_length\",\n 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\",\n)\n\nflags.DEFINE_integer(\n \"doc_stride\",\n 128,\n \"When splitting up a long document into chunks, how much stride to \" \"take between chunks.\",\n)\n\nflags.DEFINE_integer(\n \"max_query_length\",\n 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\",\n)\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0, \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\",\n 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\",\n)\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000, \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\",\n 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\",\n)\n\nflags.DEFINE_integer(\n \"max_answer_length\",\n 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\",\n)\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\",\n None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\",\n)\n\ntf.flags.DEFINE_string(\n \"tpu_zone\",\n None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\n \"gcp_project\",\n None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\",\n)\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8, \"Only used if `use_tpu` is True. Total number of TPU cores to use.\"\n)\n\nflags.DEFINE_bool(\n \"verbose_logging\",\n False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\",\n)\n\nflags.DEFINE_bool(\n \"version_2_with_negative\",\n False,\n \"If true, the SQuAD examples contain some that do not have an answer.\",\n)\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\",\n 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\",\n)\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(\n self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False,\n ):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(\n self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None,\n ):\n self.unique_id = 
unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\"\n )\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length - 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(doc_tokens[start_position : (end_position + 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text)\n )\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\n \"Could not find answer: '%s' vs. 
'%s'\",\n actual_text,\n cleaned_answer_text,\n )\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible,\n )\n examples.append(example)\n\n return examples\n\n\ndef convert_examples_to_features(\n examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn\n):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens,\n tok_start_position,\n tok_end_position,\n tokenizer,\n example.orig_answer_text,\n )\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"]\n )\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\n \"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens])\n )\n tf.logging.info(\n \"token_to_orig_map: %s\"\n % \" \".join([\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])\n )\n tf.logging.info(\n \"token_is_max_context: %s\"\n % \" \".join([\"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)])\n )\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position : (end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible,\n )\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. 
Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start : (new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, use_one_hot_embeddings\n):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n d_hidden = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\",\n [2, d_hidden],\n initializer=tf.truncated_normal_initializer(stddev=0.02),\n )\n\n output_bias = tf.get_variable(\"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, d_hidden])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, 
end_logits)\n\n\ndef model_fn_builder(\n bert_config,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu,\n use_one_hot_embeddings,\n):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (\n assignment_map,\n initialized_variable_names,\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu\n )\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn\n )\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn\n )\n else:\n raise ValueError(\"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n 
name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder,\n )\n )\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\", [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(\n all_examples,\n all_features,\n all_results,\n n_best_size,\n max_answer_length,\n do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"],\n )\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index],\n )\n )\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit,\n )\n )\n prelim_predictions = sorted(\n prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True\n )\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"]\n )\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit\n )\n )\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit, end_logit=null_end_logit\n )\n )\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(_NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = (\n score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)\n )\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Length not equal after stripping spaces: '%s' vs '%s'\", orig_ns_text, tok_ns_text\n )\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position : (orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = 
create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.n_pos:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" % (FLAGS.max_seq_length, bert_config.n_pos)\n )\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length)\n )\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case\n )\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project\n )\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host,\n ),\n )\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n )\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size,\n )\n\n if FLAGS.do_train:\n # We 
write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"), is_training=True\n )\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature,\n )\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n )\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"), is_training=False\n )\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature,\n )\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False,\n )\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)\n )\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(\n eval_examples,\n eval_features,\n all_results,\n FLAGS.n_best_size,\n FLAGS.max_answer_length,\n FLAGS.do_lower_case,\n output_prediction_file,\n output_nbest_file,\n output_null_log_odds_file,\n )\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] | [
[
"torch.abs",
"torch.nn.GELU",
"torch.mean",
"torch.max",
"torch.floor",
"torch.sign",
"torch.zeros",
"torch.sqrt",
"torch.round",
"torch.min",
"torch.sum",
"torch.zeros_like",
"torch.from_numpy",
"torch.tensor",
"torch.no_grad",
"torch.clamp",
"numpy.array",
"torch.kthvalue"
],
[
"torch.allclose",
"torch.Size",
"torch.zeros_like",
"torch.abs"
],
[
"tensorflow.train.load_variable",
"torch.from_numpy",
"tensorflow.train.list_variables"
],
[
"matplotlib.pyplot.imshow",
"torch.max",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"numpy.clip",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"torch.optim.lr_scheduler.StepLR",
"matplotlib.pyplot.title",
"torch.nn.Linear",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.pause"
],
[
"numpy.hstack",
"numpy.array"
],
[
"torch.device",
"torch.allclose",
"torch.no_grad",
"torch.tensor"
],
[
"torch.cat",
"torch.zeros",
"torch.no_grad",
"torch.cuda.is_available",
"torch.from_numpy",
"torch.tensor",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.figure",
"torch.nn.SmoothL1Loss",
"matplotlib.pyplot.title",
"numpy.ascontiguousarray",
"torch.nn.Conv2d",
"matplotlib.get_backend",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause"
],
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"numpy.where"
],
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.logging.warning",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.gfile.Open",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.train.Scaffold",
"tensorflow.transpose",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
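The row above pairs each source file (the code column) with the fully qualified API calls extracted from it (the apis column) and the library versions those calls are compatible with (possible_versions). As a minimal sketch of how the apis entries can be cross-checked against the paired code string, the snippet below walks the file's AST and collects dotted call targets such as "np.array" or "tf.logging.info". The record layout (a dict with "code" and "apis" keys) and the alias table are assumptions for illustration only, not the dataset's actual storage format, and `from module import name` forms would need extra handling.

import ast

def called_names(source: str) -> set:
    """Collect dotted names used as call targets, e.g. 'np.array' or 'tf.logging.info'."""
    names = set()
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):   # unwind attribute chains: tf.logging.info -> ['info', 'logging']
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):           # keep only plain dotted calls rooted at a name
            parts.append(func.id)
            names.add(".".join(reversed(parts)))
    return names

def missing_apis(record: dict, aliases: dict) -> list:
    """Return listed APIs whose call (after alias expansion, e.g. numpy -> np) is absent from the code."""
    found = called_names(record["code"])
    missing = []
    for api in record["apis"]:
        top, _, rest = api.partition(".")
        aliased = aliases.get(top, top) + ("." + rest if rest else "")
        if api not in found and aliased not in found:
            missing.append(api)
    return missing

# Example usage with the top-level import aliases seen in the files above (hypothetical record dict):
# gaps = missing_apis({"code": src, "apis": ["numpy.array", "torch.no_grad"]},
#                     aliases={"numpy": "np", "tensorflow": "tf"})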
frezaeix/evaluating_bdl | [
"bd0a464981c18de8479b6be2d91867527016c8d3"
] | [
"toyClassification/MC-Dropout-MAP-01-Adam/eval.py"
] | [
"# code-checked\n# server-checked\n\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\n\nbatch_size = 32\n\nM = 4\n\nx_min = -6.0\nx_max = 6.0\nnum_points = 60\n\nnetwork = ToyNet(\"Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0\", project_dir=\"../\").cuda()\nnetwork.load_state_dict(torch.load(\"../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth\"))\n\nM_float = float(M)\nprint (M_float)\n\nnetwork.eval()\n\nfalse_prob_values = np.zeros((num_points, num_points))\nx_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)\nfor x_1_i, x_1_value in enumerate(x_values):\n for x_2_i, x_2_value in enumerate(x_values):\n x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))\n\n mean_prob_vector = np.zeros((2, ))\n for i in range(M):\n logits = network(x) # (shape: (1, num_classes)) (num_classes==2)\n prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))\n\n prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))\n\n mean_prob_vector += prob_vector/M_float\n\n false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]\n\nplt.figure(1)\nx_1, x_2 = np.meshgrid(x_values, x_values)\nplt.pcolormesh(x_1, x_2, false_prob_values, cmap=\"RdBu\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density.png\" % network.model_dir)\nplt.close(1)\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values, cmap=\"binary\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_gray.png\" % network.model_dir)\nplt.close(1)\n\nx_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)\nx_1, x_2 = np.meshgrid(x_values, x_values)\ndist = np.sqrt(x_1**2 + x_2**2)\nfalse_prob_values_GT = np.zeros(dist.shape)\nfalse_prob_values_GT[dist < 2.4] = 1.0\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap=\"RdBu\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density - Ground Truth\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_GT.png\" % network.model_dir)\nplt.close(1)\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap=\"binary\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density - Ground Truth\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_gray_GT.png\" % network.model_dir)\nplt.close(1)\n\nwith open(\"../HMC/false_prob_values.pkl\", \"rb\") as file: # (needed for python3)\n false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))\nx_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)\nx_1, x_2 = np.meshgrid(x_values, x_values)\nx_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)\nx_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)\nfig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))\nim = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap=\"RdBu\", vmin=0, vmax=1)\nim = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap=\"RdBu\", vmin=0, vmax=1)\nfig.colorbar(im, ax=axes.flat)\nplt.savefig(\"%s/predictive_density_comparison.png\" % 
network.model_dir)\nplt.close()\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.title",
"torch.load",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.pcolormesh",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
drunkpig/rlcard | [
"db8a410bbfefb7f9fd958239aae8d79a8bfb29d3",
"db8a410bbfefb7f9fd958239aae8d79a8bfb29d3"
] | [
"examples/uno_single.py",
"rlcard/envs/blackjack.py"
] | [
"''' A toy example of training single-agent algorithm on Leduc Hold'em\n The environment can be treated as normal OpenAI gym style single-agent environment\n'''\n\nimport tensorflow as tf\nimport os\nimport numpy as np\n\nimport rlcard\nfrom rlcard.agents.dqn_agent import DQNAgent\nfrom rlcard.agents.random_agent import RandomAgent\nfrom rlcard.utils.utils import set_global_seed, tournament\nfrom rlcard.utils.logger import Logger\n\n# Make environment\nenv = rlcard.make('uno', config={'single_agent_mode':True})\neval_env = rlcard.make('uno', config={'single_agent_mode':True})\n\n# Set the iterations numbers and how frequently we evaluate the performance\nevaluate_every = 1000\nevaluate_num = 10000\ntimesteps = 100000\n\n# The intial memory size\nmemory_init_size = 1000\n\n# Train the agent every X steps\ntrain_every = 1\n\n# The paths for saving the logs and learning curves\nlog_dir = './experiments/uno_single_dqn_result/'\n\n# Set a global seed\nset_global_seed(0)\n\nwith tf.Session() as sess:\n\n # Initialize a global step\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Set up the agents\n agent = DQNAgent(sess,\n scope='dqn',\n action_num=env.action_num,\n replay_memory_init_size=memory_init_size,\n train_every=train_every,\n state_shape=env.state_shape,\n mlp_layers=[128,128])\n # Initialize global variables\n sess.run(tf.global_variables_initializer())\n\n # Init a Logger to plot the learning curve\n logger = Logger(log_dir)\n\n state = env.reset()\n\n for timestep in range(timesteps):\n action = agent.step(state)\n next_state, reward, done = env.step(action)\n ts = (state, action, reward, next_state, done)\n agent.feed(ts)\n\n if timestep % evaluate_every == 0:\n rewards = []\n state = eval_env.reset()\n for _ in range(evaluate_num):\n action, _ = agent.eval_step(state)\n _, reward, done = env.step(action)\n if done:\n rewards.append(reward)\n logger.log_performance(env.timestep, np.mean(rewards))\n\n # Close files in the logger\n logger.close_files()\n\n # Plot the learning curve\n logger.plot('DQN')\n \n # Save model\n save_dir = 'models/uno_single_dqn'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(save_dir, 'model'))\n \n",
"import numpy as np\n\nfrom rlcard.envs.env import Env\nfrom rlcard.games.blackjack.game import BlackjackGame as Game\n\n\nclass BlackjackEnv(Env):\n ''' Blackjack Environment\n '''\n\n def __init__(self, config):\n ''' Initialize the Blackjack environment\n '''\n self.game = Game()\n super().__init__(config)\n self.rank2score = {\"A\":11, \"2\":2, \"3\":3, \"4\":4, \"5\":5, \"6\":6, \"7\":7, \"8\":8, \"9\":9, \"T\":10, \"J\":10, \"Q\":10, \"K\":10}\n self.actions = ['hit', 'stand']\n self.state_shape = [2]\n\n def _get_legal_actions(self):\n ''' Get all leagal actions\n\n Returns:\n encoded_action_list (list): return encoded legal action list (from str to int)\n '''\n encoded_action_list = []\n for i in range(len(self.actions)):\n encoded_action_list.append(i)\n return encoded_action_list\n\n def _extract_state(self, state):\n ''' Extract the state representation from state dictionary for agent\n\n Args:\n state (dict): Original state from the game\n\n Returns:\n observation (list): combine the player's score and dealer's observable score for observation\n '''\n cards = state['state']\n my_cards = cards[0]\n dealer_cards = cards[1]\n\n def get_scores_and_A(hand):\n score = 0\n has_a = 0\n for card in hand:\n score += self.rank2score[card[1:]]\n if card[1] == 'A':\n has_a = 1\n if score > 21 and has_a == 1:\n score -= 10\n return score, has_a\n\n my_score, _ = get_scores_and_A(my_cards)\n dealer_score, _ = get_scores_and_A(dealer_cards)\n obs = np.array([my_score, dealer_score])\n\n legal_actions = [i for i in range(len(self.actions))]\n extracted_state = {'obs': obs, 'legal_actions': legal_actions}\n if self.allow_raw_data:\n extracted_state['raw_obs'] = state\n extracted_state['raw_legal_actions'] = [a for a in self.actions]\n if self.record_action:\n extracted_state['action_record'] = self.action_recorder\n return extracted_state\n\n def get_payoffs(self):\n ''' Get the payoff of a game\n\n Returns:\n payoffs (list): list of payoffs\n '''\n if self.game.winner['player'] == 0 and self.game.winner['dealer'] == 1:\n return [-1]\n elif self.game.winner['dealer'] == 0 and self.game.winner['player'] == 1:\n return [1]\n elif self.game.winner['player'] == 1 and self.game.winner['dealer'] == 1:\n return [0]\n\n def _decode_action(self, action_id):\n ''' Decode the action for applying to the game\n\n Args:\n action id (int): action id\n\n Returns:\n action (str): action for the game\n '''\n return self.actions[action_id]\n"
] | [
[
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"numpy.mean",
"tensorflow.Session",
"tensorflow.train.Saver"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
noashin/local_global_attention_model | [
"531e6a4cc1dc364a6a4168de1b9f972727a8aeb1"
] | [
"src/LocalChoiceModel/vel_param.py"
] | [
"import sys\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nsys.path.append('./../../')\nfrom src.HMC.hmcparameter import HMCParameter\n\nclass VelParam(HMCParameter):\n def __init__(self, init_val):\n super().__init__(np.array(init_val))\n dim = np.array(init_val).shape\n self.mu = np.zeros(dim)\n self.sigma = 1\n\n def gen_init_value(self):\n self.value = multivariate_normal.rvs(self.mu, self.sigma)\n\n def get_energy_grad(self):\n return self.value\n\n def get_energy(self):\n return np.dot(self.value, self.value) / 2\n\n def get_energy_for_value(self, value):\n return np.dot(value, value) / 2\n"
] | [
[
"scipy.stats.multivariate_normal.rvs",
"numpy.dot",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ruriboshi/propnet | [
"770703fb4fc344f785f89c02f26b31ea5733d2bd",
"770703fb4fc344f785f89c02f26b31ea5733d2bd"
] | [
"propnet/models/python/electromechanical_coupling.py",
"propnet/web/layouts_correlate.py"
] | [
"import numpy as np\n\n\ndef plug_in(symbol_values):\n\n req_symbols = [\"S\", \"e\", \"d\"]\n data = {}\n if all(s in symbol_values for s in req_symbols):\n e = symbol_values[\"e\"]\n S = symbol_values[\"S\"]\n d = symbol_values[\"d\"]\n\n data[\"k\"] = np.abs(d[2][2] / np.sqrt(e[2][2] * S[2][2]))\n\n return data\n\n\nDESCRIPTION = \"\"\"\nModel calculating the electromechanical coupling factor,\nwhich is the efficiency of converting eletrical energy\nto acoustic energy in a piezoeletric transducer or filter\n\"\"\"\n\ntest_data = [{\n \"inputs\": {\n \"S\": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],\n [-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],\n [-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],\n \"e\": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],\n \"d\": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],\n [0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],\n [0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]\n },\n \"outputs\": {\n \"k\": 0.47445902984\n }\n}]\n\nconfig = {\n \"name\": \"electromechanical_coupling\",\n \"connections\": [{\n \"inputs\": [\"e\", \"S\", \"d\"],\n \"outputs\": [\"k\"]\n }],\n \"categories\": [\"mechanical\", \"electrical\"],\n \"variable_symbol_map\": {\n \"S\": \"compliance_tensor_voigt\",\n \"e\": \"dielectric_tensor\",\n \"d\": \"piezoelectric_tensor_converse\",\n \"k\": \"electromechanical_coupling\"\n },\n \"description\": DESCRIPTION,\n \"implemented_by\": [\"shyamd\"],\n \"references\": [],\n \"plug_in\": plug_in,\n \"test_data\": test_data\n}\n",
"import dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\n\nimport os\nfrom monty.serialization import loadfn\nfrom monty.json import MontyDecoder\n\n# noinspection PyUnresolvedReferences\nimport propnet.symbols\nfrom propnet.core.registry import Registry\n\nfrom propnet.web.layouts_plot import scalar_symbols\n\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nimport plotly.graph_objs as go\n\nfrom pymatgen import MPRester\n\nfrom pymongo.errors import ServerSelectionTimeoutError\n\nimport logging\nimport numpy as np\nfrom json import JSONDecodeError\n\nlogger = logging.getLogger(__name__)\nmpr = MPRester()\n\n\ntry:\n store_data = os.environ[\"PROPNET_CORRELATION_STORE_FILE\"]\n if os.path.exists(store_data):\n # If store_data contains a file path\n store = loadfn(store_data)\n else:\n # Store data contains a json string\n store = MontyDecoder().decode(store_data)\n store.connect()\nexcept (ServerSelectionTimeoutError, KeyError, FileNotFoundError, JSONDecodeError) as ex:\n if isinstance(ex, ServerSelectionTimeoutError):\n logger.warning(\"Unable to connect to propnet correlation db!\")\n if isinstance(ex, KeyError):\n logger.warning(\"PROPNET_CORRELATION_STORE_FILE var not set!\")\n if isinstance(ex, FileNotFoundError):\n logger.warning(\"File specified in PROPNET_CORRELATION_STORE_FILE not found!\")\n if isinstance(ex, JSONDecodeError):\n logger.warning(\"PROPNET_CORRELATION_STORE_FILE does not contain a file or a valid monty JSON string!\")\n from maggma.stores import MemoryStore\n store = MemoryStore()\n store.connect()\n # layout won't work if database is down, but at least web app will stay up\n\ncorrelation_funcs = list(store.query().distinct(\"correlation_func\"))\n\ncorrelation_func_info = {\n \"mic\": {\"name\": \"Maximal information coefficient\",\n \"bounds\": lambda x: 0 <= round(x) <= 1},\n \"linlsq\": {\"name\": \"Linear least squares, R-squared\",\n \"bounds\": lambda x: 0 <= round(x) <= 1},\n \"theilsen\": {\"name\": \"Theil-Sen regression, R-squared\",\n \"bounds\": lambda x: -10 <= round(x) <= 1}, # Arbitrary lower bound to filter nonsense data\n \"ransac\": {\"name\": \"RANSAC regression\",\n \"bounds\": lambda x: -10 <= round(x) <= 1}, # Arbitrary lower bound to filter nonsense data\n \"pearson\": {\"name\": \"Pearson R correlation\",\n \"bounds\": lambda x: -1 <= round(x) <= 1},\n \"spearman\": {\"name\": \"Spearman R correlation\",\n \"bounds\": lambda x: -1 <= round(x) <= 1}\n }\n\n\ndef violin_plot(correlation_func=None):\n if correlation_func is None:\n return go.Figure()\n\n path_lengths = sorted(\n store.query().distinct(\"shortest_path_length\"), key=lambda x: (x is None, x)\n )\n\n props = {p: True for p in [\"property_x\", \"property_y\", \"shortest_path_length\",\n \"correlation\", \"n_points\"]}\n props['_id'] = False\n\n docs = store.query(\n criteria={\"correlation_func\": correlation_func, \"n_points\": {\"$ne\": 0}},\n properties=props\n )\n\n points = {p: [] for p in path_lengths}\n for d in docs:\n points[d[\"shortest_path_length\"]].append(d)\n data = []\n bounds_test_func = correlation_func_info[correlation_func]['bounds']\n pos_idx = 0\n for p in path_lengths:\n points_p = points[p]\n if len(points_p) == 0:\n continue\n y_data = []\n custom_data = []\n\n for pt in points_p:\n corr = pt['correlation']\n if corr is not None and np.isfinite(corr) and bounds_test_func(corr):\n y_data.append(\"{:0.5f}\".format(corr))\n custom_data.append(pt)\n\n x_data = [pos_idx] * 
len(y_data)\n if p is not None:\n name = f\"{p} model\"\n if p != 1:\n name += \"s\"\n else:\n name = \"Not connected\"\n trace = {\n \"type\": \"violin\",\n \"x\": x_data,\n \"y\": y_data,\n \"customdata\": custom_data,\n \"name\": name,\n \"box\": {\"visible\": True},\n \"points\": \"all\",\n \"meanline\": {\"visible\": False},\n \"hoverinfo\": \"y\",\n }\n data.append(trace)\n pos_idx += 1\n\n func_description = correlation_func_info[correlation_func][\"name\"]\n\n layout = {\n \"title\": f\"Correlation between properties based on<br>{func_description} score\",\n \"yaxis\": {\"zeroline\": False, \"showgrid\": False, \"title\": \"Correlation score\"},\n \"xaxis\": {\"showticklabels\": False}\n }\n\n return go.Figure(data=data, layout=layout)\n\n\ndef correlate_layout(app):\n\n graph = dcc.Graph(\n figure=violin_plot(),\n style={\"height\": 600},\n config={\"showLink\": False, \"displayModeBar\": False},\n id=\"correlation_violin\",\n )\n\n correlation_func_choice = dcc.Dropdown(\n id=\"correlation_func_choice\",\n options=[{\"label\": v['name'], \"value\": k} for k, v in correlation_func_info.items()],\n value=\"mic\",\n placeholder=\"Select correlation function\"\n )\n\n explain_text = dcc.Markdown(\"\"\"\n##### How to read this plot\n\nThe plot above is called a \"violin plot.\" The width of the plot shows the density of points that have\nthe value on the y-axis. Inscribed in the \"violin\" is a standard box-and-whisker plot, with lines \nindicating mean and quartile values. To the left of the violin plot are the actual data points used to\ncalculate the statistical data.\n\nEach violin plot shows correlation scores for scalar property pairs separated by different numbers \nof models on the _propnet_ knowledge graph. Property pairs that are not currently connected by any path \nare shown on the far right.\n\nSelect the dropdown above the plot to choose between different correlation metrics. For certain metrics,\nthe data have been filtered to make the plot more readable.\n\nTo isolate a single violin plot, double-click its entry in the legend. You can hide/show plots by clicking\ntheir entries on the legend. To find out more information about a point on the plot, click it and the\ninformation will be populated on the right. Click \"View the data plot\" to show the two properties plotted\nagainst each other in the \"Plot\" view.\n\n_Note: If you encounter issues with the mouseover labels on the plot, try\nisolating the plot you wish to explore by double-clicking its legend entry and/or setting the\naxes to a wider display range. 
The graphing package is in development._\n\"\"\")\n\n plot_display_layout = html.Div([\n correlation_func_choice,\n graph],\n className=\"seven columns\")\n\n info_layout = html.Div(className=\"five columns\", children=[\n dcc.Dropdown(id='choose-corr-x', options=[\n {'label': v.display_names[0], 'value': k} for k, v in\n scalar_symbols.items()\n ], placeholder=\"Select first property\"),\n dcc.Dropdown(id='choose-corr-y', options=[\n {'label': v.display_names[0], 'value': k} for k, v in\n scalar_symbols.items()\n ], placeholder=\"Select second property\"),\n html.Div(id=\"point_info\",\n children=[dcc.Markdown(\"\"\"\n##### Point information\nNo point selected\n\"\"\")])\n ])\n\n point_clicked = dcc.Store(id='point_clicked', storage_type='memory',\n data=False)\n\n layout = html.Div([\n html.Div([plot_display_layout, info_layout, point_clicked],\n className=\"row\"),\n html.Div([explain_text],\n className=\"row\")\n ])\n\n @app.callback(\n Output(\"correlation_violin\", \"figure\"),\n [Input(\"correlation_func_choice\", \"value\")],\n )\n def regenerate_figure_for_new_correlation_func(correlation_func):\n if not correlation_func:\n raise PreventUpdate\n return violin_plot(correlation_func)\n\n @app.callback(\n Output(\"corr-table\", \"style_data_conditional\"),\n [Input(\"correlation_violin\", \"figure\")],\n [State(\"correlation_func_choice\", \"value\"),\n State(\"point_clicked\", \"data\")]\n )\n def highlight_table_row(_, selected_func, point_info_populated):\n if not point_info_populated:\n raise PreventUpdate\n table_highlight = [{\n 'if': {'row_index': correlation_funcs.index(selected_func)},\n \"backgroundColor\": \"#3D9970\",\n 'color': 'white'\n }]\n return table_highlight\n\n @app.callback(\n [Output(\"choose-corr-x\", \"value\"),\n Output(\"choose-corr-y\", \"value\")],\n [Input(\"correlation_violin\", \"clickData\")]\n )\n def update_xy_selection(selected_points):\n if not selected_points:\n raise PreventUpdate\n target_data = selected_points['points'][0]['customdata']\n prop_x, prop_y = target_data['property_x'], target_data['property_y']\n\n return prop_x, prop_y\n\n @app.callback(\n [Output(\"point_info\", \"children\"),\n Output(\"point_clicked\", \"data\")],\n [Input(\"choose-corr-x\", \"value\"),\n Input(\"choose-corr-y\", \"value\")],\n [State(\"correlation_func_choice\", \"value\")]\n )\n def populate_point_information(prop_x, prop_y, current_func):\n if not (prop_x and prop_y):\n raise PreventUpdate\n\n prop_x_name = Registry(\"symbols\")[prop_x].display_names[0]\n prop_y_name = Registry(\"symbols\")[prop_y].display_names[0]\n\n data = list(store.query(criteria={'property_x': prop_x,\n 'property_y': prop_y}))\n\n path_length = data[0]['shortest_path_length']\n if path_length is None:\n path_text = \"not connected\"\n elif path_length == 0:\n path_text = \"properties are the same\"\n else:\n path_text = f\"separated by {path_length} model\"\n if path_length > 1:\n path_text += \"s\"\n point_text = dcc.Markdown(f\"\"\"\n##### Point information\n**x-axis property:** {prop_x_name}\n\n**y-axis property:** {prop_y_name}\n\n**distance apart on graph:** {path_text}\n\n**number of data points:** {data[0]['n_points']}\n\"\"\")\n\n # This ensures we know the ordering of the rows\n correlation_data = {\n d['correlation_func']:\n {'Correlation Function': correlation_func_info[d['correlation_func']][\"name\"],\n 'Correlation Value': f\"{d['correlation']:0.5f}\"}\n for d in data}\n correlation_data = [correlation_data[func] for func in correlation_funcs]\n\n correlation_table = 
dt.DataTable(id='corr-table', data=correlation_data,\n columns=[{'id': val, 'name': val}\n for val in ('Correlation Function', 'Correlation Value')],\n editable=False,\n style_data_conditional=[{\n 'if': {'row_index': correlation_funcs.index(current_func)},\n \"backgroundColor\": \"#3D9970\",\n 'color': 'white'\n }],\n style_cell={\n 'font-family': 'HelveticaNeue',\n 'text-align': 'left'\n },\n style_header={\n 'fontWeight': 'bold',\n 'font-family': 'HelveticaNeue',\n 'text-align': 'left'\n })\n link_to_plot = dcc.Link(\"View the data plot\",\n href=f'/plot?x={prop_x}&y={prop_y}')\n return [point_text, correlation_table, link_to_plot], True\n\n return layout\n"
] | [
[
"numpy.sqrt"
],
[
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eduardojdiniz/Buzznauts | [
"8ac242a8d5309b4090a0f0b148ec275cac762bc0"
] | [
"analysis/baseline/s02_perform_encoding.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\nimport numpy as np\nimport os\nimport os.path as op\nimport argparse\nimport torch\nfrom Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device\nfrom Buzznauts.analysis.baseline import get_activations, predict_fmri_fast\nfrom tqdm import tqdm\n\n\ndef main():\n description = 'Encoding model analysis for Algonauts 2021'\n parser = argparse.ArgumentParser(description=description)\n\n buzz_root = '/home/[email protected]/proj/Buzznauts'\n baseline = op.join(buzz_root, 'models/baseline')\n parser.add_argument('-rd', '--result_dir',\n help='saves predicted fMRI activity',\n default=op.join(baseline, 'results'),\n type=str)\n parser.add_argument('-ad', '--activations_dir',\n help='directory containing DNN activations',\n default=op.join(baseline, 'activations'),\n type=str)\n parser.add_argument('-model', '--model',\n help='model under which predicted fMRI will be saved',\n default='alexnet',\n type=str)\n _help = 'layer from which activations will be used to train & predict fMRI'\n parser.add_argument('-l', '--layer',\n help=_help,\n default='layer_5',\n type=str)\n parser.add_argument(\n '-sub', '--sub',\n help='subject number from which fMRI data will be used',\n default='sub04', type=str)\n parser.add_argument('-r', '--roi',\n help='brain region from which fMRI data will be used',\n default='EBA',\n type=str)\n _help = 'test or val, val returns mean correlation ' + \\\n 'by using 10% of training data for validation'\n parser.add_argument('-m', '--mode',\n help=_help,\n default='val',\n type=str)\n parser.add_argument('-fd', '--fmri_dir',\n help='directory containing fMRI activity',\n default=op.join(buzz_root, 'data/fmri'),\n type=str)\n parser.add_argument('-v', '--visualize',\n help='visualize whole brain in MNI space or not',\n default=True,\n type=bool)\n _help = 'number of voxel to fit at one time in case of memory constraints'\n parser.add_argument('-b', '--batch_size',\n help=_help,\n default=1000,\n type=int)\n args = vars(parser.parse_args())\n\n mode = args['mode']\n sub = args['sub']\n ROI = args['roi']\n model = args['model']\n layer = args['layer']\n visualize_results = args['visualize']\n batch_size = args['batch_size']\n\n device = set_device()\n\n if ROI == \"WB\":\n track = \"full_track\"\n else:\n track = \"mini_track\"\n\n activations_dir = op.join(args['activations_dir'], 'pca_100')\n fmri_dir = op.join(args['fmri_dir'], track)\n\n sub_fmri_dir = op.join(fmri_dir, sub)\n results_dir = op.join(args['result_dir'], model, layer, track, sub)\n if not op.exists(results_dir):\n os.makedirs(results_dir)\n\n print(\"ROi is : \", ROI)\n\n features_train, features_test = get_activations(activations_dir,\n layer)\n if track == \"full_track\":\n fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)\n else:\n fmri_train_all = get_fmri(sub_fmri_dir, ROI)\n num_voxels = fmri_train_all.shape[1]\n\n if mode == 'val':\n # Here as an example we use first 900 videos as training and rest of\n # the videos as validation\n features_test = features_train[900:, :]\n features_train = features_train[:900, :]\n fmri_train = fmri_train_all[:900, :]\n fmri_test = fmri_train_all[900:, :]\n pred_fmri = np.zeros_like(fmri_test)\n pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')\n else:\n fmri_train = fmri_train_all\n num_test_videos = 102\n pred_fmri = np.zeros((num_test_videos, num_voxels))\n pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')\n\n print(\"number of voxels is \", num_voxels)\n i = 0\n with tqdm(total=100) as 
pbar:\n while i < num_voxels - batch_size:\n j = i + batch_size\n pred_fmri[:, i:j] = predict_fmri_fast(features_train,\n features_test,\n fmri_train[:, i:j],\n device=device)\n i = j\n pbar.update((100*i) // num_voxels)\n pred_fmri[:, i:] = predict_fmri_fast(features_train,\n features_test,\n fmri_train[:, i:i + batch_size],\n device=device)\n\n if mode == 'val':\n score = vectorized_correlation(fmri_test, pred_fmri)\n print(\"Mean correlation for ROI : \", ROI, \"in \", sub, \" is :\",\n round(score.mean(), 6))\n\n # result visualization for whole brain (full_track)\n if track == \"full_track\" and visualize_results:\n brain_mask = op.join(buzz_root, 'data/fmri/example.nii')\n nii_save_path = op.join(results_dir, ROI + '_val.nii')\n\n view_args = {'brain_mask': brain_mask,\n 'nii_save_path': nii_save_path,\n 'score': score,\n 'voxel_mask': voxel_mask}\n\n view = visualize_activity_surf(sub, **view_args)\n view_save_path = op.join(results_dir, ROI + '_val.html')\n view.save_as_html(view_save_path)\n print(\"Results saved in this directory: \", results_dir)\n view.open_in_browser()\n\n np.save(pred_fmri_save_path, pred_fmri)\n\n print(\"ROI done : \", ROI)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Anders-Holst/Bonsai | [
"841aa4e12c8bea8945396bd232c2006260127507"
] | [
"datapreparation/analyze.py"
] | [
"#! /usr/bin/env python3\n\n\n\"\"\" -------------------------------\n\n analyse.py\n\n Copyright (C) 2018 RISE\n This code was produced by RISE\n The 2013-04-10 version\n\n bonsai/src_v02/analyze.py\n\n simple analysis of pandas dataframes data\n such as \n\n 1. find duplicated rows\n\n 2. number of unique values in a column\n\n 3. number of unique values in common \n between two columns in two different\n files\n \n 4. \n \n------------------------------------\"\"\"\n\n\nimport global_settings as gs\nimport numpy as np\nimport pandas as pd\nimport bonsai_io as bio\nimport common\nimport copy\n\ndef nr_of_unique_rows(df):\n d = df.drop_duplicates()\n return len(d)\n\ndef nr_of_unique_values_in_cols(df, cols):\n c = df.drop_duplicates(subset = cols)\n return len(c)\n\n\ndef nr_of_unique_values(df, col):\n c = df[col].dropna()\n c = c.drop_duplicates()\n return len(c)\n\n\"\"\"\ndef nr_of_unique_numeric_values(df, col):\n\n c = df[col].dropna()\n c = c.drop_duplicates()\n c = c.str.isnumeric() \n c = c[c].index.values\n\"\"\"\n\n\ndef nr_of_nonnan_values(df, col):\n\n c = df[col].dropna()\n return len(c)\n \ndef nr_of_unique_digital_values(df, col):\n\n c = df[col].dropna()\n c = c.drop_duplicates()\n c = c.str.isdigit() \n c = c[c].index.values\n # df = df.drop_duplicates(subset = col)\n # df = df[ df[col].dropna().str.isdigit() ]\n # df = df[ df[col].str.contains('\\d', regex=True) ]\n return len(c)\n\ndef duplicated_rows(df):\n df['dup'] = df.duplicated()\n df = df[df['dup'] == True]\n return df\n\ndef print_duplicated_rows(df, nr):\n dup = duplicated_rows(df)\n print('Nr of rows in total', len(df))\n print('Nr of duplicated rows', len(dup))\n nr = min( nr,len(dup) )\n if nr > 0:\n print('the first', nr,' of them')\n print(dup[0:nr])\n return dup\n\ndef unique_number_values(df, col):\n df = df.drop_duplicates(subset = col)\n df = df[ df[col].str.contains('\\d', regex=True) ]\n return df\n\n\ndef info(df, name = ''):\n print()\n if name != '':\n print()\n print('--------------------------------------------------')\n print()\n print('\\tInfo on the file\\n\\t' + name)\n print()\n print('--------------------------------------------------')\n print()\n df_unique_nr = nr_of_unique_rows(df)\n print(' shape', df.shape)\n print(' unique rows', df_unique_nr)\n\n for c in df.columns:\n print()\n print('\\tInfo on non-nan values of column', c)\n print()\n nonnan_nr = nr_of_nonnan_values(df, c)\n unique_nr = nr_of_unique_values(df, c)\n digital_nr = nr_of_unique_digital_values(df, c)\n # numeric_nr = nr_of_unique_numeric_values(df, c)\n print('non-nan values', nonnan_nr)\n print(' unique values', unique_nr)\n print('digital values', digital_nr)\n # print('numeric values', unique_nr)\n \n print()\n # return unique_number_values(df, 'ICD10')\n\n# df = df[ df[c].str.contains('\\d', regex=True) ]\n\n\n\ndef readall():\n dia = bio.read_generated_dia()\n dgr = bio.read_diagroups()\n per = bio.readperson()\n ctr = bio.readcontrol()\n inc = bio.readincare()\n nic = bio.readnicare()\n dru = bio.readdrug()\n dcl = bio.readdrugclasses()\n tre = bio.readtreatment()\n sur = bio.readsurgery()\n cau = bio.readcause()\n\n data = [\n dia, \n dgr, \n per,\n ctr, \n inc, \n nic, \n dru, \n dcl, \n tre,\n sur,\n cau\n]\n\n name = [\n 'diagnos ',\n 'diagnosgrupp ',\n 'person ',\n 'kontrollgrupp ',\n 'sluten v_rd ',\n '_ppen v_rd ',\n 'l_kemedel ',\n 'l_kemedelsgrupper',\n 'behandling ',\n 'kirurgi ',\n 'orsak ',\n ]\n\n return data, name\n\n\ndef info_on_all():\n\n data, name = readall()\n \n for i in range(0, 
len(name)):\n info(data[i], name[i])\n\n\ndef compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):\n\n xs = list(dfx['LopNr'].values)\n ys = list(dfy['LopNr'].values)\n\n sx = set(xs)\n sy = set(ys)\n cut = sx & sy\n ux = sx - sy\n uy = sy - sx\n\n print()\n # print('shape ' + namex + '\\t\\t', dfx.shape)\n # print('shape ' + namey + '\\t\\t', dfy.shape)\n # print('unique Lopnr ' + namex + '\\t', len(xs))\n # print('unique Lopnr ' + namey + '\\t', len(ys))\n\n print('common Lopnr\\t\\t\\t', len(cut))\n print('Lopnr in ' + namex + ' only\\t', len(ux))\n print('Lopnr in ' + namey + ' only\\t', len(uy))\n print()\n\n ux = list(ux)\n uy = list(uy)\n ux.sort\n uy.sort\n return ux, uy\n\n\ndef readlopnr():\n dia = bio.read_generated_dia()\n per = bio.readperson()\n ctr = bio.readcontrol()\n inc = bio.readincare()\n nic = bio.readnicare()\n dru = bio.readdrug()\n tre = bio.readtreatment()\n sur = bio.readsurgery()\n cau = bio.readcause()\n\n data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]\n\n name = [\n 'diagnos ',\n 'person ',\n 'kontrollgrupp',\n 'sluten v_rd ',\n '_ppen v_rd ',\n 'l_kemedel ',\n 'behandling ',\n 'kirurgi ',\n 'orsak ',\n ]\n\n return data, name\n\n\ndef pairwise_lopnr_comparisions():\n\n data, name = readlopnr()\n\n for i in range(0, len(name)):\n for j in range(i+1, len(name)):\n print()\n print('--------------------------------------------------')\n print()\n print('\\tComparing ' + name[i] + ' with ' + name[j])\n print()\n print('--------------------------------------------------')\n print()\n\n compare_lopnr(data[i], data[j], name[i], name[j])\n\n\n\n\n\n\"\"\" -------------------------------\n \n 4. count amd list various types of diagnosis\n codes in care data\n \n------------------------------------\"\"\"\n\n\"\"\"\ndef is_icd10_class(x):\n if not common.isstr(x):\n return False\n if common.is_icd10(x):\n return False\n if len(x) < 3:\n return False\n if not x[0].isupper():\n return False\n return x[1].isdigit() and x[2].isdigit()\n\"\"\"\n\n\ndef code_count(xs):\n if not isinstance(xs, str):\n return 0\n return len(xs.split())\n\ndef icd10_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if common.is_icd10(x):\n # print(x)\n count += 1\n return count\n\ndef not_icd10_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if not common.is_icd10(x):\n # print(x)\n count += 1\n return count\n\ndef icd10_class_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if common.is_icd10_class(x):\n # print(x)\n count += 1\n return count\n\n\"\"\"\ndef code_list(xs):\n if not isinstance(xs, str):\n return 0\n return len(xs.split())\n\"\"\"\n\ndef count_and_print(df, table = False):\n dia = 'DIAGNOS'\n dfc = copy.copy(df)\n dfc['code_count'] = df[dia].apply(code_count)\n dfc['icd10_count'] = df[dia].apply(icd10_count)\n dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)\n dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)\n nr_of_codes = dfc['code_count'].sum()\n nr_of_icd10 = dfc['icd10_count'].sum()\n nr_of_not_icd10 = dfc['not_icd10_count'].sum()\n nr_of_class_codes = dfc['icd10_class_count'].sum()\n\n if table:\n print('nr_of_lines\\t', len(df))\n print('nr_of_codes\\t', nr_of_codes)\n print('nr_of_icd10\\t', nr_of_icd10)\n print('nr_of_not_icd10\\t', nr_of_not_icd10)\n print('nr_of_icd10_class_codes\\t', nr_of_class_codes)\n \n else:\n \n \n print(' nr_of_lines', len(df))\n print(' nr_of_codes', nr_of_codes)\n print(' nr_of_icd10', 
nr_of_icd10)\n print(' nr_of_not_icd10', nr_of_not_icd10)\n print(' nr_of_icd10_class_codes', nr_of_class_codes)\n\n\n \"\"\"\n for c in df1[dia].values:\n print('\\t', c)\n \"\"\"\n\n\ndef print_dates(df, table = False):\n date = 'INDATUM'\n\n if table:\n\n print('first date\\t', df[date].min())\n print('last date\\t', df[date].max())\n\n else:\n\n print(' first date', df[date].min())\n print(' last date', df[date].max())\n \n\ndef icd10_class_list(xs):\n if not isinstance(xs, str):\n return []\n codes = []\n for x in xs.split():\n if common.is_icd10_class(x):\n codes += [x]\n #print(codes)\n return codes\n\ndef flat(xs):\n ys = []\n for x in xs:\n ys += x\n return ys\n\n \n\ndef print_class_codes(df):\n dia = 'DIAGNOS'\n dfc = copy.copy(df)\n dfc['icd10_class'] = df[dia].apply(icd10_class_list)\n dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])\n dfc = dfc[dfc['is_class']]\n codes = np.unique(flat(list(dfc['icd10_class'].values)))\n for c in codes:\n print('\\t', c)\n \n\ndef diagnosis_code_count(df, print_class = False, table = False):\n \n date = 'INDATUM'\n nr = 'LopNr'\n icd10_start = np.datetime64('1998-01-01')\n\n \"\"\"\n size0 = len(df)\n df = df.dropna().reset_index(drop=True)\n print('nr of empty lines:', size0- len(df))\n \"\"\"\n \n df[date] = df[date].apply(bio.str2time)\n df = df.sort_values(date).dropna().reset_index(drop=True)\n\n df1 = df[df[date] < icd10_start] \n df2 = df[df[date] >= icd10_start]\n\n print() \n print('code counts before 1998_01_01:')\n print()\n \n print_dates(df1, table = table)\n count_and_print(df1, table = table)\n\n print() \n print('code counts from 1998_01_01')\n print()\n \n print_dates(df2, table = table)\n count_and_print(df2, table = table)\n if print_class:\n print()\n print(' all icd10_class_codes:')\n print_class_codes(df2)\n\n print()\n"
] | [
[
"numpy.datetime64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vinayak1998/Data_Driven_Astronomy | [
"1d0dd82b2e9066759c442807c30c70bef096d719"
] | [
"Week1/brightest_pixel_position_fits.py"
] | [
"import numpy as np\nimport time\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\n\ndef load_fits(filename):\n start = time.perf_counter()\n hdulist = fits.open(filename)\n data = hdulist[0].data\n result = np.where(data == np.amax(data))\n coornidates = list(zip(result[0],result[1]))\n end = time.perf_counter() - start\n return coornidates[0]\n \nif __name__ == '__main__':\n # Run your `load_fits` function with examples:\n bright = load_fits('image1.fits')\n print(bright)\n\n # You can also confirm your result visually:\n from astropy.io import fits\n import matplotlib.pyplot as plt\n\n hdulist = fits.open('image1.fits')\n data = hdulist[0].data\n\n # Plot the 2D image data\n plt.imshow(data.T, cmap=plt.cm.viridis)\n plt.colorbar()\n plt.show()"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.amax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anaikawadi/svbrdf-estimation | [
"6c169b12210d2a92495c1ab1218dd3e4da0314a5"
] | [
"development/multiImage_pytorch/persistence.py"
] | [
"import gc\nimport json\nimport pathlib\nimport torch\n\nclass Checkpoint:\n def __init__(self, checkpoint=None):\n self.checkpoint = checkpoint\n\n @staticmethod\n def get_checkpoint_path(checkpoint_dir):\n return checkpoint_dir.joinpath(\"checkpoint.tar\")\n\n @staticmethod\n def load_legacy(model_dir):\n model_path = model_dir.joinpath(\"model.data\")\n state_path = model_dir.joinpath(\"state.json\")\n if not model_path.exists():\n return None\n \n checkpoint = {\n 'model_state_dict' : torch.load(model_path),\n }\n print(\"Loaded legacy model state\")\n\n if state_path.exists():\n with open(state_path, 'r') as f:\n state = json.load(f)\n checkpoint['epoch'] = state['epoch']\n print(\"Loaded legacy training state\")\n\n return checkpoint \n\n @classmethod\n def load(cls, checkpoint_dir):\n if not isinstance(checkpoint_dir, pathlib.Path):\n checkpoint_dir = pathlib.Path(checkpoint_dir)\n \n checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)\n\n if not checkpoint_path.exists():\n # If there is no checkpoint file we try to perform a legacy load\n checkpoint = Checkpoint.load_legacy(checkpoint_dir)\n\n if checkpoint is None:\n print(\"No checkpoint found in directory '{}'\".format(checkpoint_dir))\n\n return cls(checkpoint)\n\n return cls(torch.load(checkpoint_path))\n\n @staticmethod\n def save(checkpoint_dir, args, model, optimizer, epoch):\n if not isinstance(checkpoint_dir, pathlib.Path):\n checkpoint_dir = pathlib.Path(checkpoint_dir)\n\n checkpoint_dir.mkdir(parents=True, exist_ok=True)\n\n checkpoint = {\n 'model_type' : args.model_type,\n 'use_coords' : True if args.use_coords else False,\n 'epoch' : epoch,\n 'model_state_dict': model.state_dict(),\n }\n\n if not args.omit_optimizer_state_save:\n checkpoint['optimizer_state_dict'] = optimizer.state_dict()\n\n torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))\n\n def purge(self):\n self.checkpoint = None\n gc.collect()\n\n def is_valid(self):\n return self.checkpoint is not None\n\n def restore_args(self, args):\n # Restore checkpoint relevant arguments\n\n if 'model_type' in self.checkpoint:\n args.model_type = self.checkpoint['model_type']\n print(\"Restored model type '{}'\".format(args.model_type))\n else:\n print(\"Failed to restore model type\")\n\n \n if 'use_coords' in self.checkpoint:\n args.use_coords = self.checkpoint['use_coords']\n print(\"Restored use coords flag '{}'\".format(args.use_coords))\n else:\n print(\"Failed to restore use coords flag\")\n\n return args\n\n def restore_model_state(self, model):\n if 'model_state_dict' in self.checkpoint:\n model.load_state_dict(self.checkpoint['model_state_dict'])\n print(\"Restored model state\")\n else:\n print(\"Failed to restore model state\")\n\n return model\n\n def restore_epoch(self, epoch):\n if 'epoch' in self.checkpoint:\n epoch = self.checkpoint['epoch']\n print(\"Restored epoch {}\".format(epoch))\n else:\n print(\"Failed to restore epoch\")\n \n return epoch\n\n def restore_optimizer_state(self, optimizer):\n if 'optimizer_state_dict' in self.checkpoint:\n optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])\n print(\"Restored optimizer state\")\n else:\n print(\"Failed to restore optimizer state\")\n\n return optimizer\n\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kobakobashu/posenet-python | [
"52290733504fd0a130cc2301bad5db761c14a4e9"
] | [
"models/helper.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Models helper\n\nThese are helper functions for models.\n\n\"\"\"\n\nimport torch.optim as optim\nimport torch.nn as nn\n\nfrom configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION\n\n\ndef get_optimizer(cfg: object, network: object) -> object:\n \"\"\"Get optimizer function\n\n This is function to get optimizer.\n\n Args:\n cfg: Config of optimizer.\n network: Network of model.\n\n Returns:\n Optimizer object.\n\n Raises:\n NotImplementedError: If the optimizer you want to use is not suppoeted.\n\n \"\"\"\n \n optimizer_name = cfg.name\n\n if not optimizer_name:\n return None\n\n if optimizer_name not in SUPPORTED_OPTIMIZER:\n raise NotImplementedError('The optimizer is not supported.')\n\n if optimizer_name == \"adam\":\n return optim.Adam(network.parameters(),\n lr=cfg.lr,\n weight_decay=cfg.decay)\n\n\ndef get_criterion(cfg: object) -> object:\n \"\"\"Get criterion function\n\n This is function to get criterion.\n\n Args:\n cfg: Config of criterion.\n\n Returns:\n Criterion object.\n\n Raises:\n NotImplementedError: If the criterion you want to use is not suppoeted.\n\n \"\"\"\n \n criterion_name = cfg.name\n\n if not criterion_name:\n return None\n\n if criterion_name not in SUPPORTED_CRITERION:\n raise NotImplementedError('The loss function is not supported.')\n\n if criterion_name == \"cross_entropy\":\n return nn.CrossEntropyLoss()\n\n elif criterion_name == \"nll_loss\":\n return nn.NLLLoss()"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.CrossEntropyLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhangziyezzy/DeepLearningMugenKnock | [
"26830fe049c7da8001977ca0df12e946c0f030eb",
"26830fe049c7da8001977ca0df12e946c0f030eb"
] | [
"Scripts_Model/scripts_pytorch/VGG19_pytorch.py",
"Scripts_Model/scripts_pytorch/DenseNet169_pytorch.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom collections import OrderedDict\nfrom easydict import EasyDict\nfrom _main_base import main\nimport os\n\n#---\n# config\n#---\ncfg = EasyDict()\n\n# class\ncfg.CLASS_LABEL = ['akahara', 'madara']\ncfg.CLASS_NUM = len(cfg.CLASS_LABEL)\n\n# model\ncfg.INPUT_HEIGHT = 64\ncfg.INPUT_WIDTH = 64\ncfg.INPUT_CHANNEL = 3\n\ncfg.GPU = False\ncfg.DEVICE = torch.device(\"cuda\" if cfg.GPU and torch.cuda.is_available() else \"cpu\")\n\ncfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'\ncfg.MODEL_SAVE_INTERVAL = 200\ncfg.ITERATION = 1000\ncfg.MINIBATCH = 8\ncfg.OPTIMIZER = torch.optim.SGD\ncfg.LEARNING_RATE = 0.1\ncfg.MOMENTUM = 0.9\ncfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()\n\ncfg.TRAIN = EasyDict()\ncfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50\n\ncfg.TRAIN.DATA_PATH = '../Dataset/train/images/'\ncfg.TRAIN.DATA_HORIZONTAL_FLIP = True\ncfg.TRAIN.DATA_VERTICAL_FLIP = True\ncfg.TRAIN.DATA_ROTATION = False\n\ncfg.TEST = EasyDict()\ncfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')\ncfg.TEST.DATA_PATH = '../Dataset/test/images/'\ncfg.TEST.MINIBATCH = 2\n\n# random seed\ntorch.manual_seed(0)\n\n\nclass VGG19(torch.nn.Module):\n def __init__(self):\n super(VGG19, self).__init__()\n\n self.conv1 = torch.nn.Sequential(OrderedDict({\n 'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),\n 'conv1_1_relu' : torch.nn.ReLU(),\n 'conv1_1_bn' : torch.nn.BatchNorm2d(64),\n 'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),\n 'conv1_2_relu' : torch.nn.ReLU(),\n 'conv1_2_bn' : torch.nn.BatchNorm2d(64),\n }))\n\n self.conv2 = torch.nn.Sequential(OrderedDict({\n 'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),\n 'conv2_1_relu' : torch.nn.ReLU(),\n 'conv2_1_bn' : torch.nn.BatchNorm2d(128),\n 'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),\n 'conv2_2_relu' : torch.nn.ReLU(),\n 'conv2_2_bn' : torch.nn.BatchNorm2d(128),\n }))\n\n self.conv3 = torch.nn.Sequential(OrderedDict({\n 'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_1_relu' : torch.nn.ReLU(),\n 'conv3_1_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_2_relu' : torch.nn.ReLU(),\n 'conv3_2_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_3_relu' : torch.nn.ReLU(),\n 'conv3_3_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_4_relu' : torch.nn.ReLU(),\n 'conv3_4_bn' : torch.nn.BatchNorm2d(256),\n }))\n\n self.conv4 = torch.nn.Sequential(OrderedDict({\n 'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_1_relu' : torch.nn.ReLU(),\n 'conv4_1_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_2_relu' : torch.nn.ReLU(),\n 'conv4_2_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_3_relu' : torch.nn.ReLU(),\n 'conv4_3_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_4_relu' : torch.nn.ReLU(),\n 'conv4_4_bn' : torch.nn.BatchNorm2d(512),\n }))\n\n self.conv5 = torch.nn.Sequential(OrderedDict({\n 'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_1_relu' : torch.nn.ReLU(),\n 'conv5_1_bn' 
: torch.nn.BatchNorm2d(512),\n 'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_2_relu' : torch.nn.ReLU(),\n 'conv5_2_bn' : torch.nn.BatchNorm2d(512),\n 'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_3_relu' : torch.nn.ReLU(),\n 'conv5_3_bn' : torch.nn.BatchNorm2d(512),\n 'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_3_relu' : torch.nn.ReLU(),\n 'conv5_3_bn' : torch.nn.BatchNorm2d(512),\n }))\n \n self.top = torch.nn.Sequential(OrderedDict({\n 'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),\n 'Dense1_relu' : torch.nn.ReLU(),\n 'Dense1_dropout' : torch.nn.Dropout(p=0.5),\n 'Dense2' : torch.nn.Linear(256, 256),\n 'Dense2_relu' : torch.nn.ReLU(),\n 'Dense2_dropout' : torch.nn.Dropout(p=0.5),\n }))\n\n self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)\n \n\n def forward(self, x):\n # block conv1\n x = self.conv1(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv2\n x = self.conv2(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv3\n x = self.conv3(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv4\n x = self.conv4(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv5\n x = self.conv5(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n \n x = x.view(x.shape[0], -1)\n x = self.top(x)\n x = self.fc_out(x)\n x = F.softmax(x, dim=1)\n return x\n\n# main\nif __name__ == '__main__':\n\n model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])\n os.makedirs(model_save_dir, exist_ok=True)\n\n main(cfg, VGG19())",
"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom collections import OrderedDict\nfrom easydict import EasyDict\nfrom _main_base import main\nimport os\n\n#---\n# config\n#---\ncfg = EasyDict()\n\n# class\ncfg.CLASS_LABEL = ['akahara', 'madara']\ncfg.CLASS_NUM = len(cfg.CLASS_LABEL)\n\n# model\ncfg.INPUT_HEIGHT = 64\ncfg.INPUT_WIDTH = 64\ncfg.INPUT_CHANNEL = 3\n\ncfg.GPU = False\ncfg.DEVICE = torch.device(\"cuda\" if cfg.GPU and torch.cuda.is_available() else \"cpu\")\n\ncfg.MODEL_SAVE_PATH = 'models/DenseNet169_{}.pt'\ncfg.MODEL_SAVE_INTERVAL = 200\ncfg.ITERATION = 1000\ncfg.MINIBATCH = 8\ncfg.OPTIMIZER = torch.optim.SGD\ncfg.LEARNING_RATE = 0.01\ncfg.MOMENTUM = 0.9\ncfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()\n\ncfg.TRAIN = EasyDict()\ncfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50\n\ncfg.TRAIN.DATA_PATH = '../Dataset/train/images/'\ncfg.TRAIN.DATA_HORIZONTAL_FLIP = True\ncfg.TRAIN.DATA_VERTICAL_FLIP = True\ncfg.TRAIN.DATA_ROTATION = False\n\ncfg.TEST = EasyDict()\ncfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')\ncfg.TEST.DATA_PATH = '../Dataset/test/images/'\ncfg.TEST.MINIBATCH = 2\n\n# random seed\ntorch.manual_seed(0)\n\n\nclass DenseNet169(torch.nn.Module):\n def __init__(self):\n super(DenseNet169, self).__init__()\n\n class Block(torch.nn.Module):\n def __init__(self, first_dim, k=32, L=6):\n super(Block, self).__init__()\n self.L = L\n self.blocks = torch.nn.ModuleList()\n self.blocks.append(torch.nn.Sequential(\n torch.nn.BatchNorm2d(first_dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(first_dim, k, kernel_size=1, padding=0, stride=1),\n torch.nn.BatchNorm2d(k),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k, k, kernel_size=3, padding=1, stride=1),\n ))\n \n for i in range(1, L):\n self.blocks.append(torch.nn.Sequential(\n torch.nn.BatchNorm2d(k * i + first_dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k * i + first_dim, k, kernel_size=1, padding=0, stride=1),\n torch.nn.BatchNorm2d(k),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k, k, kernel_size=3, padding=1, stride=1),\n ))\n \n def forward(self, x):\n xs = [None for _ in range(self.L + 1)]\n xs[0] = x\n xs[1] = self.blocks[0](x)\n \n for i in range(1, self.L):\n x_in = xs[i]\n for j in range(i):\n x_in = torch.cat([x_in, xs[j]], dim=1)\n x = self.blocks[i](x_in)\n xs[i + 1] = x\n \n x = xs[0]\n for i in range(1, (self.L + 1)):\n x = torch.cat([x, xs[i]], dim=1)\n\n return x\n\n k = 32\n theta = 0.5\n self.bn1 = torch.nn.BatchNorm2d(cfg.INPUT_CHANNEL)\n self.conv1 = torch.nn.Conv2d(cfg.INPUT_CHANNEL, k * 2, kernel_size=7, padding=3, stride=2)\n \n # Dense block1\n block1_L = 6\n block1_dim = int(k * block1_L * theta)\n \n self.block1 = Block(first_dim = k * 2, L = block1_L)\n \n # Transition layer1\n self.transition1 = torch.nn.Sequential(\n torch.nn.BatchNorm2d(k * block1_L + k * 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k * block1_L + k * 2, block1_dim, kernel_size=1, padding=0, stride=1),\n torch.nn.AvgPool2d(2, stride=2, padding=0)\n )\n \n # Dense block2\n block2_L = 12\n block2_dim = int(k * block2_L * theta)\n \n self.block2 = Block(first_dim = block1_dim, L = block2_L)\n\n # Transition layer2 \n self.transition2 = torch.nn.Sequential(\n torch.nn.BatchNorm2d(k * block2_L + block1_dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k * block2_L + block1_dim, block2_dim, kernel_size=1, padding=0, stride=1),\n torch.nn.AvgPool2d(2, stride=2, padding=0)\n )\n \n # Dense block3\n block3_L = 32\n block3_dim = int(k * block3_L * theta)\n \n self.block3 = Block(first_dim = block2_dim, L = block3_L)\n \n # Transition layer3\n 
self.transition3 = torch.nn.Sequential(\n torch.nn.BatchNorm2d(k * block3_L + block2_dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(k * block3_L + block2_dim, block3_dim, kernel_size=1, padding=0, stride=1),\n torch.nn.AvgPool2d(2, stride=2, padding=0)\n )\n \n # Dense block4\n block4_L = 32\n self.block4 = Block(first_dim = block3_dim, L = block4_L)\n \n self.linear = torch.nn.Linear(k * block4_L + block3_dim, cfg.CLASS_NUM)\n \n \n def forward(self, x):\n # Entry flow\n x = self.bn1(x)\n x = F.relu(x)\n x = self.conv1(x)\n \n x = F.max_pool2d(x, 3, padding=1, stride=2)\n \n x = self.block1(x)\n \n x = self.transition1(x)\n \n x = self.block2(x)\n \n x = self.transition2(x)\n \n x = self.block3(x)\n \n x = self.transition3(x)\n \n x = self.block4(x)\n\n x = F.avg_pool2d(x, [cfg.INPUT_HEIGHT // 32, cfg.INPUT_WIDTH // 32], padding=0, stride=1)\n x = x.view(x.size()[0], -1)\n x = self.linear(x)\n x = F.softmax(x, dim=1)\n \n return x\n\n# main\nif __name__ == '__main__':\n\n model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])\n os.makedirs(model_save_dir, exist_ok=True)\n\n main(cfg, DenseNet169())"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
],
[
"torch.nn.NLLLoss",
"torch.nn.functional.softmax",
"torch.cat",
"torch.manual_seed",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.functional.relu",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DionysisChristopoulos/google-research | [
"7f59ef421beef32ca16c2a7215be74f7eba01a0f",
"eb2b142f26e39aac1dcbb768417465ae9d4e5af6",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7f59ef421beef32ca16c2a7215be74f7eba01a0f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7f59ef421beef32ca16c2a7215be74f7eba01a0f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7f59ef421beef32ca16c2a7215be74f7eba01a0f",
"eb2b142f26e39aac1dcbb768417465ae9d4e5af6",
"eb2b142f26e39aac1dcbb768417465ae9d4e5af6",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"eb2b142f26e39aac1dcbb768417465ae9d4e5af6",
"7cee4b22b925581d912e8d993625c180da2a5a4f",
"7cee4b22b925581d912e8d993625c180da2a5a4f"
] | [
"blur/synapse_util.py",
"scann/scann/scann_ops/py/scann_ops.py",
"social_rl/multiagent_tfagents/multiagent_metrics.py",
"pse/dm_control/run_train_eval.py",
"pse/jumping_task/evaluation_helpers.py",
"dp_multiq/csmooth.py",
"meta_pseudo_labels/training_utils.py",
"es_enas/util.py",
"non_semantic_speech_benchmark/data_prep/count_duration_beam.py",
"covid_epidemiology/colab_utils.py",
"readtwice/layers/transformer.py",
"poem/core/tfe_input_layer.py",
"smu/parser/smu_utils_lib_test.py",
"aptamers_mlpd/simulation/utils.py",
"recs_ecosystem_creator_rl/environment/user.py",
"bnn_hmc/utils/cmd_args_utils.py",
"infinite_nature/fly_camera.py",
"cascaded_networks/train.py",
"felix/bert_example.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for synapse handling.\"\"\"\n\nimport enum\nimport functools as ft\nfrom typing import Callable, List, Sequence, Text, Union, Optional\nimport dataclasses as dc\n\nimport jax.numpy as jp\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom blur import blur_env\n\nTensorShape = tf.TensorShape\nTensor = Union[tf.Tensor, np.ndarray, jp.array]\n\n\[email protected]\nclass SynapseInitializerParams:\n shape: TensorShape\n in_neurons: int\n out_neurons: int\n\n\nclass UpdateType(enum.Enum):\n FORWARD = 1\n BACKWARD = 2\n BOTH = 3\n NONE = 4\n\n\nSynapseInitializer = Callable[[SynapseInitializerParams], Tensor]\n\n# A callable that takes a sequence of layers and SynapseInitializer and creates\n# appropriately shaped list of Synapses.\nCreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]\n\n\ndef random_uniform_symmetric(shape, seed):\n return (tf.random.uniform(shape, seed=seed) - 0.5) * 2\n\n\ndef random_initializer(start_seed=0, scale_by_channels=False,\n scale=1, bias=0, random_fn=random_uniform_symmetric):\n \"\"\"Returns initializer that generates random sequence.\"\"\"\n seed = [hash(str(start_seed))]\n def impl(params):\n if len(params.shape) >= 3:\n # shape: species x (in+out) x (in+out) x states\n num_channels = int(params.shape[-2])\n seed[0] += 1\n v = random_fn(params.shape, seed[0])\n apply_scale = scale(params) if callable(scale) else scale\n r = v * apply_scale + bias\n if scale_by_channels:\n r = r / (num_channels ** 0.5)\n return r\n return impl\n\n\ndef _random_uniform_fn(start_seed):\n rng = np.random.RandomState(start_seed)\n return lambda shape: tf.constant(rng.uniform( # pylint: disable=g-long-lambda\n low=-1, high=1, size=shape), dtype=np.float32)\n\n\ndef fixed_random_initializer(start_seed=0,\n scale_by_channels=False,\n scale=1,\n bias=0,\n random_fn=None):\n \"\"\"Returns an initializer that generates random (but fixed) sequence.\n\n The resulting tensors are backed by a constant so they produce the same\n value across all calls.\n\n This initializer uses its own random state that is independent of default\n random sequence.\n\n Args:\n start_seed: initial seed passed to np.random.RandomStates\n scale_by_channels: whether to scale by number of channels.\n scale: target scale (default: 1)\n bias: mean of the resulting distribution.\n random_fn: random generator if none will use use _random_uniform_fn\n Returns:\n callable that accepts shape and returns tensorflow constant tensor.\n \"\"\"\n if random_fn is None:\n random_fn = _random_uniform_fn(start_seed)\n\n def impl(params):\n if len(params.shape) >= 3:\n # shape: species x (in+out) x (in+out) x states\n num_channels = int(params.shape[-2])\n v = random_fn(shape=params.shape)\n apply_scale = scale(params) if callable(scale) else scale\n r = v * apply_scale + bias\n if scale_by_channels:\n r = r / (num_channels ** 0.5)\n return r\n\n return impl\n\n\ndef 
create_synapse_init_fns(\n layers,\n initializer):\n \"\"\"Generates network synapse initializers.\n\n Arguments:\n layers: Sequence of network layers (used for shape calculation).\n initializer: SynapseInitializer used to initialize synapse tensors.\n\n Returns:\n A list of functions that produce synapse tensors for all layers upon\n execution.\n \"\"\"\n synapse_init_fns = []\n for pre, post in zip(layers, layers[1:]):\n # shape: population_dims, batch_size, in_channels, neuron_state\n pop_dims = pre.shape[:-3]\n # -2: is the number of channels\n num_inputs = pre.shape[-2] + post.shape[-2] + 1\n # -1: is the number of states in a single neuron.\n synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])\n params = SynapseInitializerParams(\n shape=synapse_shape,\n in_neurons=pre.shape[-2],\n out_neurons=post.shape[-2])\n synapse_init_fns.append(ft.partial(initializer, params))\n return synapse_init_fns\n\n\ndef create_synapses(layers,\n initializer):\n \"\"\"Generates arbitrary form synapses.\n\n Arguments:\n layers: Sequence of network layers (used for shape calculation).\n initializer: SynapseInitializer used to initialize synapse tensors.\n\n Returns:\n A list of created synapse tensors for all layers.\n \"\"\"\n return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]\n\n\ndef transpose_synapse(synapse, env):\n num_batch_dims = len(synapse.shape[:-3])\n perm = [\n *range(num_batch_dims), num_batch_dims + 1, num_batch_dims,\n num_batch_dims + 2\n ]\n return env.transpose(synapse, perm)\n\n\ndef synapse_submatrix(synapse,\n in_channels,\n update_type,\n include_bias = True):\n \"\"\"Returns a submatrix of a synapse matrix given the update type.\"\"\"\n bias = 1 if include_bias else 0\n if update_type == UpdateType.FORWARD:\n return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]\n if update_type == UpdateType.BACKWARD:\n return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]\n\n\ndef combine_in_out_synapses(in_out_synapse, out_in_synapse,\n env):\n \"\"\"Combines forward and backward synapses into a single matrix.\"\"\"\n batch_dims = in_out_synapse.shape[:-3]\n out_channels, in_channels, num_states = in_out_synapse.shape[-3:]\n synapse = env.concat([\n env.concat([\n env.zeros((*batch_dims, out_channels, out_channels, num_states)),\n in_out_synapse\n ], axis=-2),\n env.concat([\n out_in_synapse,\n env.zeros((*batch_dims, in_channels, in_channels, num_states))\n ], axis=-2)\n ], axis=-3)\n return synapse\n\n\ndef sync_all_synapses(synapses, layers, env):\n \"\"\"Sync synapses across all layers.\n\n For each synapse, syncs its first state forward synapse with backward synapse\n and copies it arocess all the states.\n\n Args:\n synapses: list of synapses in the network.\n layers: list of layers in the network.\n env: Environment\n Returns:\n Synchronized synapses.\n \"\"\"\n for i in range(len(synapses)):\n synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)\n return synapses\n\n\ndef sync_in_and_out_synapse(synapse, in_channels, env):\n \"\"\"Copies forward synapse to backward one.\"\"\"\n in_out_synapse = synapse_submatrix(\n synapse,\n in_channels=in_channels,\n update_type=UpdateType.FORWARD,\n include_bias=True)\n return combine_in_out_synapses(\n in_out_synapse,\n transpose_synapse(in_out_synapse, env),\n env)\n\n\ndef sync_states_synapse(synapse, env, num_states=None):\n \"\"\"Sync synapse's first state across all the other states.\"\"\"\n if num_states is None:\n num_states = 
synapse.shape[-1]\n return env.stack(num_states*[synapse[Ellipsis, 0]], axis=-1)\n\n\ndef normalize_synapses(synapses,\n rescale_to,\n env,\n axis = -3):\n \"\"\"Normalizes synapses across a particular axis (across input by def.).\"\"\"\n # Default value axis=-3 corresponds to normalizing across the input neuron\n # dimension.\n squared = env.sum(synapses ** 2, axis=axis, keepdims=True)\n synapses /= env.sqrt(squared + 1e-9)\n if rescale_to is not None:\n synapses *= rescale_to\n return synapses\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python API for ScaNN - single machine, dense vector similarity search.\"\"\"\n\nimport os\nimport uuid\nfrom scann.scann_ops.py import scann_builder\nimport tensorflow as tf\n\n_scann_ops_so = tf.load_op_library(\n os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))),\n \"cc/_scann_ops.so\"))\nscann_create_searcher = _scann_ops_so.scann_scann_create_searcher\nscann_search = _scann_ops_so.scann_scann_search\nscann_search_batched = _scann_ops_so.scann_scann_search_batched\nscann_to_tensors = _scann_ops_so.scann_scann_to_tensors\ntensors_to_scann = _scann_ops_so.scann_tensors_to_scann\n\n\ndef searcher_from_module(module, db=None):\n del db # Unused.\n return ScannSearcher(module.recreate_handle())\n\n\nclass ScannState(tf.Module):\n \"\"\"Class that wraps ScaNN searcher assets for object-based checkpointing.\"\"\"\n\n def __init__(self, tensors):\n super(ScannState, self).__init__()\n scann_config, serialized_partitioner, datapoint_to_token, ah_codebook, hashed_dataset, int8_dataset, int8_multipliers, dp_norms, dataset = tensors\n\n def make_var(v):\n with tf.compat.v1.variable_scope(\n tf.compat.v1.VariableScope(use_resource=True, reuse=False)):\n return tf.Variable(v, validate_shape=False)\n\n self.scann_config = make_var(scann_config)\n self.serialized_partitioner = make_var(serialized_partitioner)\n self.datapoint_to_token = make_var(datapoint_to_token)\n self.ah_codebook = make_var(ah_codebook)\n self.hashed_dataset = make_var(hashed_dataset)\n self.int8_dataset = make_var(int8_dataset)\n self.int8_multipliers = make_var(int8_multipliers)\n self.dp_norms = make_var(dp_norms)\n self.dataset = make_var(dataset)\n\n @tf.function(input_signature=[])\n def recreate_handle(self):\n \"\"\"Creates resource handle to searcher from ScaNN searcher assets.\"\"\"\n return tensors_to_scann(self.dataset, self.scann_config,\n self.serialized_partitioner,\n self.datapoint_to_token, self.ah_codebook,\n self.hashed_dataset, self.int8_dataset,\n self.int8_multipliers, self.dp_norms)\n\n\nclass ScannSearcher(object):\n \"\"\"Wrapper class that holds the ScaNN searcher resource handle.\"\"\"\n\n def __init__(self, searcher_handle):\n self.searcher_handle = searcher_handle\n\n def search(self,\n q,\n final_num_neighbors=None,\n pre_reorder_num_neighbors=None,\n leaves_to_search=None):\n final_nn = -1 if final_num_neighbors is None else final_num_neighbors\n pre_nn = -1 if pre_reorder_num_neighbors is None else pre_reorder_num_neighbors\n leaves = -1 if leaves_to_search is None else leaves_to_search\n return scann_search(self.searcher_handle, q, final_nn, pre_nn, leaves)\n\n def search_batched(self,\n q,\n final_num_neighbors=None,\n pre_reorder_num_neighbors=None,\n leaves_to_search=None):\n final_nn = -1 if final_num_neighbors is None else final_num_neighbors\n pre_nn = -1 if pre_reorder_num_neighbors is None else pre_reorder_num_neighbors\n leaves = -1 if 
leaves_to_search is None else leaves_to_search\n return scann_search_batched(self.searcher_handle, q, final_nn, pre_nn,\n leaves, False)\n\n def search_batched_parallel(self,\n q,\n final_num_neighbors=None,\n pre_reorder_num_neighbors=None,\n leaves_to_search=None):\n final_nn = -1 if final_num_neighbors is None else final_num_neighbors\n pre_nn = -1 if pre_reorder_num_neighbors is None else pre_reorder_num_neighbors\n leaves = -1 if leaves_to_search is None else leaves_to_search\n return scann_search_batched(self.searcher_handle, q, final_nn, pre_nn,\n leaves, True)\n\n def serialize_to_module(self):\n return ScannState(scann_to_tensors(self.searcher_handle))\n\n\ndef builder(db, num_neighbors, distance_measure):\n \"\"\"Creates a ScannBuilder that returns a TensorFlow ScaNN searcher on build().\n\n Args:\n db: the dataset that ScaNN will search over; a 2d array of 32-bit floats\n with one data point per row.\n num_neighbors: the default # neighbors the searcher will return per query.\n distance_measure: one of \"squared_l2\" or \"dot_product\".\n\n Returns:\n A ScannBuilder object, which builds the ScaNN config via calls such as\n tree() and score_brute_force(). Calling build() on the ScannBuilder will\n return a TensorFlow ScaNN searcher with its specified config.\n \"\"\"\n\n def builder_lambda(db, config, training_threads, **kwargs):\n return create_searcher(db, config, training_threads, **kwargs)\n\n return scann_builder.ScannBuilder(\n db, num_neighbors, distance_measure).set_builder_lambda(builder_lambda)\n\n\ndef create_searcher(db,\n scann_config,\n training_threads=0,\n container=\"\",\n shared_name=None):\n \"\"\"Create a ScaNN searcher given a dataset and text config proto.\"\"\"\n if shared_name is None:\n shared_name = f\"scann-{uuid.uuid4()}\"\n return ScannSearcher(\n scann_create_searcher(\n x=db,\n scann_config=scann_config,\n training_threads=training_threads,\n container=container,\n shared_name=shared_name))\n",
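A hedged usage sketch for the TensorFlow ScaNN wrapper above. It only uses calls named in this file or in the builder() docstring (score_brute_force(), build(), search_batched()); the database shape, neighbor count, and the assumption that search_batched returns (neighbor indices, distances) are illustrative.

import numpy as np

db = np.random.rand(10000, 128).astype(np.float32)      # one datapoint per row, 32-bit floats
queries = np.random.rand(16, 128).astype(np.float32)

searcher = builder(db, num_neighbors=10, distance_measure="dot_product") \
    .score_brute_force() \
    .build()
neighbors, distances = searcher.search_batched(queries)  # assumed output order: (indices, distances)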
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TF metrics that work in the multi-agent case.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom absl import logging\n\nimport gin\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.drivers import tf_driver\nfrom tf_agents.metrics import py_metrics\nfrom tf_agents.metrics import tf_metric\nfrom tf_agents.metrics.tf_metrics import TFDeque\nfrom tf_agents.utils import common\nfrom tf_agents.utils import numpy_storage\n\nfrom social_rl.multiagent_tfagents.joint_attention import drivers\n\n\ndef zero_out_new_episodes(trajectory, return_accumulator):\n return tf.where(trajectory.is_first(), tf.zeros_like(return_accumulator),\n return_accumulator)\n\n\[email protected]\nclass AverageReturnMetric(tf_metric.TFStepMetric):\n \"\"\"Metric for the average collective return and individual agent returns.\"\"\"\n\n def __init__(self,\n n_agents,\n name='MultiagentAverageReturn',\n prefix='Metrics',\n dtype=tf.float32,\n batch_size=1,\n buffer_size=10):\n super(AverageReturnMetric, self).__init__(name=name, prefix=prefix)\n self.n_agents = n_agents\n self._dtype = dtype\n\n # Accumulator and buffer for the average return of all agents\n self._collective_return_accumulator = common.create_variable(\n initial_value=0, dtype=dtype, shape=(batch_size,), name='Accumulator')\n self._collective_buffer = TFDeque(buffer_size, dtype)\n\n # Accumulators for each agent's independent reward\n self._agent_return_accumulators = []\n for a in range(n_agents):\n self._agent_return_accumulators.append(common.create_variable(\n initial_value=0, dtype=dtype, shape=(batch_size,),\n name='Accumulator' + str(a)))\n\n # Buffers for each agent's independent reward\n self._agent_buffers = []\n for a in range(n_agents):\n self._agent_buffers.append(TFDeque(buffer_size, dtype))\n\n @common.function(autograph=True)\n def call(self, trajectory):\n # Zero out batch indices where a new episode is starting.\n self._collective_return_accumulator.assign(\n zero_out_new_episodes(trajectory, self._collective_return_accumulator))\n for a in range(self.n_agents):\n self._agent_return_accumulators[a].assign(\n zero_out_new_episodes(trajectory, self._agent_return_accumulators[a]))\n\n # Note that trajectory.reward has shape (batch, n_agents)\n\n # Update accumulator with sum of received rewards.\n self._collective_return_accumulator.assign_add(\n tf.reduce_mean(trajectory.reward, axis=1))\n\n # Pull out data for each agent and assign\n for a in range(self.n_agents):\n self._agent_return_accumulators[a].assign_add(trajectory.reward[:, a])\n\n # Add final returns to buffer.\n last_episode_indices = tf.squeeze(tf.where(trajectory.is_last()), axis=-1)\n for indx in last_episode_indices:\n self._collective_buffer.add(self._collective_return_accumulator[indx])\n\n # Agent 
buffers that use the global done\n for a in range(self.n_agents):\n self._agent_buffers[a].add(self._agent_return_accumulators[a][indx])\n\n return trajectory\n\n def result(self):\n return self._collective_buffer.mean()\n\n def result_for_agent(self, agent_id):\n return self._agent_buffers[agent_id].mean()\n\n @common.function\n def reset(self):\n self._collective_buffer.clear()\n self._collective_return_accumulator.assign(\n tf.zeros_like(self._collective_return_accumulator))\n\n for a in range(self.n_agents):\n self._agent_buffers[a].clear()\n self._agent_return_accumulators[a].assign(\n tf.zeros_like(self._agent_return_accumulators[a]))\n\n def tf_summaries(self, train_step=None, step_metrics=()):\n \"\"\"Generates summaries for all agents & collective summary against steps.\n\n Args:\n train_step: (Optional) Step counter for training iterations. If None, no\n metric is generated against the global step.\n step_metrics: (Optional) Iterable of step metrics to generate summaries\n against.\n\n Returns:\n A list of summaries.\n \"\"\"\n summaries = super(AverageReturnMetric, self).tf_summaries(\n train_step=train_step, step_metrics=step_metrics)\n\n for a in range(self.n_agents):\n summaries.extend(self.single_agent_summary(\n a, train_step, step_metrics))\n\n return summaries\n\n def single_agent_summary(self, agent_id, train_step=None, step_metrics=()):\n summaries = []\n prefix = self._prefix\n name = self.name + '_agent' + str(agent_id)\n tag = common.join_scope(prefix, name)\n\n result = self.result_for_agent(agent_id)\n\n if train_step is not None:\n summaries.append(\n tf.compat.v2.summary.scalar(name=tag, data=result, step=train_step))\n if prefix:\n prefix += '_'\n for step_metric in step_metrics:\n # Skip plotting the metrics against itself.\n if self.name == step_metric.name:\n continue\n step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, name)\n # Summaries expect the step value to be an int64.\n step = tf.cast(step_metric.result(), tf.int64)\n summaries.append(tf.compat.v2.summary.scalar(\n name=step_tag,\n data=result,\n step=step))\n return summaries\n\n\[email protected]\nclass MultiagentScalar(tf_metric.TFStepMetric):\n \"\"\"Metric to compute average of simple scalars like number of obstacles.\"\"\"\n\n def __init__(self,\n n_agents,\n name,\n prefix='Metrics',\n dtype=tf.float32,\n buffer_size=10):\n super(MultiagentScalar, self).__init__(name=name, prefix=prefix)\n self._buffers = [TFDeque(buffer_size, dtype) for _ in range(n_agents)]\n self._n_agents = n_agents\n self._dtype = dtype\n\n @common.function(autograph=True)\n def call(self, new_scalar_vals, agent_id):\n self._buffers[agent_id].add(tf.reduce_mean(new_scalar_vals))\n return new_scalar_vals\n\n def result(self):\n return tf.reduce_mean([buffer.mean() for buffer in self._buffers])\n\n def result_for_agent(self, agent_id):\n return self._buffers[agent_id].mean()\n\n @common.function\n def reset(self):\n for buffer in self._buffers:\n buffer.clear()\n\n\ndef log_metrics(metrics, prefix=''):\n log = []\n for m in metrics:\n log.append('{0} = {1}'.format(m.name, m.result()))\n if 'Multiagent' in m.name:\n log += ['{0} = {1}'.format(\n m.name + '_agent' + str(a),\n m.result_for_agent(a)) for a in range(m.n_agents)]\n logging.info('%s \\n\\t\\t %s', prefix, '\\n\\t\\t '.join(log))\n\n\[email protected]\ndef eager_compute(metrics,\n environment,\n policy,\n num_episodes=1,\n train_step=None,\n summary_writer=None,\n summary_prefix='',\n use_function=True,\n use_attention_networks=False):\n \"\"\"Compute metrics 
using `policy` on the `environment`.\n\n *NOTE*: Because placeholders are not compatible with Eager mode we can not use\n python policies. Because we use tf_policies we need the environment time_steps\n to be tensors making it easier to use a tf_env for evaluations. Otherwise this\n method mirrors `compute` directly.\n\n Args:\n metrics: List of metrics to compute.\n environment: tf_environment instance.\n policy: tf_policy instance used to step the environment.\n num_episodes: Number of episodes to compute the metrics over.\n train_step: An optional step to write summaries against.\n summary_writer: An optional writer for generating metric summaries.\n summary_prefix: An optional prefix scope for metric summaries.\n use_function: Option to enable use of `tf.function` when collecting the\n metrics.\n use_attention_networks: Option to use attention network architecture in the\n agent. This architecture requires observations from the previous time step.\n Returns:\n A dictionary of results {metric_name: metric_value}\n \"\"\"\n for metric in metrics:\n metric.reset()\n\n multiagent_metrics = [m for m in metrics if 'Multiagent' in m.name]\n\n if use_attention_networks:\n driver = drivers.StateTFDriver(\n environment,\n policy,\n observers=metrics,\n max_episodes=num_episodes,\n disable_tf_function=not use_function,\n )\n else:\n driver = tf_driver.TFDriver(\n environment,\n policy,\n observers=metrics,\n max_episodes=num_episodes,\n disable_tf_function=not use_function)\n\n def run_driver():\n time_step = environment.reset()\n policy_state = policy.get_initial_state(environment.batch_size)\n if use_attention_networks:\n time_step.observation['policy_state'] = (\n policy_state['actor_network_state'][0],\n policy_state['actor_network_state'][1])\n driver.run(time_step, policy_state)\n\n if use_function:\n common.function(run_driver)()\n else:\n run_driver()\n\n results = [(metric.name, metric.result()) for metric in metrics]\n for m in multiagent_metrics:\n for a in range(m.n_agents):\n results.append((m.name + '_agent' + str(a), m.result_for_agent(a)))\n\n # TODO(b/120301678) remove the summaries and merge with compute\n if train_step and summary_writer:\n with summary_writer.as_default():\n for m in metrics:\n tag = common.join_scope(summary_prefix, m.name)\n tf.compat.v2.summary.scalar(name=tag, data=m.result(), step=train_step)\n if 'Multiagent' in m.name:\n for a in range(m.n_agents):\n tf.compat.v2.summary.scalar(name=tag + '_agent' + str(a),\n data=m.result_for_agent(a),\n step=train_step)\n # TODO(b/130249101): Add an option to log metrics.\n return collections.OrderedDict(results)\n\n\nclass MultiagentMetricsGroup(tf.Module):\n \"\"\"Group a list of Metrics into a container.\"\"\"\n\n def __init__(self, metrics, name=None):\n super(MultiagentMetricsGroup, self).__init__(name=name)\n self.metrics = metrics\n self.multiagent_metrics = [m for m in metrics if 'Multiagent' in m.name]\n\n def results(self):\n results = [(metric.name, metric.result()) for metric in self.metrics]\n\n for m in self.multiagent_metrics:\n for a in range(m.n_agents):\n results.append((m.name + '_agent' + str(a), m.result_for_agent(a)))\n return collections.OrderedDict(results)\n\n\[email protected]\nclass AverageReturnPyMetric(py_metrics.StreamingMetric):\n \"\"\"Computes the average undiscounted reward.\"\"\"\n\n def __init__(self,\n n_agents,\n name='MultiagentAverageReturn',\n buffer_size=10,\n batch_size=None):\n \"\"\"Creates an AverageReturnPyMetric.\"\"\"\n self.n_agents = n_agents\n self._np_state = 
numpy_storage.NumpyState()\n # Set a dummy value on self._np_state.episode_return so it gets included in\n # the first checkpoint (before metric is first called).\n self._np_state.episode_return = np.float64(0)\n self._agent_metrics = [\n py_metrics.AverageReturnMetric(\n 'AverageReturn%i' % i, buffer_size=buffer_size)\n for i in range(n_agents)\n ]\n super(AverageReturnPyMetric, self).__init__(name, buffer_size=buffer_size,\n batch_size=batch_size)\n\n def result_for_agent(self, agent_id):\n return self._agent_metrics[agent_id].result()\n\n # We want to reuse methods for the sub-metrics\n # pylint: disable=protected-access\n def _reset(self, batch_size):\n \"\"\"Resets stat gathering variables.\"\"\"\n self._np_state.episode_return = np.zeros(\n shape=(batch_size,), dtype=np.float64)\n for metric in self._agent_metrics:\n metric._reset(batch_size)\n\n def _batched_call(self, trajectory):\n \"\"\"Processes the trajectory to update the metric.\n\n Args:\n trajectory: a tf_agents.trajectory.Trajectory.\n \"\"\"\n episode_return = self._np_state.episode_return\n agent_episode_returns = [\n metric._np_state.episode_return for metric in self._agent_metrics\n ]\n\n is_first = np.where(trajectory.is_first())\n episode_return[is_first] = 0\n for r in agent_episode_returns:\n r[is_first] = 0\n\n for i in range(self.n_agents):\n agent_episode_returns[i] += trajectory.reward[:, i]\n episode_return += np.mean(trajectory.reward, axis=-1)\n\n is_last = np.where(trajectory.is_last())\n self.add_to_buffer(episode_return[is_last])\n for metric in self._agent_metrics:\n metric.add_to_buffer(agent_episode_returns[i][is_last])\n",
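A brief usage sketch for the multi-agent metrics above, assuming a tf_agents TF environment and policy already exist under the hypothetical names tf_env and tf_policy; it mirrors the eager_compute entry point defined in this module.

n_agents = 3
metrics = [AverageReturnMetric(n_agents, batch_size=tf_env.batch_size)]
results = eager_compute(metrics, tf_env, tf_policy, num_episodes=5)
# results is an OrderedDict holding the collective 'MultiagentAverageReturn'
# plus one 'MultiagentAverageReturn_agentK' entry per agent K.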
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Wraps drq_sac_agent and expands the root_dir for nightly baselines.\n\n\"\"\"\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom pse.dm_control import train_eval_flags # pylint:disable=unused-import\nfrom pse.dm_control.agents import pse_drq_train_eval\n\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('debugging', False,\n 'If True, we set additional logging and run in eager mode.')\n\n\ndef set_random_seed(seed):\n \"\"\"Set random seed for reproducibility.\"\"\"\n os.environ['PYTHONHASHSEED'] = str(seed)\n tf.random.set_seed(seed)\n np.random.seed(seed)\n\n\[email protected](module='evaluator')\ndef evaluate(max_train_step=int(1e+8)): # pylint: disable=unused-argument\n pass\n\n\ndef main(argv):\n del argv\n logging.set_verbosity(logging.INFO)\n if FLAGS.seed is not None:\n set_random_seed(FLAGS.seed)\n logging.info('Random seed %d', FLAGS.seed)\n trial_suffix = f'{FLAGS.trial_id}/seed_{FLAGS.seed}'\n else:\n trial_suffix = str(FLAGS.trial_id)\n\n expanded_root_dir = os.path.join(\n FLAGS.root_dir, FLAGS.env_name, trial_suffix)\n if FLAGS.load_pretrained and (FLAGS.pretrained_model_dir is not None):\n pretrained_model_dir = os.path.join(\n FLAGS.pretrained_model_dir, FLAGS.env_name, trial_suffix)\n else:\n pretrained_model_dir = None\n if FLAGS.debugging:\n tf.debugging.set_log_device_placement(True)\n tf.config.experimental_run_functions_eagerly(True)\n\n gin.parse_config_files_and_bindings(FLAGS.gin_files, FLAGS.gin_bindings)\n\n pse_drq_train_eval.train_eval(\n expanded_root_dir,\n FLAGS.env_name,\n num_train_steps=FLAGS.num_train_steps,\n policy_save_interval=FLAGS.policy_save_interval,\n checkpoint_interval=FLAGS.checkpoint_interval,\n load_pretrained=FLAGS.load_pretrained,\n pretrained_model_dir=pretrained_model_dir,\n contrastive_loss_weight=FLAGS.contrastive_loss_weight,\n contrastive_loss_temperature=FLAGS.contrastive_loss_temperature,\n image_encoder_representation=FLAGS.image_encoder_representation,\n reverb_port=FLAGS.reverb_port,\n eval_interval=FLAGS.eval_interval)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Helpers for evaluating an agent on Jumpy World.\"\"\"\n\nimport io\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow.compat.v2 as tf\n\nsns.set_style('white')\n\n\ndef create_evaluation_grid(nn_model,\n imitation_data,\n mc_samples=1,\n color_name='WHITE'):\n \"\"\"Evaluates an agent on all environments in imitation_data.\"\"\"\n obstacle_positions = sorted(imitation_data.keys())\n floor_heights = sorted(imitation_data[obstacle_positions[0]].keys())\n evaluation_grid = np.zeros((len(obstacle_positions), len(floor_heights)))\n for i, pos in enumerate(obstacle_positions):\n for j, height in enumerate(floor_heights):\n input_observations, optimal_actions, _ = imitation_data[pos][height]\n predictions = tf.nn.softmax(\n nn_model(input_observations, training=False), axis=-1)\n # MC Averaging if using RandConv\n for _ in range(mc_samples - 1):\n predictions += tf.nn.softmax(\n nn_model(input_observations, training=False), axis=-1)\n predictions /= mc_samples\n greedy_actions = np.array(\n [1 if pi[1] > pi[0] else 0 for pi in predictions])\n action_diff = greedy_actions - np.array(optimal_actions)\n if color_name == 'GREEN':\n # The collision happens when the agent touches the block\n argmax_val = pos - 5\n elif color_name in ['WHITE', 'RED']:\n argmax_val = np.argmax(optimal_actions)\n else:\n raise ValueError(f'{color_name} is not a valid obstacle color.')\n binary_mask = np.arange(len(optimal_actions)) <= argmax_val\n is_optimal = sum(binary_mask * np.abs(action_diff)) == 0\n evaluation_grid[i][j] = is_optimal\n return evaluation_grid\n\n\ndef neigbhour_indices(x, y, max_x, max_y):\n valid_indices = []\n for index in [(x - 1, y), (x+1, y), (x, y-1), (x, y+1)]:\n is_x_valid = (0 <= index[0]) and (index[0] < max_x)\n is_y_valid = (0 <= index[1]) and (index[1] < max_y)\n if is_x_valid and is_y_valid:\n valid_indices.append(index)\n return valid_indices\n\n\ndef generate_validation_positions(training_positions, min_obs_position,\n min_floor_height, num_positions, num_heights):\n \"\"\"Generate validation positions.\"\"\"\n val_pos = []\n for (obstacle_pos, floor_height) in training_positions:\n pos_index = obstacle_pos - min_obs_position\n height_index = floor_height - min_floor_height\n validation_indices = neigbhour_indices(\n pos_index, height_index, num_positions, num_heights)\n for val_pos_index, val_height_index in validation_indices:\n val_pos.append((val_pos_index + min_obs_position,\n val_height_index + min_floor_height))\n return list(set(val_pos))\n\n\ndef num_solved_tasks(evaluation_grid, training_positions, validation_positions,\n min_obs_position, min_floor_height):\n \"\"\"Calculates number of tasks solved in training, validation and test sets.\"\"\"\n solved_envs = {'train': 0, 'test': 0}\n if validation_positions:\n solved_envs['validation'] = 0\n\n num_positions, num_heights = 
evaluation_grid.shape\n is_train_or_validation = np.zeros_like(evaluation_grid, dtype=np.int32)\n\n for (obstacle_pos, floor_height) in training_positions:\n pos_index = obstacle_pos - min_obs_position\n height_index = floor_height - min_floor_height\n is_train_or_validation[pos_index][height_index] = 1\n\n for (obstacle_pos, floor_height) in validation_positions:\n pos_index = obstacle_pos - min_obs_position\n height_index = floor_height - min_floor_height\n is_train_or_validation[pos_index][height_index] = 2\n\n for pos_index in range(num_positions):\n for height_index in range(num_heights):\n if is_train_or_validation[pos_index][height_index] == 1:\n solved_envs['train'] += evaluation_grid[pos_index][height_index]\n elif is_train_or_validation[pos_index][height_index] == 2:\n solved_envs['validation'] += evaluation_grid[pos_index][height_index]\n else:\n solved_envs['test'] += evaluation_grid[pos_index][height_index]\n return solved_envs\n\n\ndef plot_evaluation_grid(grid, training_positions, min_obs_position,\n min_floor_height):\n \"\"\"Plots the evaluation grid.\"\"\"\n fig, ax = plt.subplots(figsize=(7, 9))\n grid_x, grid_y = grid.shape\n extent = (0, grid_x, grid_y, 0)\n ax.imshow(grid.T, extent=extent, origin='lower')\n\n x_ticks = np.arange(grid_x)\n y_ticks = np.arange(grid_y)\n ax.set_xticks(x_ticks)\n ax.set_yticks(y_ticks)\n\n ax.tick_params(labelbottom=False, labelleft=False)\n\n # Loop over data dimensions and create text annotations.\n for (obstacle_pos, floor_height) in training_positions:\n pos_index = obstacle_pos - min_obs_position\n height_index = floor_height - min_floor_height\n ax.text(\n pos_index + 0.5,\n height_index + 0.5,\n 'T',\n ha='center',\n va='center',\n color='r')\n\n ax.grid(color='w', linewidth=1)\n fig.tight_layout()\n return fig\n\n\ndef plot_to_image(figure):\n \"\"\"Converts the plot specified by 'figure' to a PNG image and returns it.\"\"\"\n # Save the plot to a PNG in memory.\n buf = io.BytesIO()\n figure.savefig(buf, format='png')\n # Closing the figure prevents it from being displayed directly inside\n # the notebook.\n plt.close(figure)\n buf.seek(0)\n # Convert PNG buffer to TF image\n image = tf.image.decode_png(buf.getvalue(), channels=4)\n # Add the batch dimension\n image = tf.expand_dims(image, 0)\n return image\n\n\ndef np_array_figure(arr):\n fig, ax = plt.subplots(figsize=(6, 6))\n im = ax.imshow(arr.T, origin='lower', cmap='hot', interpolation='nearest')\n fig.colorbar(im, ax=ax)\n return plot_to_image(fig)\n\n\ndef sinkhorn_logspace(logits_rows, logits_cols, costs, n_steps,\n entropy_strength):\n \"\"\"Sinkhorn algorithm for (unbalanced) entropy-regularized optimal transport.\n\n The updates are computed in log-space and are thus more stable.\n\n Args:\n logits_rows: (..., n) tensor with the logits of the row-sum constraint\n logits_cols: (..., m) tensor with the logits of the column-sum constraint\n costs: (..., n, m) tensor holding the transportation costs\n n_steps: How many Sinkhorn iterations to perform.\n entropy_strength: The strength of the entropic regularizer\n\n Returns:\n (..., n, m) tensor with the computation optimal transportation matrices\n \"\"\"\n assert n_steps > 0\n assert entropy_strength > 0\n\n logits_rows = tf.expand_dims(logits_rows, axis=-1)\n logits_cols = tf.expand_dims(logits_cols, axis=-2)\n log_kernel = -costs / entropy_strength + logits_rows + logits_cols\n\n log_lbd_cols = tf.zeros_like(logits_cols)\n for _ in range(n_steps):\n log_lbd_rows = logits_rows - tf.reduce_logsumexp(\n log_kernel + 
log_lbd_cols, axis=-1, keepdims=True)\n log_lbd_cols = logits_cols - tf.reduce_logsumexp(\n log_kernel + log_lbd_rows, axis=-2, keepdims=True)\n return tf.exp(log_lbd_cols + log_kernel + log_lbd_rows)\n\n\[email protected]\ndef induced_coupling(similarity_matrix, n_steps=3, entropy_strength=0.0001):\n \"\"\"Calculates the coupling induced by the similarity matrix.\"\"\"\n dist_v = tf.ones(similarity_matrix.shape[0])\n dist_v /= tf.reduce_sum(dist_v)\n dist_v = tf.math.log(dist_v)\n coupling = tf.stop_gradient(sinkhorn_logspace(\n dist_v,\n dist_v,\n 1 - similarity_matrix,\n n_steps=n_steps,\n entropy_strength=entropy_strength))\n return coupling\n",
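A minimal eager-mode sketch of induced_coupling above on a toy similarity matrix; the matrix values, step count, and entropy strength are illustrative. Both marginals of the returned coupling approximate the uniform distribution that induced_coupling fixes internally.

import tensorflow as tf

similarity = tf.constant([[1.0, 0.1, 0.2],
                          [0.1, 1.0, 0.3],
                          [0.2, 0.3, 1.0]])
coupling = induced_coupling(similarity, n_steps=20, entropy_strength=0.05)
print(tf.reduce_sum(coupling, axis=0))  # ~[1/3, 1/3, 1/3]
print(tf.reduce_sum(coupling, axis=1))  # ~[1/3, 1/3, 1/3]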
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"CDP smooth sensitivity method for computing differentially private quantiles.\n\nThe smooth sensitivity method is described in\n\"Smooth Sensitivity and Sampling in Private Data Analysis\" by Nissim,\nRaskhodnikova, and Smith\n(https://cs-people.bu.edu/ads22/pubs/NRS07/NRS07-full-draft-v1.pdf). Details for\nthe CDP noise distribution appear in Section 3.1 of \"Average-Case Averages:\nPrivate Algorithms for Smooth Sensitivity and Mean Estimation\" by Bun and\nSteinke (NeurIPS 2019). Details for optimizing t, s, and sigma appear in\nSection 3.1.1 of the same paper.\n\"\"\"\n\nimport numpy as np\n\nfrom dp_multiq import base\nfrom dp_multiq import smooth_utils\n\n\ndef compute_triples(eps, ts):\n \"\"\"Returns triples of form (t, log(s), sigma) for hyperparameter optimization.\n\n Args:\n eps: Privacy parameter epsilon.\n ts: Array of possible smooth sensitivity parameters.\n \"\"\"\n triples = np.empty([len(ts), 3])\n for t_idx in range(len(ts)):\n t = ts[t_idx]\n triples[t_idx, 0] = t\n sigma = opt_sigma(eps, t)\n triples[t_idx, 2] = sigma\n triples[t_idx, 1] = -1.5 * (sigma**2) + np.log(eps - (t / sigma))\n return triples\n\n\ndef opt_sigma(eps, t):\n \"\"\"Returns optimal sigma as detailed in Section 3.1.1 of Bun and Steinke.\n\n Args:\n eps: Privacy parameter epsilon.\n t: Smooth sensitivity parameter.\n \"\"\"\n return np.real(np.roots([5 * eps / t, -5, 0, -1])[0])\n\n\ndef lln(sigma):\n \"\"\"Returns a sample from the Laplace Log-Normal distribution.\n\n Args:\n sigma: Sigma parameter for the Laplace Log-Normal distribution.\n \"\"\"\n return np.random.laplace() * np.exp(sigma * np.random.normal())\n\n\ndef csmooth(sorted_data, data_low, data_high, qs, divided_eps, ts):\n \"\"\"Returns eps^2/2-CDP quantile estimates for qs.\n\n Args:\n sorted_data: Array of data points sorted in increasing order.\n data_low: Lower limit for any differentially private quantile output value.\n data_high: Upper limit for any differentially private quantile output value.\n qs: Increasing array of quantiles in [0,1].\n divided_eps: Privacy parameter epsilon. 
Assumes eps has already been divided\n so that the overall desired privacy guarantee is achieved.\n ts: Array of smooth sensitivity parameters, one for each q in qs.\n \"\"\"\n sorted_data = np.clip(sorted_data, data_low, data_high)\n o = np.empty(len(qs))\n triples = compute_triples(divided_eps, ts)\n for i in range(len(qs)):\n t, log_s, sigma = triples[i]\n true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])\n true_quantile_value = sorted_data[true_quantile_idx]\n laplace_log_normal_noise = lln(sigma)\n log_sensitivity = smooth_utils.compute_log_smooth_sensitivity(\n sorted_data, data_low, data_high, true_quantile_idx, t)\n noise = np.sign(laplace_log_normal_noise) * np.exp(\n log_sensitivity + np.log(np.abs(laplace_log_normal_noise)) - log_s)\n o[i] = true_quantile_value + noise\n o = np.clip(o, data_low, data_high)\n return np.sort(o)\n\n\ndef log_choose_triple_idx(triples, eps, log_sensitivities):\n \"\"\"Returns triple (t, log_s, sigma) that minimizes noisy statistic variance.\n\n Args:\n triples: Array with entries of form (t, log_s, sigma).\n eps: Privacy parameter epsilon.\n log_sensitivities: Log(t smooth sensitivity) for each t in triples.\n \"\"\"\n variances = np.empty(len(triples))\n for triple_idx in range(len(triples)):\n numerator = 2 * (np.exp(2 * log_sensitivities[triple_idx]))\n denominator = np.exp(-5 * (triples[triple_idx][2]**2)) * (\n (eps - (triples[triple_idx][0] / triples[triple_idx][2]))**2)\n variances[triple_idx] = numerator / denominator\n return np.argmin(variances)\n\n\ndef csmooth_tune_and_return_ts(sorted_data, data_low, data_high, qs,\n divided_eps, log_t_low, log_t_high, num_t):\n \"\"\"Returns ts minimizing variance for data and each q under ~eps^2/2-CDP.\n\n Args:\n sorted_data: Array of data points sorted in increasing order.\n data_low: Lower limit for any differentially private quantile output value.\n data_high: Upper limit for any differentially private quantile output value.\n qs: Increasing array of quantiles in [0,1].\n divided_eps: Privacy parameter epsilon. 
Assumes eps has already been divided\n so that the overall desired privacy guarantee is achieved.\n log_t_low: Tuning range for t has lower bound 10^(log_t_low).\n log_t_high: Tuning range for t has upper bound 10^(log_t_high).\n num_t: Number of logarithmically spaced t used to populate tuning range.\n \"\"\"\n sorted_data = np.clip(sorted_data, data_low, data_high)\n triples = compute_triples(divided_eps,\n np.logspace(log_t_low, log_t_high, num_t))\n num_qs = len(qs)\n ts = np.empty(num_qs)\n for i in range(num_qs):\n true_quantile_idx = base.quantile_index(len(sorted_data), qs[i])\n log_sensitivities = np.zeros(len(triples))\n for triple_idx in range(len(triples)):\n t = triples[triple_idx, 0]\n log_sensitivities[\n triple_idx] = smooth_utils.compute_log_smooth_sensitivity(\n sorted_data, data_low, data_high, true_quantile_idx, t)\n ts[i] = triples[log_choose_triple_idx(triples, divided_eps,\n log_sensitivities)][0]\n return ts\n\n\ndef csmooth_tune_t_experiment(eps, num_samples, num_trials, num_quantiles_range,\n data_low, data_high, log_t_low, log_t_high,\n num_t):\n \"\"\"Returns 2-D array of ts, tuned for each (num_quantiles, quantile) pair.\n\n Args:\n eps: Privacy parameter epsilon.\n num_samples: Number of standard Gaussian samples to draw for each trial.\n num_trials: Number of trials to average.\n num_quantiles_range: Array of number of quantiles to estimate.\n data_low: Lower bound for data, used by CSmooth.\n data_high: Upper bound for data, used by CSmooth.\n log_t_low: Tuning range for t has lower bound 10^(log_t_low).\n log_t_high: Tuning range for t has upper bound 10^(log_t_high).\n num_t: Number of logarithmically spaced t used to populate tuning range.\n \"\"\"\n ts = [np.zeros(num_quantiles) for num_quantiles in num_quantiles_range]\n num_quantiles_idx = 0\n for num_quantiles_idx in range(len(num_quantiles_range)):\n num_quantiles = num_quantiles_range[num_quantiles_idx]\n divided_eps = eps / np.sqrt(num_quantiles)\n for _ in range(num_trials):\n sorted_data = base.gen_gaussian(num_samples, 0, 1)\n qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]\n ts[num_quantiles_idx] += csmooth_tune_and_return_ts(\n sorted_data, data_low, data_high, qs, divided_eps, log_t_low,\n log_t_high, num_t) / num_trials\n print(\"Finished num_quantiles: {}\".format(num_quantiles))\n return ts\n",
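A hedged end-to-end sketch of the CSmooth quantile estimator above, mirroring csmooth_tune_t_experiment: split the budget across quantiles, tune t per quantile, then estimate. It assumes the dp_multiq package (base, smooth_utils) is importable as in this module; the data bounds and tuning ranges are illustrative.

import numpy as np

eps, num_quantiles = 1.0, 3
qs = np.linspace(0, 1, num_quantiles + 2)[1:-1]   # [0.25, 0.5, 0.75]
divided_eps = eps / np.sqrt(num_quantiles)        # per-quantile budget, as in the experiment helper
sorted_data = base.gen_gaussian(1000, 0, 1)       # sorted standard-Gaussian sample
ts = csmooth_tune_and_return_ts(sorted_data, -3.0, 3.0, qs, divided_eps,
                                log_t_low=-2, log_t_high=0, num_t=10)
estimates = csmooth(sorted_data, -3.0, 3.0, qs, divided_eps, ts)  # increasing, clipped to [-3, 3]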
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=logging-format-interpolation\n# pylint: disable=unused-import\n# pylint: disable=protected-access\n# pylint: disable=g-direct-tensorflow-import\n# pylint: disable=g-long-lambda\n\nr\"\"\"Docs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport collections\nimport heapq\nimport os\nimport sys\nimport time\nimport traceback\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom meta_pseudo_labels import common_utils\nfrom meta_pseudo_labels import data_utils\n\nfrom tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding\nfrom tensorflow.python.tpu import tpu_feed\n\n\nMODEL_SCOPE = 'model'\n\n\ndef eval_step_fn(params, model):\n \"\"\"Build `step_fn` for eval.\"\"\"\n dtypes = [tf.bfloat16 if params.use_bfloat16 else tf.float32,\n tf.float32, tf.float32]\n batch_size = params.eval_batch_size // params.num_replicas\n image_size = (params.eval_image_size if 'eval_image_size' in params\n else params.image_size)\n shapes = [[batch_size, image_size, image_size, 3],\n [batch_size, params.num_classes],\n [batch_size]]\n\n if params.use_xla_sharding and params.num_cores_per_replica > 1:\n q = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=3,\n host_id=0,\n input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],\n [1, 1], [1]],\n device_assignment=params.device_assignment)\n q.set_tuple_types(dtypes)\n q.set_tuple_shapes(shapes)\n images, labels, mask = q.generate_dequeue_op()\n images = xla_sharding.split(images, 2, params.num_cores_per_replica)\n else:\n with tf.device(tf.tpu.core(0)):\n images, labels, mask = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,\n shapes=shapes)\n\n if len(labels.shape) > 1: # `labels` is one_hot. 
turn it to `int.32`\n labels = tf.argmax(labels, axis=-1, output_type=tf.int32)\n labels = tf.expand_dims(labels, axis=-1)\n _ = tf.train.get_or_create_global_step()\n\n with tf.variable_scope(MODEL_SCOPE):\n logits = model(images, training=False)\n logits = tf.cast(logits, tf.float32)\n\n return logits, labels, mask\n\n\nclass Supervised(object):\n \"\"\"Supervised information.\"\"\"\n\n def __init__(self):\n step_info = collections.OrderedDict()\n self.step_info = step_info\n\n def outfeed_signature(self):\n \"\"\"Returns the sigature of `step_info` as returned by `step_fn`.\"\"\"\n return self.step_info\n\n def step_fn(self, params, model):\n \"\"\"A single step for supervised learning.\"\"\"\n\n batch_size = params.train_batch_size // params.num_replicas\n dtypes = [tf.bfloat16 if params.use_bfloat16 else tf.float32, tf.float32]\n shapes = [[batch_size, params.image_size, params.image_size, 3],\n [batch_size, params.num_classes]]\n\n if params.use_xla_sharding and params.num_cores_per_replica > 1:\n q = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=2,\n host_id=0,\n input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],\n [1, 1]],\n device_assignment=params.device_assignment)\n q.set_tuple_types(dtypes)\n q.set_tuple_shapes(shapes)\n images, labels = q.generate_dequeue_op()\n images = xla_sharding.split(images, 2, params.num_cores_per_replica)\n else:\n with tf.device(tf.tpu.core(0)):\n images, labels = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,\n shapes=shapes)\n\n if labels.dtype == tf.int32:\n labels = tf.one_hot(labels, depth=params.num_classes, dtype=tf.float32)\n global_step = tf.train.get_or_create_global_step()\n\n train_batch_size = tf.cast(params.train_batch_size, tf.float32)\n num_replicas = tf.cast(params.num_replicas, tf.float32)\n\n with tf.variable_scope(MODEL_SCOPE):\n logits = model(images, training=True)\n\n if 'noisy_student' in params.dataset_name.lower():\n cross_entropy = labels * tf.nn.log_softmax(logits, axis=-1)\n cross_entropy = tf.reduce_sum(-cross_entropy) / train_batch_size\n else:\n cross_entropy = tf.losses.softmax_cross_entropy(\n onehot_labels=labels, logits=logits,\n label_smoothing=params.label_smoothing,\n reduction=tf.losses.Reduction.SUM) / train_batch_size\n\n l2_reg_rate = tf.cast(params.weight_decay / params.num_replicas, tf.float32)\n weight_dec = common_utils.get_l2_loss()\n total_loss = cross_entropy + weight_dec * l2_reg_rate\n\n variables = tf.trainable_variables()\n gradients = tf.gradients(total_loss, variables)\n gradients = [tf.tpu.cross_replica_sum(g) for g in gradients]\n gradients, grad_norm = tf.clip_by_global_norm(gradients, params.grad_bound)\n\n learning_rate, optimizer = common_utils.get_optimizer(params)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n train_op = tf.cond(\n tf.math.is_finite(grad_norm),\n lambda: optimizer.apply_gradients(zip(gradients, variables),\n global_step=global_step),\n tf.no_op)\n with tf.control_dependencies(update_ops + [train_op]):\n ema_train_op = common_utils.setup_ema(params,\n f'{MODEL_SCOPE}/{model.name}')\n\n with tf.control_dependencies([ema_train_op]):\n logs = collections.OrderedDict()\n logs['global_step'] = tf.cast(global_step, tf.float32)\n logs['loss/total'] = total_loss\n logs['loss/weight_decay'] = weight_dec / num_replicas\n logs['loss/cross_entropy'] = cross_entropy\n logs['loss/lr'] = tf.identity(learning_rate) / num_replicas\n logs['loss/grad_norm'] = grad_norm / num_replicas\n\n tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]\n 
self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}\n outfeed_enqueue_op = tf.cond(\n common_utils.should_log(params),\n lambda: tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors), tf.no_op)\n return outfeed_enqueue_op\n\n\nclass UDA(object):\n \"\"\"UDA (https://arxiv.org/abs/1904.12848).\"\"\"\n\n def __init__(self):\n self.step_info = collections.OrderedDict()\n\n def outfeed_signature(self):\n \"\"\"Returns the sigature of `step_info` as returned by `step_fn`.\"\"\"\n return self.step_info\n\n @staticmethod\n def build_uda_cross_entropy(params, model, all_images, l_labels):\n \"\"\"Compute the UDA loss.\"\"\"\n train_batch_size = params.train_batch_size\n num_replicas = params.num_replicas\n uda_data = params.uda_data\n batch_size = train_batch_size // num_replicas\n\n labels = {}\n if l_labels.dtype == tf.int32: # l_labels is sparse. turn into one_hot\n labels['l'] = tf.one_hot(l_labels, params.num_classes, dtype=tf.float32)\n else:\n labels['l'] = l_labels\n\n global_step = tf.train.get_or_create_global_step()\n\n masks = {}\n logits = {}\n cross_entropy = {}\n all_logits = model(all_images, training=True)\n\n logits['l'], logits['u_ori'], logits['u_aug'] = tf.split(\n all_logits, [batch_size, batch_size*uda_data, batch_size*uda_data], 0)\n\n # sup loss\n cross_entropy['l'] = tf.losses.softmax_cross_entropy(\n onehot_labels=labels['l'],\n logits=logits['l'],\n label_smoothing=params.label_smoothing,\n reduction=tf.losses.Reduction.NONE)\n probs = tf.nn.softmax(logits['l'], axis=-1)\n correct_probs = tf.reduce_sum(labels['l']*probs, axis=-1)\n r = tf.cast(global_step, tf.float32) / float(params.num_train_steps)\n l_threshold = r * (1. - 1./params.num_classes) + 1. / params.num_classes\n masks['l'] = tf.less_equal(correct_probs, l_threshold)\n masks['l'] = tf.cast(masks['l'], tf.float32)\n masks['l'] = tf.stop_gradient(masks['l'])\n cross_entropy['l'] = tf.reduce_sum(cross_entropy['l']) / float(\n train_batch_size)\n\n # unsup loss\n labels['u_ori'] = tf.nn.softmax(logits['u_ori'] / params.uda_temp, axis=-1)\n labels['u_ori'] = tf.stop_gradient(labels['u_ori'])\n\n cross_entropy['u'] = (labels['u_ori'] *\n tf.nn.log_softmax(logits['u_aug'], axis=-1))\n largest_probs = tf.reduce_max(labels['u_ori'], axis=-1, keepdims=True)\n masks['u'] = tf.greater_equal(largest_probs, params.uda_threshold)\n masks['u'] = tf.cast(masks['u'], tf.float32)\n masks['u'] = tf.stop_gradient(masks['u'])\n cross_entropy['u'] = tf.reduce_sum(-cross_entropy['u']*masks['u']) / float(\n train_batch_size*uda_data)\n return logits, labels, masks, cross_entropy\n\n def step_fn(self, params, model):\n \"\"\"Separate implementation.\"\"\"\n train_batch_size = params.train_batch_size\n num_replicas = params.num_replicas\n batch_size = train_batch_size // num_replicas\n\n dtypes = [\n tf.bfloat16 if params.use_bfloat16 else tf.float32,\n tf.float32,\n tf.bfloat16 if params.use_bfloat16 else tf.float32,\n tf.bfloat16 if params.use_bfloat16 else tf.float32]\n shapes = [\n [batch_size, params.image_size, params.image_size, 3],\n [batch_size, params.num_classes],\n [batch_size*params.uda_data, params.image_size, params.image_size, 3],\n [batch_size*params.uda_data, params.image_size, params.image_size, 3]]\n\n if params.use_xla_sharding and params.num_cores_per_replica > 1:\n q = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=4,\n host_id=0,\n input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],\n [1, 1],\n [1, 1, params.num_cores_per_replica, 1],\n [1, 1, params.num_cores_per_replica, 1],],\n 
device_assignment=params.device_assignment)\n q.set_tuple_types(dtypes)\n q.set_tuple_shapes(shapes)\n l_images, l_labels, u_images_ori, u_images_aug = q.generate_dequeue_op()\n l_images = xla_sharding.split(l_images, 2,\n params.num_cores_per_replica)\n u_images_ori = xla_sharding.split(u_images_ori, 2,\n params.num_cores_per_replica)\n u_images_aug = xla_sharding.split(u_images_aug, 2,\n params.num_cores_per_replica)\n else:\n with tf.device(tf.tpu.core(0)):\n (l_images, l_labels, u_images_ori,\n u_images_aug) = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,\n shapes=shapes)\n\n all_images = tf.concat([l_images, u_images_ori, u_images_aug], axis=0)\n global_step = tf.train.get_or_create_global_step()\n num_replicas = tf.cast(params.num_replicas, tf.float32)\n\n with tf.variable_scope(MODEL_SCOPE, reuse=tf.AUTO_REUSE):\n _, _, masks, cross_entropy = UDA.build_uda_cross_entropy(\n params, model, all_images, l_labels)\n\n l2_reg_rate = tf.cast(params.weight_decay / params.num_replicas, tf.float32)\n weight_dec = common_utils.get_l2_loss()\n uda_weight = params.uda_weight * tf.minimum(\n 1., tf.cast(global_step, tf.float32) / float(params.uda_steps))\n total_loss = (cross_entropy['u'] * uda_weight +\n cross_entropy['l'] +\n weight_dec * l2_reg_rate)\n variables = tf.trainable_variables()\n gradients = tf.gradients(total_loss, variables)\n gradients = [tf.tpu.cross_replica_sum(g) for g in gradients]\n gradients, grad_norm = tf.clip_by_global_norm(gradients, params.grad_bound)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n learning_rate, optimizer = common_utils.get_optimizer(params)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(zip(gradients, variables),\n global_step=global_step)\n\n with tf.control_dependencies([train_op]):\n ema_train_op = common_utils.setup_ema(\n params, f'{MODEL_SCOPE}/{model.name}')\n\n with tf.control_dependencies([ema_train_op]):\n logs = collections.OrderedDict()\n logs['global_step'] = tf.cast(global_step, tf.float32)\n logs['loss/total'] = total_loss\n logs['loss/cross_entropy'] = cross_entropy['l']\n logs['loss/lr'] = tf.identity(learning_rate) / num_replicas\n logs['loss/grad_norm'] = tf.identity(grad_norm) / num_replicas\n logs['loss/weight_dec'] = weight_dec / num_replicas\n\n logs['uda/cross_entropy'] = cross_entropy['u']\n logs['uda/u_ratio'] = tf.reduce_mean(masks['u']) / num_replicas\n logs['uda/l_ratio'] = tf.reduce_mean(masks['l']) / num_replicas\n logs['uda/weight'] = uda_weight / num_replicas\n\n tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]\n self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}\n outfeed_enqueue_op = tf.cond(\n common_utils.should_log(params),\n lambda: tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors), tf.no_op)\n return outfeed_enqueue_op\n\n\nclass MPL(object):\n \"\"\"Meta Pseudo Labels.\"\"\"\n\n def __init__(self):\n self.step_info = collections.OrderedDict()\n\n def outfeed_signature(self):\n \"\"\"Returns the sigature of `step_info` as returned by `step_fn`.\"\"\"\n return self.step_info\n\n def step_fn(self, params, model):\n \"\"\"Separate implementation.\"\"\"\n train_batch_size = params.train_batch_size\n num_replicas = params.num_replicas\n uda_data = params.uda_data\n batch_size = train_batch_size // num_replicas\n\n dtypes = [\n tf.bfloat16 if params.use_bfloat16 else tf.float32,\n tf.float32,\n tf.bfloat16 if params.use_bfloat16 else tf.float32,\n tf.bfloat16 if params.use_bfloat16 else tf.float32]\n shapes = [\n [batch_size, params.image_size, 
params.image_size, 3],\n [batch_size, params.num_classes],\n [batch_size*params.uda_data, params.image_size, params.image_size, 3],\n [batch_size*params.uda_data, params.image_size, params.image_size, 3]]\n\n if params.use_xla_sharding and params.num_cores_per_replica > 1:\n q = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=4,\n host_id=0,\n input_partition_dims=[[1, 1, params.num_cores_per_replica, 1],\n [1, 1],\n [1, 1, params.num_cores_per_replica, 1],\n [1, 1, params.num_cores_per_replica, 1],],\n device_assignment=params.device_assignment)\n q.set_tuple_types(dtypes)\n q.set_tuple_shapes(shapes)\n l_images, l_labels, u_images_ori, u_images_aug = q.generate_dequeue_op()\n l_images = xla_sharding.split(l_images, 2,\n params.num_cores_per_replica)\n u_images_ori = xla_sharding.split(u_images_ori, 2,\n params.num_cores_per_replica)\n u_images_aug = xla_sharding.split(u_images_aug, 2,\n params.num_cores_per_replica)\n else:\n with tf.device(tf.tpu.core(0)):\n (l_images, l_labels, u_images_ori,\n u_images_aug) = tf.raw_ops.InfeedDequeueTuple(dtypes=dtypes,\n shapes=shapes)\n global_step = tf.train.get_or_create_global_step()\n num_replicas = tf.cast(params.num_replicas, tf.float32)\n\n all_images = tf.concat([l_images, u_images_ori, u_images_aug], axis=0)\n\n # all calls to teacher\n with tf.variable_scope('teacher', reuse=tf.AUTO_REUSE):\n logits, labels, masks, cross_entropy = UDA.build_uda_cross_entropy(\n params, model, all_images, l_labels)\n\n # 1st call to student\n with tf.variable_scope(MODEL_SCOPE):\n u_aug_and_l_images = tf.concat([u_images_aug, l_images], axis=0)\n logits['s_on_u_aug_and_l'] = model(u_aug_and_l_images, training=True)\n logits['s_on_u'], logits['s_on_l_old'] = tf.split(\n logits['s_on_u_aug_and_l'],\n [u_images_aug.shape[0].value, l_images.shape[0].value], axis=0)\n\n # for backprop\n cross_entropy['s_on_u'] = tf.losses.softmax_cross_entropy(\n onehot_labels=tf.stop_gradient(tf.nn.softmax(logits['u_aug'], -1)),\n logits=logits['s_on_u'],\n label_smoothing=params.label_smoothing,\n reduction=tf.losses.Reduction.NONE)\n cross_entropy['s_on_u'] = tf.reduce_sum(cross_entropy['s_on_u']) / float(\n train_batch_size*uda_data)\n\n # for Taylor\n cross_entropy['s_on_l_old'] = tf.losses.softmax_cross_entropy(\n onehot_labels=labels['l'],\n logits=logits['s_on_l_old'],\n reduction=tf.losses.Reduction.SUM)\n cross_entropy['s_on_l_old'] = tf.tpu.cross_replica_sum(\n cross_entropy['s_on_l_old']) / float(train_batch_size)\n shadow = tf.get_variable(\n name='cross_entropy_old', shape=[], trainable=False, dtype=tf.float32)\n shadow_update = tf.assign(shadow, cross_entropy['s_on_l_old'])\n\n w_s = {}\n g_s = {}\n g_n = {}\n lr = {}\n optim = {}\n w_s['s'] = [w for w in tf.trainable_variables()\n if w.name.lower().startswith(MODEL_SCOPE)]\n g_s['s_on_u'] = tf.gradients(cross_entropy['s_on_u'], w_s['s'])\n # g_s['s_on_u'] = [tf.tpu.cross_replica_sum(g) for g in g_s['s_on_u']]\n\n lr['s'] = common_utils.get_learning_rate(\n params,\n initial_lr=params.mpl_student_lr,\n num_warmup_steps=params.mpl_student_lr_warmup_steps,\n num_wait_steps=params.mpl_student_lr_wait_steps)\n lr['s'], optim['s'] = common_utils.get_optimizer(\n params, learning_rate=lr['s'])\n optim['s']._create_slots(w_s['s']) # pylint: disable=protected-access\n update_ops = [op for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if op.name.startswith(f'train/{MODEL_SCOPE}/')]\n\n with tf.control_dependencies(update_ops + [shadow_update]):\n g_s['s_on_u'] = common_utils.add_weight_decay(\n params, 
w_s['s'], g_s['s_on_u'])\n g_s['s_on_u'], g_n['s_on_u'] = tf.clip_by_global_norm(\n g_s['s_on_u'], params.grad_bound)\n train_op = optim['s'].apply_gradients(zip(g_s['s_on_u'], w_s['s']))\n\n with tf.control_dependencies([train_op]):\n ema_train_op = common_utils.setup_ema(\n params, name_scope=f'{MODEL_SCOPE}/{model.name}')\n\n # 2nd call to student\n with tf.control_dependencies([ema_train_op]):\n with tf.variable_scope(MODEL_SCOPE, reuse=tf.AUTO_REUSE):\n logits['s_on_l_new'] = model(l_images, training=True)\n\n cross_entropy['s_on_l_new'] = tf.losses.softmax_cross_entropy(\n onehot_labels=labels['l'],\n logits=logits['s_on_l_new'],\n reduction=tf.losses.Reduction.SUM)\n cross_entropy['s_on_l_new'] = tf.tpu.cross_replica_sum(\n cross_entropy['s_on_l_new']) / float(train_batch_size)\n\n dot_product = cross_entropy['s_on_l_new'] - shadow\n # dot_product = tf.clip_by_value(\n # dot_product,\n # clip_value_min=-params.mpl_dot_product_bound,\n # clip_value_max=params.mpl_dot_product_bound)\n moving_dot_product = tf.get_variable(\n 'moving_dot_product', shape=[], trainable=False, dtype=tf.float32)\n moving_dot_product_update = tf.assign_sub(\n moving_dot_product, 0.01 * (moving_dot_product - dot_product))\n with tf.control_dependencies([moving_dot_product_update]):\n dot_product = dot_product - moving_dot_product\n dot_product = tf.stop_gradient(dot_product)\n cross_entropy['mpl'] = tf.losses.softmax_cross_entropy(\n onehot_labels=tf.stop_gradient(tf.nn.softmax(logits['u_aug'], axis=-1)),\n logits=logits['u_aug'],\n reduction=tf.losses.Reduction.NONE)\n cross_entropy['mpl'] = tf.reduce_sum(cross_entropy['mpl']) / float(\n train_batch_size*uda_data)\n\n # teacher train op\n uda_weight = params.uda_weight * tf.minimum(\n 1., tf.cast(global_step, tf.float32) / float(params.uda_steps))\n teacher_loss = (cross_entropy['u'] * uda_weight +\n cross_entropy['l'] +\n cross_entropy['mpl'] * dot_product)\n w_s['t'] = [w for w in tf.trainable_variables() if 'teacher' in w.name]\n g_s['t'] = tf.gradients(teacher_loss, w_s['t'])\n g_s['t'] = common_utils.add_weight_decay(params, w_s['t'], g_s['t'])\n g_s['t'], g_n['t'] = tf.clip_by_global_norm(g_s['t'], params.grad_bound)\n lr['t'] = common_utils.get_learning_rate(\n params,\n initial_lr=params.mpl_teacher_lr,\n num_warmup_steps=params.mpl_teacher_lr_warmup_steps)\n lr['t'], optim['t'] = common_utils.get_optimizer(params,\n learning_rate=lr['t'])\n\n teacher_train_op = optim['t'].apply_gradients(zip(g_s['t'], w_s['t']),\n global_step=global_step)\n\n with tf.control_dependencies([teacher_train_op]):\n logs = collections.OrderedDict()\n logs['global_step'] = tf.cast(global_step, tf.float32)\n\n logs['cross_entropy/student_on_u'] = cross_entropy['s_on_u']\n logs['cross_entropy/student_on_l'] = (cross_entropy['s_on_l_new'] /\n num_replicas)\n logs['cross_entropy/teacher_on_u'] = cross_entropy['u']\n logs['cross_entropy/teacher_on_l'] = cross_entropy['l']\n logs['lr/student'] = tf.identity(lr['s']) / num_replicas\n logs['lr/teacher'] = tf.identity(lr['t']) / num_replicas\n logs['mpl/dot_product'] = dot_product / num_replicas\n logs['mpl/moving_dot_product'] = moving_dot_product / num_replicas\n logs['uda/u_ratio'] = tf.reduce_mean(masks['u']) / num_replicas\n logs['uda/l_ratio'] = tf.reduce_mean(masks['l']) / num_replicas\n logs['uda/weight'] = uda_weight / num_replicas\n\n tensors = [tf.expand_dims(t, axis=0) for t in logs.values()]\n self.step_info = {k: [tf.float32, [1]] for k in logs.keys()}\n def outfeed(tensors):\n with 
tf.device(tf.tpu.core(params.num_cores_per_replica-1)):\n return tf.raw_ops.OutfeedEnqueueTuple(inputs=tensors)\n\n outfeed_enqueue_op = tf.cond(\n common_utils.should_log(params), lambda: outfeed(tensors), tf.no_op)\n\n return outfeed_enqueue_op\n",
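The UDA branch above sharpens the teacher's distribution on un-augmented unlabeled images with a temperature, then masks out low-confidence examples before averaging the consistency loss. Below is a minimal eager-mode sketch of just that masking step, outside the TPU infeed and variable-scope machinery; the shapes, temperature, and threshold are illustrative stand-ins for params.uda_temp and params.uda_threshold.

import tensorflow as tf

batch, num_classes = 4, 10
uda_temp, uda_threshold = 0.4, 0.8
logits_u_ori = tf.random.normal([batch, num_classes])  # teacher logits on original unlabeled images
logits_u_aug = tf.random.normal([batch, num_classes])  # teacher logits on augmented copies

targets = tf.stop_gradient(tf.nn.softmax(logits_u_ori / uda_temp, axis=-1))
per_example = -tf.reduce_sum(targets * tf.nn.log_softmax(logits_u_aug, axis=-1), axis=-1)
mask = tf.cast(tf.reduce_max(targets, axis=-1) >= uda_threshold, tf.float32)
unsup_loss = tf.reduce_sum(per_example * mask) / float(batch)  # the code above divides by batch*uda_data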
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains logging/saving utilities.\"\"\"\nimport csv\nimport tensorflow as tf\n\n\ndef log_row(csv_file, row):\n with tf.gfile.Open(csv_file, 'ab') as csvfile:\n cw = csv.writer(\n csvfile, delimiter='\\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n cw.writerow(row)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# pylint:disable=line-too-long\nr\"\"\"Counts average audio length.\n\n\"\"\"\n# pylint:enable=line-too-long\n\nimport os\nfrom typing import Any, Dict, Iterable, List, Tuple\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport apache_beam as beam\nimport numpy as np\nimport tensorflow as tf\n\nfrom non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils\n\nflags.DEFINE_string('output_file', None, 'Output file.')\nflags.DEFINE_boolean('debug', False, 'Whether to debug.')\nflags.DEFINE_list(\n 'audio_keys', ['audio', 'processed/audio_samples', 'audio_waveform'],\n 'Possible audio keys in tf.Examples.')\nflags.DEFINE_list(\n 'sr_keys', [], 'Possible sample rate keys in tf.Examples.')\n\nFLAGS = flags.FLAGS\n\n\ndef duration_from_tfex(k_v):\n \"\"\"Duration from a tf.Example.\"\"\"\n k, ex = k_v\n\n audio_vals = []\n for possible_audio_key in FLAGS.audio_keys:\n if possible_audio_key in ex.features.feature:\n logging.info('Found audio key: %s', possible_audio_key)\n audio_feats = ex.features.feature[possible_audio_key]\n cur_audio_vals = (audio_feats.int64_list.value or\n audio_feats.float_list.value)\n assert cur_audio_vals\n audio_vals.append(cur_audio_vals)\n assert len(audio_vals) == 1, ex\n audio_vals = audio_vals[0]\n logging.info('%s audio: %s', k, audio_vals)\n\n sr_vals = []\n for possible_sr_key in FLAGS.sr_keys:\n if possible_sr_key in ex.features.feature:\n logging.info('Found sample rate key: %s', possible_sr_key)\n cur_audio = ex.features.feature[possible_sr_key].int64_list.value[0]\n sr_vals.append(cur_audio)\n assert len(sr_vals) in [0, 1], ex\n if len(sr_vals) == 1:\n sr = sr_vals[0]\n else:\n logging.info('Used default sr.')\n sr = 16000\n\n return len(audio_vals) / float(sr)\n\n\ndef durations(root, ds_file, ds_name,\n reader_type, suffix):\n \"\"\"Beam pipeline for durations from a particular file or glob.\"\"\"\n logging.info('Reading from %s: (%s, %s)', reader_type, ds_name, ds_file)\n input_examples = audio_to_embeddings_beam_utils.reader_functions[reader_type](\n root, ds_file, f'Read-{suffix}')\n return input_examples | f'Lens-{suffix}' >> beam.Map(duration_from_tfex)\n\n\ndef duration_and_num_examples(\n root, ds_files, ds_name,\n reader_type):\n \"\"\"Beam pipeline for durations from a list of files or globs.\"\"\"\n durations_l = []\n for i, ds_file in enumerate(ds_files):\n cur_dur = durations(\n root, ds_file, ds_name, reader_type, suffix=f'{ds_name}_{i}')\n durations_l.append(cur_dur)\n def _mean_and_count(durs):\n return np.mean(durs), len(durs)\n return (durations_l\n | f'Flatten-{ds_name}' >> beam.Flatten()\n | f'ToList-{ds_name}' >> beam.combiners.ToList()\n | f'Stats-{ds_name}' >> beam.Map(_mean_and_count))\n\n\ndef get_dataset_info_dict(debug):\n \"\"\"Get dictionary of dataset info.\"\"\"\n def _tfds_fns(ds_name):\n fns = [\n x # pylint:disable=g-complex-comprehension\n 
for s in ('train', 'validation', 'test')\n for x in audio_to_embeddings_beam_utils._tfds_filenames(ds_name, s)] # pylint:disable=protected-access\n fns = [fns] # TFRecords require a list.\n return (fns, 'tfrecord')\n\n if debug:\n dss = {'savee': _tfds_fns('savee')}\n else:\n dss = {\n 'crema_d': _tfds_fns('crema_d'),\n 'savee': _tfds_fns('savee'),\n 'speech_commands': _tfds_fns('speech_commands'),\n 'voxceleb': _tfds_fns('voxceleb'),\n }\n\n return dss\n\n\ndef main(unused_argv):\n dss = get_dataset_info_dict(FLAGS.debug)\n\n out_file = FLAGS.output_file\n assert not tf.io.gfile.exists(out_file)\n if not tf.io.gfile.exists(os.path.dirname(out_file)):\n tf.io.gfile.makedirs(os.path.dirname(out_file))\n\n pipeline_option = None\n\n with beam.Pipeline(pipeline_option) as root:\n stats = [] # (ds name, avg duration, num examples)\n for ds_name, (ds_files, reader_type) in dss.items():\n cur_stat = duration_and_num_examples(root, ds_files, ds_name, reader_type)\n cur_stat = cur_stat | f'AddName-{ds_name}' >> beam.Map(\n lambda x, name: (name, x[0], x[1]), name=ds_name)\n stats.append(cur_stat)\n # Write output.\n _ = (\n stats\n | 'CombineDSes' >> beam.Flatten()\n | 'ToStr' >> beam.Map(lambda x: ','.join([str(r) for r in x]))\n | 'WriteOutput' >> beam.io.WriteToText(out_file, num_shards=1))\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('output_file')\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Common utilities for colab notebooks.\"\"\"\n\nimport collections\nimport datetime\nimport functools\nimport sys\nfrom typing import Any, List, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\n\n# pylint: disable=g-import-not-at-top\n# Only import version_utils if we are in a colab\nif 'google.colab' in sys.modules:\n from cloud_covid19_forecasting.etl.tools import version_utils\n# pylint: enable=g-import-not-at-top\n\nBQ_TABLES = {\n 'japan': 'covid_prod_features.jp_prefecture_ts',\n 'state': 'covid_prod_features.us_state_ts',\n 'county': 'covid_prod_features.us_county_ts',\n}\n\n# pylint: disable=line-too-long\nGT_FIELD_NAMES = {\n 'japan':\n 'kaz_deaths,open_gt_jp_deaths,kaz_confirmed_cases,open_gt_jp_confirmed_cases',\n 'state':\n 'jhu_state_confirmed_cases,jhu_state_deaths',\n 'county':\n 'jhu_county_confirmed_cases,jhu_county_deaths',\n}\n# pylint: enable=line-too-long\n\nLOC_NAME = {\n 'japan': 'pref',\n 'state': 'state_code',\n 'county': 'geo_id',\n}\n\n\ndef calculate_mape_apply_fn(row,\n average_type,\n expected_num_locations,\n min_count=None,\n min_mae=None,\n time_horizon=27,\n debug=False,\n value_type='cumulative'):\n \"\"\"Calculate MAPE, depending on various flags.\n\n From\n 'https://datascience.stackexchange.com/questions/15989/micro-average-vs-macro-average-performance-in-a-multiclass-classification-settin':\n \"A macro-average will compute the metric independently for each class and then\n take the average (hence treating all classes equally), whereas a micro-average\n will aggregate the contributions of all classes to compute the average\n metric.\"\n\n Args:\n row: A pd.Series. Should all have the same date.\n average_type: Python string. Either 'micro' or 'macro'.\n expected_num_locations: Expected number of locations.\n min_count: If not `None`, ignore values with max(gt) < min_count. Should be\n `None` if `average_type='micro'`. Only one of `min_count` and `min_mae`\n should be not `None.\n min_mae: If not `None`, ignore values with `mae < min_mae`. Should be `None`\n if `average_type='micro'`. Only one of `min_count` and `min_mae` should be\n not `None.\n time_horizon: The time horizon value to compare cumulative metrics on. The\n end of the forecast window is usually 28 days, but Reichlab uses 27 days,\n so we use that as the default. Note that `time_horizon` is 1-indexed,\n while Python arrays are 0-indexed.\n debug: Python bool. Whether to print verbose debug info.\n value_type: Python string. 
Describes the values to use for MAPE calculation.\n\n Returns:\n The appropriate MAPE score.\n \"\"\"\n assert average_type in ['micro', 'macro']\n assert expected_num_locations in [47, 51]\n assert value_type in ['cumulative', '4week', 'weekly']\n if average_type == 'micro':\n assert min_count is None, min_count\n assert min_mae is None, min_mae\n assert (min_count is None) or (min_mae is None), (min_count, min_mae)\n if 'forecast_date' in row:\n assert len(row.forecast_date.unique()) == 1, row\n cur_forecast_date = row.forecast_date.unique()[0]\n row_dat = row[['location_name', 0]]\n assert len(row_dat) == expected_num_locations, len(row_dat)\n\n # For macro average.\n mapes = []\n # For micro average.\n total_pred = None\n total_gt = None\n\n for _, loc_dat in row_dat.iterrows():\n cur_location_name = loc_dat.location_name\n cur_dat_dict = loc_dat[0]\n assert isinstance(cur_location_name, str), cur_location_name\n if not isinstance(cur_dat_dict, dict):\n assert np.isnan(cur_dat_dict), cur_dat_dict\n print(f'{cur_forecast_date} {cur_location_name} had no data, so '\n 'continuing with date...')\n continue\n # Time horizon is 1-indexed, but arrays are 0-index, so subtract 1.\n assert time_horizon <= len(cur_dat_dict['predictions'])\n if value_type == '4week':\n assert 'day_zero_gt' in cur_dat_dict, cur_dat_dict\n base_gt = cur_dat_dict['day_zero_gt']\n assert base_gt is not None, cur_dat_dict\n\n # The total increase over the last time window is\n # `dat[-1] - day_0_gt`, so we use this value in the \"4week\" case.\n # We use `day_0_gt` (rather than `gt[0]` in the forecast window) so that\n # incident cases on every day of the forecast window is possibly non-zero.\n pred_ys = (\n cur_dat_dict['predictions'][time_horizon - 1:time_horizon] - base_gt)\n gtru_ys = (\n cur_dat_dict['ground_truth'][time_horizon - 1:time_horizon] - base_gt)\n elif value_type == 'cumulative':\n # Since we are computing MAPE over cumulative values, we only care about\n # the values at the end of the forecast period.\n pred_ys = cur_dat_dict['predictions'][time_horizon - 1:time_horizon]\n gtru_ys = cur_dat_dict['ground_truth'][time_horizon - 1:time_horizon]\n else:\n assert value_type == 'weekly'\n # TODO(joelshor): We ignore `time_horizon`. 
Consider whether to use it.\n # Weekly is i=0,1,2,3, `dat[i*7 + 6] - dat[i*7]`, averaged over i.\n # However, since we only have 27 days into the future for GT, we modify\n # the last day.\n preds = cur_dat_dict['predictions']\n pred_ys = np.array([preds[7 * i + 6] - preds[7 * i] for i in range(3)] +\n [preds[7 * 3 + 5] - preds[7 * 3]])\n gtrs = cur_dat_dict['ground_truth']\n gtru_ys = np.array([gtrs[7 * i + 6] - gtrs[7 * i] for i in range(3)] +\n [gtrs[7 * 3 + 5] - gtrs[7 * 3]])\n if debug:\n print(f'{cur_forecast_date} {cur_location_name}: '\n f'gt {gtru_ys} pred {pred_ys}')\n\n if average_type == 'micro':\n if total_pred is None and total_gt is None:\n total_pred = np.zeros_like(pred_ys)\n total_gt = np.zeros_like(gtru_ys)\n assert isinstance(total_pred, np.ndarray)\n assert isinstance(total_gt, np.ndarray)\n total_pred += pred_ys\n total_gt += gtru_ys\n continue\n\n assert average_type == 'macro'\n assert isinstance(pred_ys, np.ndarray), type(pred_ys)\n assert isinstance(gtru_ys, np.ndarray), type(gtru_ys)\n assert pred_ys.size == (4 if value_type == 'weekly' else 1), pred_ys.size\n assert gtru_ys.size, gtru_ys.size\n\n cur_mape = calculate_mape(\n predictions=pred_ys.tolist(), ground_truths=gtru_ys.tolist())\n if debug:\n print(f'{cur_forecast_date} {cur_location_name}: MAPE: {cur_mape}')\n\n if min_count and max(gtru_ys) < min_count:\n mapes.append(np.nan)\n elif min_mae:\n cur_mae = calculate_mae(\n predictions=pred_ys.tolist(), ground_truth=gtru_ys.tolist())\n if cur_mae < min_mae:\n mapes.append(np.nan)\n else:\n mapes.append(cur_mape)\n else:\n mapes.append(cur_mape)\n\n if average_type == 'micro':\n if total_pred is None:\n return np.nan\n else:\n assert isinstance(total_pred, np.ndarray), total_pred\n assert isinstance(total_gt, np.ndarray), total_gt\n # We are only looking at the end of the forecast window, so it should be\n # size 1 if not weekly..\n assert total_pred.size == (4 if value_type == 'weekly' else\n 1), total_pred.size\n assert total_gt.size == total_pred.size, total_gt.size\n avg_pred = total_pred / expected_num_locations\n avg_gt = total_gt / expected_num_locations\n cur_mape = calculate_mape(\n predictions=avg_pred.tolist(), ground_truths=avg_gt.tolist())\n return cur_mape\n else:\n if len(mapes) == 0: # pylint:disable=g-explicit-length-test\n return np.nan\n else:\n assert average_type == 'macro', average_type\n # With low death counts, some prefectures are dropped, so this assert\n # isn't always helpful.\n # assert len(mapes) == expected_num_locations, len(mapes)\n return np.nanmean(mapes)\n\n\ndef get_gt(loc,\n start_date,\n end_date,\n feature_name,\n locale,\n bq_client,\n version = None,\n capitalize = True):\n \"\"\"Get ground truth in a colab.\"\"\"\n assert locale in ['japan', 'state', 'county'], locale\n\n assert feature_name in GT_FIELD_NAMES[locale], \\\n f'{feature_name} vs {GT_FIELD_NAMES[locale]}'\n\n bq_table = BQ_TABLES[locale]\n # Get the proper version.\n q = f'select DISTINCT(version) from `{bq_table}`'\n versions = bq_client.query(q).to_dataframe()\n if version:\n if version not in versions:\n raise ValueError(f'Version not found: {version} vs {versions}')\n else:\n # Get latest GT data ex \"2020-07-18 19:59:42 UTC\"\n version = versions.max()[0].strftime('%Y-%m-%d %H:%M:%S %Z')\n\n loc_field = LOC_NAME[locale]\n if capitalize:\n loc = loc.capitalize()\n q = (f'select dt,{loc_field},feature_name,feature_value '\n f'from `{bq_table}` '\n f'where dt >= \"{start_date}\" and dt <= \"{end_date}\" '\n f'and {loc_field} = \"{loc}\"'\n f'and 
feature_name = \"{feature_name}\" '\n f'and version = \"{version}\"')\n gt_pd = bq_client.query(q).to_dataframe()\n assert gt_pd.size > 0, q\n xs, ys = [], []\n for d, v in gt_pd.sort_values(by='dt')[['dt', 'feature_value']].values:\n xs.append(d)\n ys.append(v)\n return xs, ys\n\n\ndef get_all_gt(start_date,\n end_date,\n locale,\n bq_client,\n version = None,\n feature_keys = None):\n \"\"\"Return all ground truth during a certain date.\"\"\"\n assert locale in ['japan', 'state', 'county'], locale\n\n bq_table = BQ_TABLES[locale]\n # Get the proper version.If `version` is `None`, select latest.\n # If `version` is a string, use that version.\n # If `version` is a list, get all those versions.\n q = f'select DISTINCT(version) from `{bq_table}`'\n versions = bq_client.query(q).to_dataframe()\n if version is None:\n # Get latest GT data ex \"2020-07-18 19:59:42 UTC\"\n version = versions.max()[0].strftime('%Y-%m-%d %H:%M:%S %Z')\n elif isinstance(version, str):\n if not np.any(str(version) == versions):\n raise ValueError(f'Version not found: {version} vs {versions.to_numpy()}')\n else:\n assert isinstance(version, list)\n\n loc_field = LOC_NAME[locale]\n if isinstance(version, list):\n v_str = ','.join([f'\"{x}\"' for x in version])\n version_q = f'and version IN ({v_str})'\n else:\n version_q = f'and version = \"{version}\"'\n if feature_keys is not None:\n feat_str = ','.join([f'\"{x}\"' for x in feature_keys])\n feature_q = f' and feature_name IN ({feat_str})'\n else:\n feature_q = ''\n q = (f'select dt,{loc_field},feature_name,feature_value,version '\n f'from `{bq_table}` '\n f'where dt >= \"{start_date}\" and dt <= \"{end_date}\" '\n f' {version_q}{feature_q}')\n print(f'Querying GT with: \"{q}\"')\n gt_pd = bq_client.query(q).to_dataframe()\n assert gt_pd.size > 0, q\n\n if locale == 'japan':\n # Change data to standard spelling eg \"Hyōgo\" -> \"Hyogo\". 
The prefecture\n # names changed when we switched Japan data sources, so both names are\n # present in the GT tables.\n gt_pd = gt_pd.replace({loc_field: open_covid_locations_to_kaz_map()})\n\n return gt_pd\n\n\ndef get_gt_over_dates_and_versions(dates_and_gt_versions,\n locale,\n client,\n gt_feature_name,\n duration_days=28):\n \"\"\"Gets a single GT dataframe with the dates and versions requested.\"\"\"\n assert locale in ['japan', 'state'], locale\n\n # Minimize the number of reads by grouping reads of the same version.\n def _end_date_from_str(start_date):\n return (datetime.datetime.strptime(start_date, '%Y-%m-%d').date() +\n datetime.timedelta(days=duration_days)).isoformat()\n\n version_map = collections.defaultdict(list)\n for date, version in dates_and_gt_versions:\n version_map[version].append(date)\n version_map_list = [(v, min(dates), _end_date_from_str(max(dates)))\n for v, dates in version_map.items()]\n\n gt_dfs = []\n for version, start_date, end_date in version_map_list:\n # Read GT slice.\n print(f'Reading GT from {start_date} to {end_date}, version {version}...')\n gt_df = get_all_gt(\n start_date=start_date,\n end_date=end_date,\n locale=locale,\n bq_client=client,\n feature_keys=[gt_feature_name],\n version=version)\n gt_dfs.append(gt_df)\n gt_df = pd.concat(gt_dfs, axis=0).drop_duplicates()\n return gt_df\n\n\ndef get_gt_version_names(dates, version_locale, min_version, use_latest_gt,\n client):\n \"\"\"Returns either latest version or T+28 data version.\"\"\"\n assert isinstance(dates, list), type(dates)\n if use_latest_gt:\n gt_version = datetime.datetime.strptime(\n get_latest_version(client).name, '%Y-%m-%d %H:%M:%S %Z')\n gt_versions = [gt_version for _ in range(len(dates))]\n else:\n if isinstance(dates[0], str):\n dates = [datetime.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]\n gt_versions = data_version_for_retrospective(\n dates,\n min_version=min_version,\n locale={\n 'japan': 'jp_prefecture',\n 'state': 'us_state'\n }[version_locale],\n client=client)\n return [\n gt_version.strftime('%Y-%m-%d %H:%M:%S+00:00')\n for gt_version in gt_versions\n ]\n\n\ndef get_public_forecast(\n forecast_pd,\n loc,\n cur_date,\n feature_key,\n loc_key,\n expeced_forecast_len = 28,\n prediction_date_key = 'prediction_date'):\n \"\"\"Extracts a single prediction from the historical forecasts table.\"\"\"\n assert loc_key in forecast_pd.columns, (loc_key, forecast_pd.columns)\n assert prediction_date_key in forecast_pd.columns, forecast_pd.columns\n assert feature_key in forecast_pd.columns, (feature_key, forecast_pd.columns)\n\n forecast = forecast_pd[(forecast_pd[loc_key] == loc)\n & (forecast_pd.forecast_date == cur_date) &\n (forecast_pd[prediction_date_key] > cur_date)]\n assert forecast.size > 0, (loc, cur_date, feature_key, loc_key)\n forecast_vals = forecast[[prediction_date_key,\n feature_key]].sort_values(by=prediction_date_key)\n xs = forecast_vals[prediction_date_key].to_numpy()\n ys = forecast_vals[feature_key].astype(np.float32).to_numpy()\n assert isinstance(xs[0], datetime.date), type(xs[0])\n assert xs.size == expeced_forecast_len, f'xs size: {xs.size}'\n assert ys.size == expeced_forecast_len, f'xs size: {ys.size}'\n return xs, ys\n\n\ndef calculate_mape(predictions, ground_truths):\n \"\"\"Calculate MAPE in a colab friendly way.\"\"\"\n if not predictions or len(predictions) != len(ground_truths):\n raise ValueError(\n 'Predictions and Ground Truth should be of equal length and non-empty')\n\n error = 0.\n num_nonzero_ground_truth = 0\n for 
prediction, ground_truth in zip(predictions, ground_truths):\n if ground_truth != 0:\n error += abs((prediction - ground_truth) / ground_truth)\n num_nonzero_ground_truth += 1\n\n if num_nonzero_ground_truth == 0:\n return float('nan')\n\n return 100 * error / num_nonzero_ground_truth\n\n\ndef calculate_mae(predictions, ground_truth):\n \"\"\"Calculate MAE in a colab friendly way.\"\"\"\n if not predictions or len(predictions) != len(ground_truth):\n raise ValueError(\n 'Predictions and Ground Truth should be of equal length and non-empty')\n\n error = 0.\n for i in range(len(predictions)):\n error += abs(predictions[i] - ground_truth[i])\n\n error /= len(predictions)\n\n return error\n\n\nLOCALES = ['country', 'us_county', 'us_state', 'jp_prefecture']\n\n\[email protected]_cache(maxsize=32)\ndef _read_data_versions(bq_client, locale):\n \"\"\"Wrapper around version table for easy caching.\"\"\"\n return version_utils.Client(bq_client).read_data_versions(\n dataset_name='covid_prod_features', table_name=f'{locale}_ts')\n\n\ndef get_closest_versions(ds,\n locale,\n bq_client = None,\n buffer_days = 0):\n \"\"\"Returns last first version stricly after given date + buffer days.\"\"\"\n if locale not in LOCALES:\n raise ValueError(f'Locale not recognized: {locale} not in {LOCALES}')\n vs = _read_data_versions(bq_client, locale)\n vs = [(version_utils.version_name_to_datetime(v).date(), v) for v in vs]\n\n vs = sorted(vs)\n rets = []\n for d in ds:\n if d < vs[0][0]:\n rets.append(vs[0][1])\n continue\n if d >= vs[-1][0]:\n rets.append(vs[-1][1])\n continue\n\n for i in range(len(vs)):\n # Don't break on equality, since we want strictly less than.\n if vs[i][0] > d + datetime.timedelta(days=buffer_days or 0):\n break\n rets.append(vs[i][1])\n\n return rets\n\n\ndef get_latest_version(bq_client):\n return version_utils.Client(bq_client).get_latest_version()\n\n\ndef version_or_min_version(version, min_version):\n \"\"\"Pick version or min version.\"\"\"\n if (version_utils.version_name_to_datetime(version) <=\n version_utils.version_name_to_datetime(min_version)):\n return min_version\n else:\n return version\n\n\ndef data_version_for_retrospective(dates,\n min_version,\n locale,\n client,\n days_ahead = 28):\n \"\"\"Get the GT data version associated with a particular time period.\n\n Args:\n dates: List of dates to fetch values for. This is parallelized for speed.\n min_version: Japan has a data minimum version, so use that if the requested\n version is too early.\n locale: String for locale.\n client: BQ Client for reading tables.\n days_ahead: Minimum number of days to include. If, for example, we want 28\n days ahead, we should look ahead (28-1) = 27 days. Some days in the\n prospective period actually require 28 day lookahead for some reason,\n however, so we rely on the automatic increment mechanism later in the\n code.\n\n Returns:\n List of version datetimes.\n \"\"\"\n version_dates = [d + datetime.timedelta(days=days_ahead) for d in dates]\n version_strs = get_closest_versions(version_dates, locale, client)\n if min_version:\n version_strs = [\n version_or_min_version(v_str, min_version=min_version)\n for v_str in version_strs\n ]\n return [\n version_utils.version_name_to_datetime(v_str) for v_str in version_strs\n ]\n\n\ndef trial_name_to_df(trial_name,\n predicted_metric,\n client,\n num_locs=47,\n forecast_len=28):\n \"\"\"Reads a trial name forecasts. 
Used for fetching retrospective results.\"\"\"\n assert predicted_metric in ['death', 'confirmed']\n\n # Read from the table and make sure the output is correct.\n all_pd = client.query(f'select * from `eval.{trial_name}`').to_dataframe()\n cur_pd = all_pd[all_pd.predicted_metric == predicted_metric][[\n 'location_name', 'time_horizon', 'point_prediction',\n 'target_prediction_date'\n ]]\n assert len(cur_pd) == num_locs * forecast_len, (\n len(cur_pd), all_pd.predicted_metric.unique())\n\n def _sort(x):\n x = x.sort_values(by='time_horizon')\n return {\n 'predictions': x.point_prediction.values,\n 'prediction_date': x.target_prediction_date.dt.date.values,\n }\n\n preds_pd = cur_pd.groupby('location_name').apply(_sort)\n assert len(preds_pd) == num_locs, len(preds_pd)\n\n target_prediction_dates = sorted(cur_pd['target_prediction_date'].unique())\n assert len(target_prediction_dates) == forecast_len\n\n return preds_pd, target_prediction_dates\n\n\ndef gather_data_from_prospective_row(row,\n gt_df,\n locale,\n available_versions,\n expected_forecast_len = 28,\n expected_gt_len = 27,\n debug = False):\n \"\"\"Pandas `apply` fn to gather GT and preds from data.\n\n We use this function to parallelize using the pandas groupby.\n With this optimization (compared to a for-loop), this function goes from\n 1.2 min -> 0.2 on Japan.\n 2.0 min -> XXX on US.\n\n Args:\n row:\n gt_df:\n locale:\n available_versions: A numpy array of available versions in `gt_df`. The\n difference between precomputing this and doing it on-the-fly for each row\n is about 10 minutes of compute over the prospective window, so we pass\n this in precomputed as a speedup.\n expected_forecast_len: Expected length of the forecast.\n expected_gt_len: Expected length of GT. Doesn't have to match\n `expected_forecast_len` due to how forecasts are measured.\n debug:\n\n Returns:\n {'dates', 'predictions', 'ground_truth'}\n \"\"\"\n assert 'predictions' in row.columns, row.columns\n assert 'prediction_date' in row.columns, row.colums\n assert 'gt_version' in row.columns, row.columns\n assert len(row.gt_version.unique()) == 1\n gt_version = row.gt_version.unique()[0]\n date, loc = row.name\n row = row.sort_values(by='prediction_date')\n pred_xs = row.prediction_date.values\n pred_ys = row.predictions.values\n\n return _row_gather_helper(loc, date, gt_version, available_versions, locale,\n pred_xs, pred_ys, expected_forecast_len,\n expected_gt_len, gt_df, debug)\n\n\ndef gather_data_from_retrospective_row(row,\n gt_df,\n locale,\n available_versions,\n expected_forecast_len = 28,\n expected_gt_len = 27,\n set_missing_values_to_zero = False,\n debug = False):\n \"\"\"Pandas `apply` fn to gather GT and preds from data.\n\n We use this function to parallelize using the pandas groupby.\n With this optimization (compared to a for-loop), this function goes from\n 1.2 min -> 0.2 on Japan.\n 2.0 min -> XXX on US.\n\n Args:\n row:\n gt_df:\n locale:\n available_versions: A numpy array of available versions in `gt_df`. The\n difference between precomputing this and doing it on-the-fly for each row\n is about 10 minutes of compute over the prospective window, so we pass\n this in precomputed as a speedup.\n expected_forecast_len: Expected length of the forecast.\n expected_gt_len: Expected length of GT. 
Doesn't have to match\n `expected_forecast_len` due to how forecasts are measured.\n set_missing_values_to_zero: If `True`, assuming missing GT values are 0.\n This should only be used for Japan deaths, during the retrospective\n period.\n debug:\n\n Returns:\n {'dates', 'predictions', 'ground_truth'}\n \"\"\"\n assert 'data' in row.index, row.index\n assert 'gt_version' in row.index, row.index\n date, loc = row.name\n date = datetime.datetime.strptime(date, '%Y-%m-%d').date()\n pred_xs = row.data['prediction_date']\n pred_ys = row.data['predictions']\n gt_version = row.gt_version\n\n return _row_gather_helper(loc, date, gt_version, available_versions, locale,\n pred_xs, pred_ys, expected_forecast_len,\n expected_gt_len, gt_df, debug,\n set_missing_values_to_zero)\n\n\ndef get_next_gt_version(cur_gt_version, all_gt_versions):\n \"\"\"Get the next gt version.\"\"\"\n assert cur_gt_version in all_gt_versions, (cur_gt_version, all_gt_versions)\n index = np.where(all_gt_versions == cur_gt_version)[0]\n assert len(index) == 1, index\n index = int(index[0])\n assert isinstance(index, int), (index, type(index))\n\n next_gt_version = all_gt_versions[index + 1]\n assert isinstance(next_gt_version, str), type(next_gt_version)\n\n return next_gt_version\n\n\ndef _day_before(day_date):\n assert isinstance(day_date, datetime.date)\n return day_date - datetime.timedelta(days=1)\n\n\ndef _row_gather_helper(loc,\n date,\n gt_version,\n available_versions,\n locale,\n pred_xs,\n pred_ys,\n expected_forecast_len,\n expected_gt_len,\n gt_df,\n debug,\n set_missing_values_to_zero=False,\n get_day_zero_gt=True):\n \"\"\"Common code for prospective / retrospective row gathers.\"\"\"\n assert len(pred_xs) == len(pred_ys)\n\n if len(pred_ys) != expected_forecast_len:\n print(f'No good predictions for {loc} {date}: '\n f'{len(pred_ys)} vs {expected_forecast_len}')\n return np.nan\n\n assert isinstance(pred_xs, np.ndarray), type(pred_xs)\n assert isinstance(pred_ys, np.ndarray), type(pred_ys)\n\n loc_key = LOC_NAME[locale]\n assert isinstance(date, datetime.date)\n\n # Change the location name, if necessary.\n gt_loc = loc.title()\n if locale == 'state':\n if gt_loc not in STATE_TO_CODE_MAP_:\n print(f'Skipping state {gt_loc}, since not found.')\n return np.nan\n gt_loc = STATE_TO_CODE_MAP_[gt_loc]\n\n # Get the right GT slice for this prediction window.\n # We start with `gt_version`, and increment until we have enough data to cover\n # `expected_gt_len` days into the future. 
This matches how the evaluator does\n # it.\n # We might have multiple features, so pick one that's the right length.\n feature_names = gt_df.feature_name.unique()\n cur_gt_version, cur_gt, day_zero_gt = None, pd.DataFrame(), None\n while cur_gt.size == 0 or len(cur_gt.dt.unique()) < expected_gt_len:\n if cur_gt_version is None:\n cur_gt_version = gt_version\n else:\n print(f'Date {date} {gt_loc} failed: Found {len(cur_gt.dt.unique())}, '\n f'expected {expected_gt_len} days with version {cur_gt_version}, '\n 'so incrementing...')\n cur_gt_version = get_next_gt_version(cur_gt_version, available_versions)\n for feature_name in feature_names:\n min_gt = _day_before(min(pred_xs)) if get_day_zero_gt else min(pred_xs)\n assert isinstance(min_gt, datetime.date)\n cur_gt = gt_df[(gt_df.version == cur_gt_version)\n & (gt_df.dt >= min_gt) & (gt_df.dt <= max(pred_xs)) &\n (gt_df[loc_key] == gt_loc) &\n (gt_df.feature_name == feature_name)][[\n 'dt', 'feature_value'\n ]]\n expected_len = expected_gt_len + 1 if get_day_zero_gt else expected_gt_len\n if len(cur_gt.dt.unique()) >= expected_len:\n if get_day_zero_gt:\n day_zero_gt = cur_gt[cur_gt.dt == min_gt]\n if len(day_zero_gt) == 0 and set_missing_values_to_zero: # pylint:disable=g-explicit-length-test\n day_zero_gt = 0\n else:\n day_zero_gt = day_zero_gt.feature_value.values[0]\n cur_gt = cur_gt[~(cur_gt.dt == min_gt)]\n break\n # Japan deaths are often empty, but we can safely assume that they're 0. So\n # skip the sanity check, and just assume that missing values are 0 later.\n if set_missing_values_to_zero:\n break\n assert len(cur_gt.dt.unique()) >= expected_gt_len\n assert expected_gt_len <= expected_forecast_len\n if set_missing_values_to_zero:\n cur_dates = cur_gt.dt.unique()\n expected_dates = pd.date_range(start=min(pred_xs), end=max(pred_xs))\n missing_dates = expected_dates.difference(cur_dates)\n if missing_dates.size > 0:\n print(f'Found missing dates for {date} {gt_loc}: {missing_dates}')\n cur_gt = cur_gt.append([{\n 'dt': dt.date(),\n 'feature_value': 0.0\n } for dt in missing_dates],\n ignore_index=True)\n assert expected_dates.difference(cur_gt.dt.unique()).size == 0\n assert np.all(\n sorted(cur_gt.dt.unique())[:expected_gt_len] == pred_xs[:expected_gt_len])\n\n cur_gt = cur_gt.sort_values(by='dt')\n gtru_xs = cur_gt.dt.to_numpy()\n gtru_ys = cur_gt.feature_value.to_numpy()\n if len(gtru_ys) < expected_gt_len:\n print(f'Length of gt wrong: {len(gtru_ys)} {loc} {date}')\n return np.nan\n\n if debug:\n print(f'Finish a row: {loc} {date}')\n\n ret = {\n 'dates': gtru_xs,\n 'predictions': pred_ys,\n 'ground_truth': gtru_ys,\n }\n if get_day_zero_gt:\n ret['day_zero_gt'] = day_zero_gt if day_zero_gt else 0.0\n\n return ret\n\n\ndef mean_confidence_interval(data, confidence=0.95, ignore_nan=False):\n a = 1.0 * np.array(data)\n n = len(a)\n if ignore_nan:\n m, se = np.nanmean(a), scipy.stats.sem(a, nan_policy='omit')\n else:\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)\n return m, m - h, m + h\n\n\ndef kaz_locations_to_open_covid_map():\n return {\n 'Hyogo': 'Hyōgo',\n 'Kochi': 'Kōchi',\n 'Oita': 'Ōita',\n }\n\n\ndef open_covid_locations_to_kaz_map():\n return {v: k for (k, v) in kaz_locations_to_open_covid_map().items()}\n\n\ndef state_name_to_code_map():\n \"\"\"State name to code map.\"\"\"\n abbreviation = 'abbreviation'\n name = 'name'\n state_map = [\n {\n abbreviation: 'AL',\n name: 'Alabama'\n },\n {\n abbreviation: 'AK',\n name: 'Alaska'\n },\n {\n abbreviation: 'AZ',\n 
name: 'Arizona'\n },\n {\n abbreviation: 'AR',\n name: 'Arkansas'\n },\n {\n abbreviation: 'CA',\n name: 'California'\n },\n {\n abbreviation: 'CO',\n name: 'Colorado'\n },\n {\n abbreviation: 'CT',\n name: 'Connecticut'\n },\n {\n abbreviation: 'DE',\n name: 'Delaware'\n },\n {\n abbreviation: 'DC',\n name: 'District Of Columbia'\n },\n {\n abbreviation: 'FL',\n name: 'Florida'\n },\n {\n abbreviation: 'GA',\n name: 'Georgia'\n },\n {\n abbreviation: 'HI',\n name: 'Hawaii'\n },\n {\n abbreviation: 'ID',\n name: 'Idaho'\n },\n {\n abbreviation: 'IL',\n name: 'Illinois'\n },\n {\n abbreviation: 'IN',\n name: 'Indiana'\n },\n {\n abbreviation: 'IA',\n name: 'Iowa'\n },\n {\n abbreviation: 'KS',\n name: 'Kansas'\n },\n {\n abbreviation: 'KY',\n name: 'Kentucky'\n },\n {\n abbreviation: 'LA',\n name: 'Louisiana'\n },\n {\n abbreviation: 'ME',\n name: 'Maine'\n },\n {\n abbreviation: 'MD',\n name: 'Maryland'\n },\n {\n abbreviation: 'MA',\n name: 'Massachusetts'\n },\n {\n abbreviation: 'MI',\n name: 'Michigan'\n },\n {\n abbreviation: 'MN',\n name: 'Minnesota'\n },\n {\n abbreviation: 'MS',\n name: 'Mississippi'\n },\n {\n abbreviation: 'MO',\n name: 'Missouri'\n },\n {\n abbreviation: 'MT',\n name: 'Montana'\n },\n {\n abbreviation: 'NE',\n name: 'Nebraska'\n },\n {\n abbreviation: 'NV',\n name: 'Nevada'\n },\n {\n abbreviation: 'NH',\n name: 'New Hampshire'\n },\n {\n abbreviation: 'NJ',\n name: 'New Jersey'\n },\n {\n abbreviation: 'NM',\n name: 'New Mexico'\n },\n {\n abbreviation: 'NY',\n name: 'New York'\n },\n {\n abbreviation: 'NC',\n name: 'North Carolina'\n },\n {\n abbreviation: 'ND',\n name: 'North Dakota'\n },\n {\n abbreviation: 'OH',\n name: 'Ohio'\n },\n {\n abbreviation: 'OK',\n name: 'Oklahoma'\n },\n {\n abbreviation: 'OR',\n name: 'Oregon'\n },\n {\n abbreviation: 'PA',\n name: 'Pennsylvania'\n },\n {\n abbreviation: 'RI',\n name: 'Rhode Island'\n },\n {\n abbreviation: 'SC',\n name: 'South Carolina'\n },\n {\n abbreviation: 'SD',\n name: 'South Dakota'\n },\n {\n abbreviation: 'TN',\n name: 'Tennessee'\n },\n {\n abbreviation: 'TX',\n name: 'Texas'\n },\n {\n abbreviation: 'UT',\n name: 'Utah'\n },\n {\n abbreviation: 'VT',\n name: 'Vermont'\n },\n {\n abbreviation: 'VA',\n name: 'Virginia'\n },\n {\n abbreviation: 'WA',\n name: 'Washington'\n },\n {\n abbreviation: 'WV',\n name: 'West Virginia'\n },\n {\n abbreviation: 'WI',\n name: 'Wisconsin'\n },\n {\n abbreviation: 'WY',\n name: 'Wyoming'\n },\n ]\n return {d[name]: d[abbreviation] for d in state_map}\n\n\nSTATE_TO_CODE_MAP_ = state_name_to_code_map()\n\n\ndef code_to_state_name_map():\n return {v: k for (k, v) in state_name_to_code_map().items()}\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer layers for ReadTwice.\"\"\"\n\nfrom typing import List, Optional, Text, Union\n\nimport tensorflow as tf\n\nfrom readtwice.layers import attention\nfrom readtwice.layers import tensor_utils\nfrom readtwice.layers import wrappers\n\n\nclass TransformerWithSideInputLayers(tf.keras.layers.Layer):\n \"\"\"A sequence of Transformer layers (currently only the encoder side).\n\n The model follows the original Transformer model\n (https://arxiv.org/abs/1706.03762) while allowing additional side inputs.\n These inputs are only used during self-attention computations\n as extra keys and values.\n \"\"\"\n\n def __init__(self,\n hidden_size,\n num_hidden_layers,\n num_attention_heads,\n intermediate_size = None,\n hidden_act=tensor_utils.get_activation('gelu'),\n hidden_dropout_prob = 0.1,\n attention_probs_dropout_prob = 0.1,\n initializer_range = 0.02,\n share_kv_projections = False,\n num_cross_attention_heads = None,\n enable_default_side_input = False,\n name = 'transformer_layers',\n **kwargs):\n \"\"\"Init.\n\n Args:\n hidden_size: Size of the output hidden dimension. Must match the input\n hidden dimension size.\n num_hidden_layers: Number of Transformer layers. Each layer includes both\n an attention sublayer and a feed-forward sublayer.\n num_attention_heads: Number of attention heads. Must evenly divide\n `hidden_size`.\n intermediate_size: The size of the \"intermediate\" (i.e. feed-forward)\n layers. Defaults to 4 * hidden_size.\n hidden_act: The non-linear activation function in the intermediate layers.\n hidden_dropout_prob: The dropout probability for the attention and\n feed-forward residual blocks. Must be between 0.0 and 1.0.\n attention_probs_dropout_prob: Dropout probability for attention\n probabilities. Must be between 0.0 and 1.0.\n initializer_range: The standard deviation of the truncated normal\n initializer for initializing weight matrices not created by\n `linear_make_fn`. If zero, the scale of the truncated normal initializer\n will be tuned automatically according to the distribution of the inputs.\n share_kv_projections: If True, key and value projections will be shared\n between main-to-main and main-to-side components. This results in 1\n key projection per layer instead of 2 (and similarly for value\n projections). Only relevant for fused side attention,\n NOT cross attention over the side input (when num_cross_attention_heads\n is not None).\n num_cross_attention_heads: If it is not None, will add a cross-attention\n layer over side inputs. In this case, side inputs will NOT be used\n in the `FusedSideAttention`. 
Must be greater or equal than 0, where 0\n means that cross attention layer will have a single attention head\n WITHOUT projection matrices.\n enable_default_side_input: Add a default side input, which acts like a\n no-op attention, effective allowing attention weights to sum up\n to something less than 1.\n Currently, only available for the cross attention over side inputs.\n name: Name of the layer.\n **kwargs: Forwarded to super.\n \"\"\"\n super(TransformerWithSideInputLayers, self).__init__(name=name, **kwargs)\n\n if intermediate_size is None:\n intermediate_size = 4 * hidden_size\n\n if num_cross_attention_heads is not None:\n # This will prevent from allocating extra parameters for\n # fused side attention since side input will not be used there anyway.\n share_kv_projections = True\n\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.share_kv_projections = share_kv_projections\n self.num_cross_attention_heads = num_cross_attention_heads\n self.enable_default_side_input = enable_default_side_input\n\n if (self.enable_default_side_input and\n self.num_cross_attention_heads is None):\n raise ValueError('`enable_default_side_input` is only used when '\n 'num_cross_attention_heads is enabled.')\n if (self.num_cross_attention_heads is not None and\n self.num_cross_attention_heads < 0):\n raise ValueError('If `num_cross_attention_heads` is specified '\n 'it must be non-negative.')\n\n self.initializer = tf.keras.initializers.TruncatedNormal(\n stddev=initializer_range)\n\n self.attention_layers = []\n self.cross_attention_layers = []\n self.feed_forward_layers = []\n for i in range(num_hidden_layers):\n self.attention_layers.append(\n wrappers.ResidualBlock(\n inner_layer=attention.FusedSideAttention(\n hidden_size=self.hidden_size,\n num_heads=self.num_attention_heads,\n att_dropout_prob=self.attention_probs_dropout_prob,\n share_kv_projections=self.share_kv_projections,\n initializer=self.initializer),\n dropout_probability=self.hidden_dropout_prob,\n use_pre_activation_order=False,\n name='attention_layer_%d' % i))\n\n if self.num_cross_attention_heads is not None:\n self.cross_attention_layers.append(\n wrappers.ResidualBlock(\n inner_layer=attention.SideAttention(\n hidden_size=self.hidden_size,\n num_heads=self.num_cross_attention_heads,\n att_dropout_prob=self.attention_probs_dropout_prob,\n initializer=self.initializer,\n enable_default_side_input=self.enable_default_side_input),\n dropout_probability=self.hidden_dropout_prob,\n use_pre_activation_order=False,\n name='cross_attention_layer_%d' % i))\n\n self.feed_forward_layers.append(\n wrappers.ResidualBlock(\n dropout_probability=self.hidden_dropout_prob,\n use_pre_activation_order=False,\n inner_intermediate_size=self.intermediate_size,\n inner_activation=self.hidden_act,\n inner_kernel_initializer=self.initializer,\n name='feed_forward_layer_%d' % i))\n\n # TODO(urikz): Add some way for the user to access activations from\n # each hidden layer.\n def call(self,\n main_input,\n side_input = None,\n att_mask = None,\n training=None):\n \"\"\"Calls the layer.\n\n Args:\n main_input: <float32>[batch_size, main_seq_len, hidden_size].\n side_input: <float32>[batch_size, side_seq_len, hidden_size] or a list\n of tensors with this shape. 
The length of the list must be equal to\n        `num_hidden_layers`.\n      att_mask: <int32>[batch_size, main_seq_len, main_seq_len +\n        side_seq_len]. Should have only 0 and 1 values, with 0 for entries\n        that should be masked and 1 otherwise. Leave as None to allow all\n        elements to attend to all other elements within each example.\n      training: For Keras, optional boolean scalar tensor or Python boolean\n        indicating whether the call is meant for training or inference.\n\n    Returns:\n      <float32>[batch_size, main_seq_len, hidden_size].\n\n    Raises:\n      ValueError if `side_input` is a list and the length of `side_input`\n      is not equal to `num_hidden_layers`.\n    \"\"\"\n    output_tensor = main_input\n\n    # Normalize `side_input` into a list with one (possibly None) entry per\n    # hidden layer, keeping a user-provided list intact.\n    if side_input is None:\n      side_input = [None] * self.num_hidden_layers\n    elif not isinstance(side_input, list):\n      side_input = [side_input] * self.num_hidden_layers\n\n    if len(side_input) != self.num_hidden_layers:\n      raise ValueError('Length of side input ({}) is not equal to '\n                       'the number of hidden layers ({})'.format(\n                           len(side_input), self.num_hidden_layers))\n\n    if att_mask is not None and self.num_cross_attention_heads is not None:\n      main_seq_len = tf.shape(main_input)[1]\n      att_mask_attention_layer = att_mask[:, :, :main_seq_len]\n      att_mask_cross_attention_layer = att_mask[:, :, main_seq_len:]\n    else:\n      att_mask_attention_layer = None\n      att_mask_cross_attention_layer = None\n\n    for i in range(self.num_hidden_layers):\n      if self.num_cross_attention_heads is not None:\n        output_tensor = self.attention_layers[i](\n            output_tensor,\n            training=training,\n            side_input=None,\n            att_mask=att_mask_attention_layer)\n\n        output_tensor = self.cross_attention_layers[i](\n            output_tensor,\n            side_input=side_input[i],\n            att_mask=att_mask_cross_attention_layer,\n            training=training)\n      else:\n        output_tensor = self.attention_layers[i](\n            output_tensor,\n            training=training,\n            side_input=side_input[i],\n            att_mask=att_mask)\n\n      output_tensor = self.feed_forward_layers[i](\n          output_tensor, training=training)\n\n    return output_tensor\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tensorflow Example input layer.\"\"\"\n\nimport tensorflow as tf\n\nfrom poem.core import common\n\n\ndef add_decoder_image_sizes(instance_shape, common_module=common):\n \"\"\"Adds decoders for image sizes.\n\n Args:\n instance_shape: A list of integers for the shape (layout) of instances for\n each record.\n common_module: A Python module that defines common constants.\n\n Returns:\n A dictionary for decoders.\n \"\"\"\n return {\n common_module.TFE_KEY_IMAGE_HEIGHT:\n tf.io.FixedLenFeature(instance_shape, dtype=tf.int64),\n common_module.TFE_KEY_IMAGE_WIDTH:\n tf.io.FixedLenFeature(instance_shape, dtype=tf.int64),\n }\n\n\ndef add_decoder_keypoints_2d(keypoint_names_2d,\n include_keypoint_scores_2d,\n instance_shape,\n common_module=common):\n \"\"\"Adds decoders for 2D keypoints.\n\n Args:\n keypoint_names_2d: A list of strings for 2D keypoint names.\n include_keypoint_scores_2d: A boolean for whether to include 2D keypoint\n scores.\n instance_shape: A list of integers for the shape (layout) of instances for\n each record.\n common_module: A Python module that defines common constants.\n\n Returns:\n decoders: A dictionary for decoders.\n \"\"\"\n decoders = {}\n\n for name in keypoint_names_2d:\n for key_suffix in common_module.TFE_KEY_SUFFIX_KEYPOINT_2D:\n key = common_module.TFE_KEY_PREFIX_KEYPOINT_2D + name + key_suffix\n decoders[key] = tf.io.FixedLenFeature(instance_shape, dtype=tf.float32)\n if include_keypoint_scores_2d:\n key = (\n common_module.TFE_KEY_PREFIX_KEYPOINT_2D + name +\n common_module.TFE_KEY_SUFFIX_KEYPOINT_SCORE)\n decoders[key] = tf.io.FixedLenFeature(instance_shape, dtype=tf.float32)\n\n return decoders\n\n\ndef add_decoder_keypoints_3d(keypoint_names_3d,\n include_keypoint_scores_3d,\n instance_shape,\n common_module=common):\n \"\"\"Adds decoders for 3D keypoints.\n\n Args:\n keypoint_names_3d: A list of strings for 3D keypoint names.\n include_keypoint_scores_3d: A boolean for whether to include 3D keypoint\n scores.\n instance_shape: A list of integers for the shape (layout) of instances for\n each record.\n common_module: A Python module that defines common constants.\n\n Returns:\n decoders: A dictionary for decoders.\n \"\"\"\n decoders = {}\n\n for name in keypoint_names_3d:\n for key_suffix in common_module.TFE_KEY_SUFFIX_KEYPOINT_3D:\n key = common_module.TFE_KEY_PREFIX_KEYPOINT_3D + name + key_suffix\n decoders[key] = tf.io.FixedLenFeature(instance_shape, dtype=tf.float32)\n if include_keypoint_scores_3d:\n key = (\n common_module.TFE_KEY_PREFIX_KEYPOINT_3D + name +\n common_module.TFE_KEY_SUFFIX_KEYPOINT_SCORE)\n decoders[key] = tf.io.FixedLenFeature(instance_shape, dtype=tf.float32)\n\n return decoders\n\n\ndef add_decoder_features(feature_dim, instance_shape, common_module=common):\n \"\"\"Adds decoder for pre-computed features.\n\n Args:\n feature_dim: An integer for size of feature vectors.\n instance_shape: A list of integers 
for the shape (layout) of instances for\n each record.\n common_module: A Python module that defines common constants.\n\n Returns:\n A dictionary for decoder.\n \"\"\"\n feature_shape = list(instance_shape) + [feature_dim]\n return {\n common_module.TFE_KEY_FEATURE:\n tf.io.FixedLenFeature(feature_shape, dtype=tf.float32)\n }\n\n\ndef add_decoder_class_labels(common_module=common):\n \"\"\"Adds decoders for class label ids and confidences.\n\n IMPORTANT: Note that we assume there is one copy of label ids and confidences\n in each record and they apply to all the objects in the record.\n\n Args:\n common_module: A Python module that defines common constants.\n\n Returns:\n A dictionary for decoders.\n \"\"\"\n return {\n common_module.TFE_KEY_CLASS_LABEL_ID:\n tf.io.VarLenFeature(dtype=tf.int64),\n common_module.TFE_KEY_CLASS_LABEL_CONFIDENCE:\n tf.io.VarLenFeature(dtype=tf.float32)\n }\n\n\ndef process_decoded_image_sizes(decoded_tensors, common_module=common):\n \"\"\"Processes decoded image sizes.\n\n Args:\n decoded_tensors: A dictionary for decoded tensors.\n common_module: A Python module that defines common constants.\n\n Returns:\n A dictionary for processed 2D keypoint tensors.\n \"\"\"\n image_heights = decoded_tensors[common_module.TFE_KEY_IMAGE_HEIGHT]\n image_widths = decoded_tensors[common_module.TFE_KEY_IMAGE_WIDTH]\n return {\n common_module.KEY_IMAGE_SIZES:\n tf.stack([image_heights, image_widths], axis=-1)\n }\n\n\ndef process_decoded_keypoints_2d(decoded_tensors,\n keypoint_names_2d,\n include_keypoint_scores_2d,\n common_module=common):\n \"\"\"Processes decoded 2D keypoint tensors.\n\n Args:\n decoded_tensors: A dictionary for decoded tensors.\n keypoint_names_2d: A list of strings for 2D keypoint names.\n include_keypoint_scores_2d: A boolean for whether to include 2D keypoint\n scores.\n common_module: A Python module that defines common constants.\n\n Returns:\n outputs: A dictionary for processed 2D keypoint tensors.\n \"\"\"\n outputs = {}\n\n keypoints_2d = []\n for name in keypoint_names_2d:\n sub_keypoints_2d = []\n for key_suffix in common_module.TFE_KEY_SUFFIX_KEYPOINT_2D:\n key = common_module.TFE_KEY_PREFIX_KEYPOINT_2D + name + key_suffix\n sub_keypoints_2d.append(decoded_tensors[key])\n keypoints_2d.append(tf.stack(sub_keypoints_2d, axis=-1))\n outputs[common_module.KEY_KEYPOINTS_2D] = tf.stack(keypoints_2d, axis=-2)\n\n if include_keypoint_scores_2d:\n keypoint_scores_2d = []\n for name in keypoint_names_2d:\n key = (\n common_module.TFE_KEY_PREFIX_KEYPOINT_2D + name +\n common_module.TFE_KEY_SUFFIX_KEYPOINT_SCORE)\n keypoint_scores_2d.append(decoded_tensors[key])\n outputs[common_module.KEY_KEYPOINT_SCORES_2D] = tf.stack(\n keypoint_scores_2d, axis=-1)\n\n return outputs\n\n\ndef process_decoded_keypoints_3d(decoded_tensors,\n keypoint_names_3d,\n include_keypoint_scores_3d,\n common_module=common):\n \"\"\"Processes decoded 3D keypoint tensors.\n\n Args:\n decoded_tensors: A dictionary for decoded tensors.\n keypoint_names_3d: A list of strings for 3D keypoint names.\n include_keypoint_scores_3d: A boolean for whether to include 2D keypoint\n scores.\n common_module: A Python module that defines common constants.\n\n Returns:\n outputs: A dictionary for processed 2D keypoint tensors.\n \"\"\"\n outputs = {}\n\n keypoints_3d = []\n for name in keypoint_names_3d:\n sub_keypoints_3d = []\n for key_suffix in common_module.TFE_KEY_SUFFIX_KEYPOINT_3D:\n key = common_module.TFE_KEY_PREFIX_KEYPOINT_3D + name + key_suffix\n 
sub_keypoints_3d.append(decoded_tensors[key])\n keypoints_3d.append(tf.stack(sub_keypoints_3d, axis=-1))\n outputs[common_module.KEY_KEYPOINTS_3D] = tf.stack(keypoints_3d, axis=-2)\n\n if include_keypoint_scores_3d:\n keypoint_scores_3d = []\n for name in keypoint_names_3d:\n key = (\n common_module.TFE_KEY_PREFIX_KEYPOINT_3D + name +\n common_module.TFE_KEY_SUFFIX_KEYPOINT_SCORE)\n keypoint_scores_3d.append(decoded_tensors[key])\n outputs[common_module.KEY_KEYPOINT_SCORES_3D] = tf.stack(\n keypoint_scores_3d, axis=-1)\n\n return outputs\n\n\ndef process_decoded_features(decoded_tensors, common_module=common):\n \"\"\"Processes decoded features.\n\n Args:\n decoded_tensors: A dictionary for decoded tensors.\n common_module: A Python module that defines common constants.\n\n Returns:\n A dictionary for processed 2D keypoint tensors.\n \"\"\"\n return {\n common_module.KEY_FEATURES: decoded_tensors[common_module.TFE_KEY_FEATURE]\n }\n\n\ndef generate_class_targets(label_ids,\n label_confidences,\n num_classes,\n positive_label_confidence_threshold=0.5):\n \"\"\"Generates class targets and weights from label ids and confidences.\n\n Note that we use `class_targets` to represent if a label is positive or\n negative, and use `class_weights` to represent if a label exists in the input\n or not.\n\n Example usage:\n num_classes = 5\n label_ids = [0, 1, 3]\n label_confidences = [0.9, 0.3, 0.7]\n positive_label_confidence_threshold = 0.5\n -->\n class_targets = [1, 0, 0, 1, 0]\n class_weights = [1.0, 1.0, 0.0, 1.0, 0.0]\n\n Args:\n label_ids: A tensor for label ids. Shape = [num_classes].\n label_confidences: A tensor for label confidences. Shape = [num_classes].\n num_classes: An integer for total number of classes.\n positive_label_confidence_threshold: A float for the threshold to determine\n class target for label ids. If the confidence of a label id is greater\n than this value, it has positive class target (1), otherwise negative\n target (0).\n\n Returns:\n class_targets: A int64 tensor for class targets. Shape = [num_classes].\n class_weights: A float32 tensor for class weights. Shape = [num_classes].\n\n Raises:\n ValueError: If `label_ids` or `label_confidences` is not 1D tensor.\n \"\"\"\n if len(label_ids.shape.as_list()) != 1:\n raise ValueError('Label id tensor must be 1D: %d.' %\n len(label_ids.shape.as_list()))\n if len(label_confidences.shape.as_list()) != 1:\n raise ValueError('Label confidence tensor must be 1D: %d.' 
%\n len(label_confidences.shape.as_list()))\n\n if isinstance(label_ids, tf.SparseTensor):\n label_ids = tf.sparse.to_dense(label_ids)\n if isinstance(label_confidences, tf.SparseTensor):\n label_confidences = tf.sparse.to_dense(label_confidences)\n positive_label_id_masks = tf.math.greater(\n label_confidences, positive_label_confidence_threshold)\n positive_label_ids = tf.boolean_mask(label_ids, mask=positive_label_id_masks)\n class_targets = tf.math.reduce_sum(\n tf.one_hot(positive_label_ids, num_classes, dtype=tf.int64), axis=0)\n class_weights = tf.math.reduce_sum(\n tf.one_hot(label_ids, num_classes, dtype=tf.float32), axis=0)\n return class_targets, class_weights\n\n\ndef process_class_labels(decoded_tensors,\n num_classes,\n num_objects,\n common_module=common):\n \"\"\"Processes decoded class labels and confidences into targets and weights.\n\n IMPORTANT: Note that we assume there is one copy of label ids and confidences\n in each record and they apply to all the objects in the record.\n\n Args:\n decoded_tensors: A dictionary for decoded tensors.\n num_classes: An integer for total number of classification label classes to\n read labels for.\n num_objects: An integer for the number of objects each example has.\n common_module: A Python module that defines common constants.\n\n Returns:\n outputs: A dictionary for processed 2D keypoint tensors.\n \"\"\"\n class_targets, class_weights = (\n generate_class_targets(\n decoded_tensors[common_module.TFE_KEY_CLASS_LABEL_ID],\n decoded_tensors[common_module.TFE_KEY_CLASS_LABEL_CONFIDENCE],\n num_classes=num_classes))\n\n # Stack the same class targets and weights for multiple objects.\n class_targets = tf.stack([class_targets for i in range(num_objects)], axis=0)\n class_weights = tf.stack([class_weights for i in range(num_objects)], axis=0)\n\n outputs = {\n common_module.KEY_CLASS_TARGETS: class_targets,\n common_module.KEY_CLASS_WEIGHTS: class_weights,\n }\n return outputs\n\n\ndef get_tfe_parser_fn(decoders, post_process_fn):\n \"\"\"Creates a tf.Example parser function.\n\n Args:\n decoders: A dictionary for keyed tf.Example field decoders.\n post_process_fn: A function handle for postprocessing decoded tensors.\n\n Returns:\n parser_fn: A function handle for the parser function.\n \"\"\"\n\n def parser_fn(*inputs):\n \"\"\"Decoder function.\"\"\"\n # Here `inputs` can be either just a serialized example or a (key,\n # serialized example) tuple (in which we ignore the key), and we would like\n # to handle both cases.\n serialized_example = inputs[-1]\n decoded_tensors = tf.io.parse_single_example(serialized_example, decoders)\n return post_process_fn(decoded_tensors)\n\n return parser_fn\n\n\ndef create_tfe_parser(keypoint_names_2d=None,\n keypoint_names_3d=None,\n include_keypoint_scores_2d=True,\n include_keypoint_scores_3d=False,\n feature_dim=None,\n num_classes=None,\n num_objects=1,\n sequence_length=None,\n common_module=common):\n \"\"\"Creates tf.Example parser function.\n\n IMPORTANT: Currently only supports all objects having the same class label\n information, and the class label related fields in tf.Examples are expected to\n only have values for one object.\n\n Args:\n keypoint_names_2d: A list of strings for 2D keypoint names. Use None to skip\n reading 2D keypoints.\n keypoint_names_3d: A list of strings for 3D keypoint names. Use None to skip\n reading 3D keypoints.\n include_keypoint_scores_2d: A boolean for whether to read 2D keypoint\n scores. 
Only used if `keypoint_names_2d` is specified.\n include_keypoint_scores_3d: A boolean for whether to read 3D keypoint\n scores. Only used if `keypoint_names_3d` is specified.\n feature_dim: An integer for size of pre-computed feature vectors. Only reads\n features if specified.\n num_classes: An integer for the number of classification label classes to\n read labels for. Only reads labels if specified.\n num_objects: An integer for the number of objects each example has.\n sequence_length: An integer for the length of sequence per object each\n example has. Skips adding the sequence dimension if None.\n common_module: A Python module that defines common constants.\n\n Returns:\n parser_fn: A function handle for the parser.\n \"\"\"\n instance_shape = ([num_objects] if sequence_length is None else\n [num_objects, sequence_length])\n\n decoders = add_decoder_image_sizes(\n instance_shape=instance_shape, common_module=common_module)\n\n if keypoint_names_2d:\n decoders.update(\n add_decoder_keypoints_2d(\n keypoint_names_2d,\n include_keypoint_scores_2d=include_keypoint_scores_2d,\n instance_shape=instance_shape,\n common_module=common_module))\n\n if keypoint_names_3d:\n decoders.update(\n add_decoder_keypoints_3d(\n keypoint_names_3d,\n include_keypoint_scores_3d=include_keypoint_scores_3d,\n instance_shape=instance_shape,\n common_module=common_module))\n\n if feature_dim:\n decoders.update(\n add_decoder_features(\n feature_dim=feature_dim,\n instance_shape=instance_shape,\n common_module=common_module))\n\n if num_classes:\n decoders.update(add_decoder_class_labels(common_module=common_module))\n\n def post_process_decoded_tensors(decoded_tensors):\n \"\"\"Postprocesses decoded tensors.\"\"\"\n outputs = process_decoded_image_sizes(decoded_tensors, common_module)\n\n if keypoint_names_2d:\n outputs.update(\n process_decoded_keypoints_2d(\n decoded_tensors,\n keypoint_names_2d=keypoint_names_2d,\n include_keypoint_scores_2d=include_keypoint_scores_2d,\n common_module=common_module))\n\n if keypoint_names_3d:\n outputs.update(\n process_decoded_keypoints_3d(\n decoded_tensors,\n keypoint_names_3d=keypoint_names_3d,\n include_keypoint_scores_3d=include_keypoint_scores_3d,\n common_module=common_module))\n\n if feature_dim:\n outputs.update(\n process_decoded_features(\n decoded_tensors, common_module=common_module))\n\n if num_classes:\n outputs.update(\n process_class_labels(\n decoded_tensors,\n num_classes=num_classes,\n num_objects=num_objects,\n common_module=common_module))\n\n return outputs\n\n return get_tfe_parser_fn(decoders, post_process_decoded_tensors)\n\n\ndef read_from_table(table_pattern,\n shuffle=True,\n num_epochs=None,\n shuffle_buffer_size=4096,\n num_shards=1,\n shard_index=None,\n dataset_class=tf.data.TFRecordDataset,\n parser_fn=None,\n seed=None):\n \"\"\"Reads tf.Examples from input table.\n\n Args:\n table_pattern: Path or pattern to input tables.\n shuffle: A boolean for whether to shuffle the common queue when reading.\n num_epochs: An integer for the number of epochs to read. Use None to read\n indefinitely.\n shuffle_buffer_size: An integer for the buffer size used for shuffling. A\n large buffer size benefits shuffling quality.\n num_shards: An integer for the number of shards to divide the dataset. This\n is useful to distributed training. See `tf.data.Dataset.shard` for\n details.\n shard_index: An integer for the shard index to use. This is useful to\n distributed training, and should usually be set to the id of a\n synchronized worker. 
See `tf.data.Dataset.shard` for details. Note this\n must be specified if `num_shards` is greater than 1.\n dataset_class: A dataset class to use. Must match input table type.\n parser_fn: A function handle for parser function.\n seed: An integer for random seed.\n\n Returns:\n A dictionary of parsed input tensors.\n\n Raises:\n ValueError: If `num_shards` is greater than 1 but `shard_index` is not\n specified.\n \"\"\"\n dataset = tf.data.Dataset.list_files(\n table_pattern, shuffle=shuffle, seed=seed)\n dataset = dataset.interleave(\n dataset_class, cycle_length=tf.data.experimental.AUTOTUNE)\n dataset = dataset.repeat(num_epochs)\n\n if num_shards > 1:\n if shard_index is None:\n raise ValueError('Shard index is not specified: `%s.`' % str(shard_index))\n dataset = dataset.shard(num_shards, index=shard_index)\n\n if shuffle:\n dataset = dataset.shuffle(shuffle_buffer_size, seed=seed)\n\n dataset = dataset.map(\n parser_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset\n\n\ndef read_batch_from_tables(table_patterns,\n batch_sizes,\n drop_remainder,\n seed=None,\n **reader_kwargs):\n \"\"\"Reads and batches inputs from tf.Example tables.\n\n Args:\n table_patterns: A list of strings for the paths or pattern to input tables.\n batch_sizes: A list of integers for the batch sizes to read from each table.\n drop_remainder: A boolean for whether to drop remaining elements that cannot\n make up a full batch at the end of an epoch. Usually set to True for\n evaluation.\n seed: An integer for random seed.\n **reader_kwargs: A dictionary of additional arguments passed to\n `read_from_table`.\n\n Returns:\n A dictionary of parsed input tensors.\n\n Raises:\n ValueError: If the size of `table_patterns` is different than that of\n `nums_samples`.\n ValueError: If the size of `table_patterns` is different than that of\n `batch_sizes`.\n \"\"\"\n if not table_patterns:\n raise ValueError('No table pattern is provided.')\n\n if len(table_patterns) != len(batch_sizes):\n raise ValueError(\n 'Number of table patterns is different than that of batch sizes: %d vs.'\n ' %d.' % (len(table_patterns), len(batch_sizes)))\n\n if len(table_patterns) == 1:\n dataset = read_from_table(table_patterns[0], seed=seed, **reader_kwargs)\n else:\n datasets = [\n read_from_table(table_pattern, seed=seed, **reader_kwargs)\n for table_pattern in table_patterns\n ]\n dataset = tf.data.experimental.sample_from_datasets(\n datasets, weights=[float(x) for x in batch_sizes], seed=seed)\n return dataset.batch(sum(batch_sizes), drop_remainder=drop_remainder)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for smu_utils_lib.\"\"\"\n\nimport copy\nimport os\nimport tempfile\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nimport pandas as pd\nfrom rdkit import Chem\n\nfrom google.protobuf import text_format\nfrom smu import dataset_pb2\nfrom smu.parser import smu_parser_lib\nfrom smu.parser import smu_utils_lib\n\nMAIN_DAT_FILE = 'x07_sample.dat'\nSTAGE1_DAT_FILE = 'x07_stage1.dat'\nTESTDATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'testdata')\n\n\ndef str_to_bond_topology(s):\n bt = dataset_pb2.BondTopology()\n text_format.Parse(s, bt)\n return bt\n\n\ndef get_stage1_conformer():\n parser = smu_parser_lib.SmuParser(\n os.path.join(TESTDATA_PATH, STAGE1_DAT_FILE))\n conformer, _ = next(parser.process_stage1())\n return conformer\n\n\ndef get_stage2_conformer():\n parser = smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH, MAIN_DAT_FILE))\n conformer, _ = next(parser.process_stage2())\n return conformer\n\n\nclass SpecialIDTest(absltest.TestCase):\n\n def test_from_dat_id(self):\n self.assertIsNone(\n smu_utils_lib.special_case_bt_id_from_dat_id(123456, 'CC'))\n self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998, 'O'),\n 899650)\n self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0, 'O'),\n 899650)\n with self.assertRaises(ValueError):\n smu_utils_lib.special_case_bt_id_from_dat_id(0, 'NotASpecialCaseSmiles')\n\n def test_from_bt_id(self):\n self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))\n self.assertEqual(\n smu_utils_lib.special_case_dat_id_from_bt_id(899651), 999997)\n\n\nclass GetCompositionTest(absltest.TestCase):\n\n def test_simple(self):\n bt = dataset_pb2.BondTopology()\n bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C,\n dataset_pb2.BondTopology.ATOM_C,\n dataset_pb2.BondTopology.ATOM_N,\n dataset_pb2.BondTopology.ATOM_H,\n dataset_pb2.BondTopology.ATOM_H,\n dataset_pb2.BondTopology.ATOM_H])\n self.assertEqual('x03_c2nh3', smu_utils_lib.get_composition(bt))\n\n\nclass GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase):\n\n def test_cyclobutane(self):\n bt = smu_utils_lib.create_bond_topology('CCCC', '110011', '2222')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)4')\n\n def test_ethylene(self):\n bt = smu_utils_lib.create_bond_topology('CC', '2', '22')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(ch2)2')\n\n def test_acrylic_acid(self):\n bt = smu_utils_lib.create_bond_topology('CCCOO', '2000100210', '21001')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),\n '(c)(ch)(ch2)(o)(oh)')\n\n def test_fluorine(self):\n bt = smu_utils_lib.create_bond_topology('OFF', '110', '000')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt), '(o)(f)2')\n\n def test_fully_saturated(self):\n 
self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(\n smu_utils_lib.create_bond_topology('C', '', '4')), '(ch4)')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(\n smu_utils_lib.create_bond_topology('N', '', '3')), '(nh3)')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(\n smu_utils_lib.create_bond_topology('O', '', '2')), '(oh2)')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(\n smu_utils_lib.create_bond_topology('F', '', '1')), '(fh)')\n\n def test_nplus_oneg(self):\n bt = smu_utils_lib.create_bond_topology('NO', '1', '30')\n self.assertEqual(\n smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt),\n '(nh3)(o)')\n\n\nclass ParseBondTopologyTest(absltest.TestCase):\n\n def test_4_heavy(self):\n num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(\n ' 4 N+O O O- 010110 3000')\n self.assertEqual(num_atoms, 4)\n self.assertEqual(atoms_str, 'N+O O O-')\n self.assertEqual(matrix, '010110')\n self.assertEqual(hydrogens, '3000')\n\n def test_7_heavy(self):\n num_atoms, atoms_str, matrix, hydrogens = smu_utils_lib.parse_bond_topology_line(\n ' 7 N+O O O O-F F 001011101001000000000 1000000')\n self.assertEqual(num_atoms, 7)\n self.assertEqual(atoms_str, 'N+O O O O-F F ') # Note the trailing space\n self.assertEqual(matrix, '001011101001000000000')\n self.assertEqual(hydrogens, '1000000')\n\n\nclass CreateBondTopologyTest(absltest.TestCase):\n\n def test_no_charged(self):\n got = smu_utils_lib.create_bond_topology('CNFF', '111000', '1200')\n expected_str = '''\natoms: ATOM_C\natoms: ATOM_N\natoms: ATOM_F\natoms: ATOM_F\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\nbonds {\n atom_b: 1\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 3\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 4\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 1\n atom_b: 5\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 1\n atom_b: 6\n bond_type: BOND_SINGLE\n}\n'''\n expected = str_to_bond_topology(expected_str)\n self.assertEqual(str(expected), str(got))\n\n def test_charged(self):\n # This is actually C N N+O-\n got = smu_utils_lib.create_bond_topology('CNNO', '200101', '2020')\n expected_str = '''\natoms: ATOM_C\natoms: ATOM_N\natoms: ATOM_NPOS\natoms: ATOM_ONEG\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\nbonds {\n atom_b: 1\n bond_type: BOND_DOUBLE\n}\nbonds {\n atom_a: 1\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 2\n atom_b: 3\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 4\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 5\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 2\n atom_b: 6\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 2\n atom_b: 7\n bond_type: BOND_SINGLE\n}\n'''\n expected = str_to_bond_topology(expected_str)\n self.assertEqual(str(expected), str(got))\n\n def test_one_heavy(self):\n got = smu_utils_lib.create_bond_topology('C', '', '4')\n expected_str = '''\natoms: ATOM_C\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\nbonds {\n atom_b: 1\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 3\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 4\n bond_type: BOND_SINGLE\n}\n'''\n expected = str_to_bond_topology(expected_str)\n self.assertEqual(str(expected), str(got))\n\n\nclass FromCSVTest(absltest.TestCase):\n\n def test_basic(self):\n infile = tempfile.NamedTemporaryFile(mode='w', delete=False)\n infile.write(\n 
'id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\\n')\n infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\\n')\n infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\\n')\n infile.close()\n\n out = smu_utils_lib.generate_bond_topologies_from_csv(infile.name)\n\n bt = next(out)\n self.assertEqual(68, bt.bond_topology_id)\n self.assertLen(bt.atoms, 4)\n self.assertEqual(bt.smiles, '[NH+]#C[O-]')\n\n bt = next(out)\n self.assertEqual(134, bt.bond_topology_id)\n self.assertLen(bt.atoms, 5)\n self.assertEqual(bt.smiles, '[O-][NH+](F)F')\n\n\nclass ParseDuplicatesFileTest(absltest.TestCase):\n\n def test_basic(self):\n df = smu_utils_lib.parse_duplicates_file(\n os.path.join(TESTDATA_PATH, 'small.equivalent_isomers.dat'))\n pd.testing.assert_frame_equal(\n pd.DataFrame(\n columns=['name1', 'stoich1', 'btid1', 'shortconfid1', 'confid1',\n 'name2', 'stoich2', 'btid2', 'shortconfid2', 'confid2'],\n data=[\n ['x07_c2n2o2fh3.224227.004',\n 'c2n2o2fh3', 224227, 4, 224227004,\n 'x07_c2n2o2fh3.224176.005',\n 'c2n2o2fh3', 224176, 5, 224176005],\n ['x07_c2n2o2fh3.260543.005',\n 'c2n2o2fh3', 260543, 5, 260543005,\n 'x07_c2n2o2fh3.224050.001',\n 'c2n2o2fh3', 224050, 1, 224050001],\n ]),\n df,\n check_like=True)\n\n\nclass BondTopologyToMoleculeTest(absltest.TestCase):\n\n def test_o2(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_O\natoms: ATOM_O\nbonds {\n atom_b: 1\n bond_type: BOND_DOUBLE\n}\n''')\n got = smu_utils_lib.bond_topology_to_molecule(bond_topology)\n self.assertEqual('O=O', Chem.MolToSmiles(got))\n\n def test_methane(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_C\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\natoms: ATOM_H\nbonds {\n atom_b: 1\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 3\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 4\n bond_type: BOND_SINGLE\n}\n''')\n got = smu_utils_lib.bond_topology_to_molecule(bond_topology)\n self.assertEqual('[H]C([H])([H])[H]', Chem.MolToSmiles(got))\n\n # This molecule is an N+ central atom, bonded to C (triply), O-, and F\n def test_charged_molecule(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_C\natoms: ATOM_NPOS\natoms: ATOM_ONEG\natoms: ATOM_F\nbonds {\n atom_b: 1\n bond_type: BOND_TRIPLE\n}\nbonds {\n atom_a: 1\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_a: 1\n atom_b: 3\n bond_type: BOND_SINGLE\n}\n''')\n got = smu_utils_lib.bond_topology_to_molecule(bond_topology)\n self.assertEqual('C#[N+]([O-])F', Chem.MolToSmiles(got))\n\n\nclass ConformerToMoleculeTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n self.conformer = get_stage2_conformer()\n\n # We'll make a new initial_geometry which is just the current one with all\n # coordinates multiplied by 1000\n self.conformer.initial_geometries.append(\n self.conformer.initial_geometries[0])\n new_geom = self.conformer.initial_geometries[1]\n for atom_pos in new_geom.atom_positions:\n atom_pos.x = atom_pos.x * 1000\n atom_pos.y = atom_pos.y * 1000\n atom_pos.z = atom_pos.z * 1000\n\n # For the extra bond_topology, we'll just copy the existing one and change\n # the id. 
Through the dumb luck of the molecule we picked there's not a\n # simple way to make this a new bond topology and still have it look valid\n # to RDKit\n self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])\n self.conformer.bond_topologies[1].bond_topology_id = 99999\n\n def test_all_outputs(self):\n mols = list(smu_utils_lib.conformer_to_molecules(self.conformer))\n self.assertLen(mols, 6) # 2 bond topologies * (1 opt geom + 2 init_geom)\n self.assertEqual([m.GetProp('_Name') for m in mols], [\n 'SMU 618451001 bt=618451(0/2) geom=init(0/2)',\n 'SMU 618451001 bt=618451(0/2) geom=init(1/2)',\n 'SMU 618451001 bt=618451(0/2) geom=opt',\n 'SMU 618451001 bt=99999(1/2) geom=init(0/2)',\n 'SMU 618451001 bt=99999(1/2) geom=init(1/2)',\n 'SMU 618451001 bt=99999(1/2) geom=opt'\n ])\n self.assertEqual(\n '[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',\n Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))\n self.assertEqual(\n '[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',\n Chem.MolToSmiles(mols[4], kekuleSmiles=True, isomericSmiles=False))\n\n def test_initial_only(self):\n mols = list(\n smu_utils_lib.conformer_to_molecules(\n self.conformer,\n include_initial_geometries=True,\n include_optimized_geometry=False,\n include_all_bond_topologies=False))\n self.assertLen(mols, 2)\n self.assertEqual([m.GetProp('_Name') for m in mols], [\n 'SMU 618451001 bt=618451(0/2) geom=init(0/2)',\n 'SMU 618451001 bt=618451(0/2) geom=init(1/2)',\n ])\n # This is just one random atom I picked from the .dat file and converted to\n # angstroms instead of bohr.\n self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())\n np.testing.assert_allclose([0.6643, -3.470301, 3.4766],\n list(mols[0].GetConformer().GetAtomPosition(1)),\n atol=1e-6)\n\n self.assertEqual('C', mols[1].GetAtomWithIdx(1).GetSymbol())\n np.testing.assert_allclose([664.299998, -3470.300473, 3476.600215],\n list(mols[1].GetConformer().GetAtomPosition(1)),\n atol=1e-6)\n\n def test_optimized_only(self):\n mols = list(\n smu_utils_lib.conformer_to_molecules(\n self.conformer,\n include_initial_geometries=False,\n include_optimized_geometry=True,\n include_all_bond_topologies=False))\n self.assertLen(mols, 1)\n self.assertEqual(\n mols[0].GetProp('_Name'),\n 'SMU 618451001 bt=618451(0/2) geom=opt',\n )\n self.assertEqual(\n '[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]',\n Chem.MolToSmiles(mols[0], kekuleSmiles=True, isomericSmiles=False))\n # This is just two random atoms I picked from the .dat file and converted to\n # angstroms instead of bohr.\n self.assertEqual('C', mols[0].GetAtomWithIdx(1).GetSymbol())\n np.testing.assert_allclose([0.540254, -3.465543, 3.456982],\n list(mols[0].GetConformer().GetAtomPosition(1)),\n atol=1e-6)\n self.assertEqual('H', mols[0].GetAtomWithIdx(13).GetSymbol())\n np.testing.assert_allclose([2.135153, -1.817366, 0.226376],\n list(mols[0].GetConformer().GetAtomPosition(13)),\n atol=1e-6)\n\n\nclass SmilesCompareTest(absltest.TestCase):\n\n def test_string_format(self):\n # for some simplicity later on, we use shorter names\n self.assertEqual('MISSING', str(smu_utils_lib.SmilesCompareResult.MISSING))\n self.assertEqual('MISMATCH',\n str(smu_utils_lib.SmilesCompareResult.MISMATCH))\n self.assertEqual('MATCH', str(smu_utils_lib.SmilesCompareResult.MATCH))\n\n def test_missing(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_O\natoms: ATOM_O\nbonds {\n atom_b: 1\n bond_type: BOND_DOUBLE\n}\n''')\n result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(\n 
bond_topology)\n self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING, result)\n self.assertEqual('O=O', with_h)\n self.assertEqual('O=O', without_h)\n\n # Also directly test compute_smiles_for_bond_topology\n self.assertEqual(\n 'O=O',\n smu_utils_lib.compute_smiles_for_bond_topology(\n bond_topology, include_hs=True))\n\n def test_mismatch(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_O\natoms: ATOM_O\nbonds {\n atom_b: 1\n bond_type: BOND_DOUBLE\n}\nsmiles: \"BlahBlahBlah\"\n''')\n result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(\n bond_topology)\n self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH, result)\n self.assertEqual('O=O', with_h)\n self.assertEqual('O=O', without_h)\n\n def test_matched_and_h_stripping(self):\n bond_topology = str_to_bond_topology('''\natoms: ATOM_O\natoms: ATOM_H\natoms: ATOM_H\nbonds {\n atom_b: 1\n bond_type: BOND_SINGLE\n}\nbonds {\n atom_b: 2\n bond_type: BOND_SINGLE\n}\nsmiles: \"O\"\n''')\n result, with_h, without_h = smu_utils_lib.bond_topology_smiles_comparison(\n bond_topology)\n self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH, result)\n self.assertEqual('[H]O[H]', with_h)\n self.assertEqual('O', without_h)\n\n # Also directly test compute_smiles_for_bond_topology\n self.assertEqual(\n '[H]O[H]',\n smu_utils_lib.compute_smiles_for_bond_topology(\n bond_topology, include_hs=True))\n self.assertEqual(\n 'O',\n smu_utils_lib.compute_smiles_for_bond_topology(\n bond_topology, include_hs=False))\n\n def test_compute_smiles_from_molecule_no_hs(self):\n mol = Chem.MolFromSmiles('FOC', sanitize=False)\n self.assertEqual(\n smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')\n # This is expected. Even with include_hs=True, if there were no Hs in the\n # molecule, they will not be in the smiles.\n self.assertEqual(\n smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True), 'COF')\n\n def test_compute_smiles_from_molecule_with_hs(self):\n mol = Chem.MolFromSmiles('FOC', sanitize=False)\n Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)\n mol = Chem.AddHs(mol)\n self.assertEqual(\n smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False), 'COF')\n self.assertEqual(\n smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=True),\n '[H]C([H])([H])OF')\n\n def test_compute_smiles_from_molecule_special_case(self):\n mol = Chem.MolFromSmiles('C12=C3C4=C1C4=C23', sanitize=False)\n # Double check that this really is the special case -- we get back the\n # SMILES we put in even though it's not the one we want.\n self.assertEqual('C12=C3C4=C1C4=C23',\n Chem.MolToSmiles(mol, kekuleSmiles=True))\n self.assertEqual(\n smu_utils_lib.compute_smiles_for_molecule(mol, include_hs=False),\n 'C12=C3C1=C1C2=C31')\n\n def test_compute_smiles_from_molecule_labeled_with_h(self):\n mol = Chem.MolFromSmiles(\n '[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)\n self.assertIsNotNone(mol)\n self.assertEqual(\n '[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]',\n smu_utils_lib.compute_smiles_for_molecule(\n mol, include_hs=True, labeled_atoms=True))\n\n def test_compute_smiles_from_molecule_labeled_no_h(self):\n mol = Chem.MolFromSmiles(\n '[O-][N+]([H])([H])N([H])OC([H])([H])F', sanitize=False)\n self.assertIsNotNone(mol)\n self.assertEqual(\n '[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]',\n smu_utils_lib.compute_smiles_for_molecule(\n mol, include_hs=False, labeled_atoms=True))\n\n\nclass MergeConformersTest(absltest.TestCase):\n\n def 
setUp(self):\n super().setUp()\n # We are relying on the fact that the first conformer in both x07_sample.dat\n # and x07_stage1.dat are the same.\n self.stage1_conformer = get_stage1_conformer()\n self.stage2_conformer = get_stage2_conformer()\n\n self.duplicate_conformer = dataset_pb2.Conformer()\n self.duplicate_conformer.conformer_id = self.stage1_conformer.conformer_id\n # A real duplicate conformer wouldn't have both of these fields filled in,\n # but it's fine for the test to make sure everything is copied.\n self.duplicate_conformer.duplicated_by = 123\n self.duplicate_conformer.duplicate_of.extend([111, 222])\n\n def test_two_stage2(self):\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage2_conformer,\n self.stage2_conformer)\n\n def test_two_stage1(self):\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage1_conformer,\n self.stage1_conformer)\n\n def test_two_duplicates(self):\n duplicate_conformer2 = copy.deepcopy(self.duplicate_conformer)\n duplicate_conformer2.duplicate_of[:] = [333, 444]\n\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.duplicate_conformer, duplicate_conformer2)\n self.assertIsNone(got_conflict)\n self.assertEqual(123, got_conf.duplicated_by)\n self.assertCountEqual([111, 222, 333, 444], got_conf.duplicate_of)\n\n def test_stage2_stage1(self):\n # Add a duplicate to stage1 to make sure it is copied\n self.stage1_conformer.duplicate_of.append(999)\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertIsNone(got_conflict)\n self.assertEqual(got_conf.duplicate_of, [999])\n # Just check a random field that is in stage2 but not stage1\n self.assertNotEmpty(got_conf.properties.normal_modes)\n\n def test_stage2_stage1_conflict_energy(self):\n self.stage2_conformer.properties.initial_geometry_energy.value = -1.23\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertEqual(got_conflict, [\n 618451001,\n 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,\n 1, 1, 1, 1, -1.23, 0.052254, -406.522079, 2.5e-05, True, True\n ])\n # Just check a random field that is in stage2 but not stage1\n self.assertNotEmpty(got_conf.properties.normal_modes)\n # This stage2 values should be returned\n self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.23)\n\n def test_stage2_stage1_conflict_error_codes(self):\n self.stage2_conformer.properties.errors.error_nstat1 = 999\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertEqual(got_conflict, [\n 618451001,\n 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,\n 999, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True\n ])\n # Just check a random field that is in stage2 but not stage1\n self.assertNotEmpty(got_conf.properties.normal_modes)\n\n def test_stage2_stage1_conflict_missing_geometry(self):\n self.stage2_conformer.ClearField('optimized_geometry')\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertEqual(got_conflict, [\n 618451001,\n 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, True,\n 1, 1, 1, 1, -406.51179, 0.052254, -406.522079, 2.5e-05, True, False\n ])\n # Just check a random field that is in stage2 but not stage1\n self.assertNotEmpty(got_conf.properties.normal_modes)\n\n def 
test_stage2_stage1_no_conflict_minus1(self):\n # If stage2 contains a -1, we keep that (stricter error checking later on)\n self.stage2_conformer.properties.initial_geometry_energy.value = -1.0\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertIsNone(got_conflict)\n self.assertEqual(got_conf.properties.initial_geometry_energy.value, -1.0)\n\n def test_stage2_stage1_no_conflict_approx_equal(self):\n self.stage2_conformer.properties.initial_geometry_energy.value += 1e-7\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.stage1_conformer)\n self.assertIsNone(got_conflict)\n # Just check a random field from stage2\n self.assertNotEmpty(got_conf.properties.normal_modes)\n\n def test_stage2_duplicate(self):\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage2_conformer, self.duplicate_conformer)\n self.assertIsNone(got_conflict)\n self.assertEqual(got_conf.duplicate_of, [111, 222])\n self.assertEqual(got_conf.duplicated_by, 123)\n # Just check a random field from stage2\n self.assertNotEmpty(got_conf.properties.normal_modes)\n\n def test_stage1_duplicate(self):\n got_conf, got_conflict = smu_utils_lib.merge_conformer(\n self.stage1_conformer, self.duplicate_conformer)\n self.assertIsNone(got_conflict)\n self.assertEqual(got_conf.duplicate_of, [111, 222])\n self.assertEqual(got_conf.duplicated_by, 123)\n # Just check a random field from stage1\n self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))\n\n def test_multiple_initial_geometries(self):\n bad_conformer = copy.deepcopy(self.stage1_conformer)\n bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)\n\n def test_multiple_bond_topologies(self):\n bad_conformer = copy.deepcopy(self.stage1_conformer)\n bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(bad_conformer, self.stage2_conformer)\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage2_conformer, bad_conformer)\n\n def test_different_bond_topologies(self):\n self.stage1_conformer.bond_topologies[0].atoms[0] = (\n dataset_pb2.BondTopology.ATOM_H)\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage1_conformer,\n self.stage2_conformer)\n with self.assertRaises(ValueError):\n smu_utils_lib.merge_conformer(self.stage2_conformer,\n self.stage1_conformer)\n\n\nclass ConformerErrorTest(absltest.TestCase):\n\n def test_stage1_no_error(self):\n conformer = get_stage1_conformer()\n self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n def test_stage1_error(self):\n conformer = get_stage2_conformer()\n conformer.properties.errors.error_frequencies = 123\n self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n def test_stage2_no_error(self):\n conformer = get_stage2_conformer()\n self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n def test_stage2_error_in_1_expected_field(self):\n conformer = get_stage2_conformer()\n conformer.properties.errors.error_rotational_modes = 123\n self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n def test_stage2_error_in_0_expected_field(self):\n 
conformer = get_stage2_conformer()\n # This field is 0 to indicate no error. Why the discrepancy? Who knows!\n conformer.properties.errors.error_nsvg09 = 1\n self.assertTrue(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n def test_stage2_nstat1_is_3(self):\n # This is the other bizaare case. nstat1 of 3 is still considered success.\n conformer = get_stage2_conformer()\n conformer.properties.errors.error_nstat1 = 3\n self.assertFalse(smu_utils_lib.conformer_has_calculation_errors(conformer))\n\n\nclass FilterConformerByAvailabilityTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n self.conformer = dataset_pb2.Conformer()\n properties = self.conformer.properties\n # A STANDARD field\n properties.single_point_energy_pbe0d3_6_311gd.value = 1.23\n # A COMPLETE field\n properties.homo_pbe0_aug_pc_1.value = 1.23\n # An INTERNAL_ONLY field\n properties.nuclear_repulsion_energy.value = 1.23\n\n def test_standard(self):\n smu_utils_lib.filter_conformer_by_availability(self.conformer,\n [dataset_pb2.STANDARD])\n self.assertTrue(\n self.conformer.properties.HasField(\n 'single_point_energy_pbe0d3_6_311gd'))\n self.assertFalse(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))\n self.assertFalse(\n self.conformer.properties.HasField('nuclear_repulsion_energy'))\n\n def test_complete_and_internal_only(self):\n smu_utils_lib.filter_conformer_by_availability(\n self.conformer, [dataset_pb2.COMPLETE, dataset_pb2.INTERNAL_ONLY])\n self.assertFalse(\n self.conformer.properties.HasField(\n 'single_point_energy_pbe0d3_6_311gd'))\n self.assertTrue(self.conformer.properties.HasField('homo_pbe0_aug_pc_1'))\n self.assertTrue(\n self.conformer.properties.HasField('nuclear_repulsion_energy'))\n\n\nclass ConformerToStandardTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n self.conformer = get_stage2_conformer()\n\n def test_field_filtering(self):\n # Check that the field which should be filtered starts out set\n self.assertTrue(self.conformer.properties.HasField(\n 'single_point_energy_hf_6_31gd'))\n\n got = smu_utils_lib.conformer_to_standard(self.conformer)\n # Check for a field that was originally in self.conformer and should be\n # filtered and a field which should still be present.\n self.assertTrue(got.properties.HasField(\n 'single_point_energy_pbe0d3_6_311gd'))\n self.assertFalse(\n got.properties.HasField('single_point_energy_hf_6_31gd'))\n\n def test_remove_error_conformer(self):\n self.conformer.properties.errors.error_frequencies = 123\n\n self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))\n\n def test_remove_duplicate(self):\n self.conformer.duplicated_by = 123\n\n self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))\n\n\nclass DetermineFateTest(parameterized.TestCase):\n\n def test_duplicate_same_topology(self):\n conformer = get_stage1_conformer()\n # bond topology is conformer_id // 1000\n conformer.duplicated_by = conformer.conformer_id + 1\n self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY,\n smu_utils_lib.determine_fate(conformer))\n\n def test_duplicate_different_topology(self):\n conformer = get_stage1_conformer()\n # bond topology is conformer_id // 1000\n conformer.duplicated_by = conformer.conformer_id + 1000\n self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY,\n smu_utils_lib.determine_fate(conformer))\n\n @parameterized.parameters(\n (2, dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM),\n (5, dataset_pb2.Conformer.FATE_DISASSOCIATED),\n (4, 
dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE),\n (6, dataset_pb2.Conformer.FATE_DISCARDED_OTHER))\n def test_geometry_failures(self, nstat1, expected_fate):\n conformer = get_stage1_conformer()\n conformer.properties.errors.error_nstat1 = nstat1\n self.assertEqual(expected_fate, smu_utils_lib.determine_fate(conformer))\n\n def test_no_result(self):\n conformer = get_stage1_conformer()\n self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS,\n smu_utils_lib.determine_fate(conformer))\n\n def test_calculation_errors(self):\n conformer = get_stage2_conformer()\n # This is a random choice of an error to set. I just need some error.\n conformer.properties.errors.error_atomic_analysis = 999\n self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR,\n smu_utils_lib.determine_fate(conformer))\n\n def test_success(self):\n conformer = get_stage2_conformer()\n self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS,\n smu_utils_lib.determine_fate(conformer))\n\n\nclass ToBondTopologySummaryTest(absltest.TestCase):\n\n def setUp(self):\n super().setUp()\n self.conformer = get_stage2_conformer()\n\n def test_dup_same(self):\n self.conformer.fate = dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 1)\n self.assertEqual(got[0].bond_topology.bond_topology_id,\n self.conformer.bond_topologies[0].bond_topology_id)\n self.assertEqual(got[0].count_attempted_conformers, 1)\n self.assertEqual(got[0].count_duplicates_same_topology, 1)\n\n def test_dup_diff(self):\n self.conformer.fate = (\n dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 1)\n self.assertEqual(got[0].count_attempted_conformers, 1)\n self.assertEqual(got[0].count_duplicates_different_topology, 1)\n\n def test_geometry_failed(self):\n self.conformer.fate = (dataset_pb2.Conformer.FATE_DISCARDED_OTHER)\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 1)\n self.assertEqual(got[0].count_attempted_conformers, 1)\n self.assertEqual(got[0].count_failed_geometry_optimization, 1)\n\n def test_missing_calculation(self):\n self.conformer.fate = dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 1)\n self.assertEqual(got[0].count_attempted_conformers, 1)\n self.assertEqual(got[0].count_kept_geometry, 1)\n self.assertEqual(got[0].count_missing_calculation, 1)\n\n def test_calculation_with_error(self):\n self.conformer.fate = dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR\n self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])\n self.conformer.bond_topologies[-1].bond_topology_id = 123\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 2)\n # We don't actually care about the order, but this is what comes out right\n # now.\n self.assertEqual(got[0].bond_topology.bond_topology_id, 123)\n self.assertEqual(got[0].count_attempted_conformers, 0)\n self.assertEqual(got[0].count_kept_geometry, 0)\n self.assertEqual(got[0].count_calculation_with_error, 0)\n self.assertEqual(got[0].count_detected_match_with_error, 1)\n\n self.assertEqual(got[1].bond_topology.bond_topology_id,\n self.conformer.bond_topologies[0].bond_topology_id)\n self.assertEqual(got[1].count_attempted_conformers, 1)\n 
self.assertEqual(got[1].count_kept_geometry, 1)\n self.assertEqual(got[1].count_calculation_with_error, 1)\n self.assertEqual(got[1].count_detected_match_with_error, 0)\n\n def test_calculation_success(self):\n self.conformer.fate = dataset_pb2.Conformer.FATE_SUCCESS\n self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])\n self.conformer.bond_topologies[-1].bond_topology_id = 123\n got = list(\n smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))\n self.assertLen(got, 2)\n # We don't actually care about the order, but this is what comes out right\n # now.\n self.assertEqual(got[0].bond_topology.bond_topology_id, 123)\n self.assertEqual(got[0].count_attempted_conformers, 0)\n self.assertEqual(got[0].count_kept_geometry, 0)\n self.assertEqual(got[0].count_calculation_success, 0)\n self.assertEqual(got[0].count_detected_match_success, 1)\n\n self.assertEqual(got[1].bond_topology.bond_topology_id,\n self.conformer.bond_topologies[0].bond_topology_id)\n self.assertEqual(got[1].count_attempted_conformers, 1)\n self.assertEqual(got[1].count_kept_geometry, 1)\n self.assertEqual(got[1].count_calculation_success, 1)\n self.assertEqual(got[1].count_detected_match_success, 0)\n\n\nclass LabeledSmilesTester(absltest.TestCase):\n\n def test_atom_labels(self):\n mol = Chem.MolFromSmiles('FCON[NH2+][O-]', sanitize=False)\n self.assertIsNotNone(mol)\n smiles_before = Chem.MolToSmiles(mol)\n self.assertEqual(\n smu_utils_lib.labeled_smiles(mol), 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')\n # Testing both the atom numbers and the smiles is redundant,\n # but guards against possible future changes.\n for atom in mol.GetAtoms():\n self.assertEqual(atom.GetAtomMapNum(), 0)\n self.assertEqual(Chem.MolToSmiles(mol), smiles_before)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
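A tiny interactive sketch of the library calls these tests exercise, mirroring the ethylene case above; the expected outputs are noted in comments and should be verified against the library itself.

from smu.parser import smu_utils_lib

# Ethylene: two carbons joined by a double bond, two hydrogens on each.
bt = smu_utils_lib.create_bond_topology('CC', '2', '22')
print(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt))  # '(ch2)2'
# Round-trip through RDKit to a SMILES string (expected 'C=C' without Hs).
print(smu_utils_lib.compute_smiles_for_bond_topology(bt, include_hs=False))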
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utility functions for aptamer simulations.\n\"\"\"\n\nimport numpy\n\n\n# numpy.random.RandomState uses uint32 seeds\nRANDOM_SEED_MAX = 2 ** 32\n\n\ndef random_seed_stream(random_seed=None):\n \"\"\"Yield an infinite stream of numbers for seeding random number generators.\n\n This method is not proven to be cryptographically secure, and only explores a\n small portion of the state space for NumPy's random number generator. Still,\n it's a useful shortcut for writing decoupled functions that rely on random\n state. See this thread for extensive discussion of its merits and the\n alternatives:\n https://mail.scipy.org/pipermail/numpy-discussion/2016-May/075487.html\n\n Example:\n\n >>> seed_gen = random_seed_stream(42)\n >>> next(seed_gen)\n 1608637542\n\n Args:\n random_seed: optional integer used to seed this stream of random seeds.\n\n Yields:\n Integer seeds suitable for use in numpy.random.RandomState. Each seed is\n independent and psuedo-randomly generated from the `random_seed` argument.\n \"\"\"\n rs = numpy.random.RandomState(random_seed)\n seed = rs.randint(RANDOM_SEED_MAX)\n\n while True:\n yield seed\n\n # Incrementing is better than generating new seeds with a call to randint,\n # because with random seeds collisions are likely after only around 2 ** 16\n # samples due to the birthday paradox.\n seed = (seed + 1) % RANDOM_SEED_MAX\n\n\ndef target_occupancy(target_affinity,\n serum_affinity,\n target_concentration,\n serum_concentration):\n \"\"\"Calculate target site occupancy in the presence of serum.\n\n Assumes that the amount of target and serum are very large (compared to the\n amount of aptamers), such that their concentration can be treated as fixed.\n\n TODO(mdimon): Validate this assumption.\n\n All argument should be provided with the same units.\n\n Args:\n target_affinity: number or ndarray-like giving affinity for the target site.\n serum_affinity: number or ndarray-like giving serum affinity.\n target_concentration: number or ndarray-like giving target concentration.\n serum_concentration: number or ndarray-like giving serum concentration.\n\n Returns:\n Number or ndarray-like giving the fraction of bound target sites.\n \"\"\"\n # see Equation (7) from:\n # https://en.wikipedia.org/wiki/Competitive_inhibition#Derivation\n numerator = serum_affinity * target_concentration\n denominator = (target_affinity * serum_affinity\n + serum_affinity * target_concentration\n + target_affinity * serum_concentration)\n return numerator / denominator\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"User model in the ecosystem.\n\nThe user model consists of user state representation, a user sampler, user\nstate transition model, and user response model.\n - user state representation: includes both observable and unobservable user\n features.\n - user sampler: sample a user.\n - user state transition model: describes the dynamics of user state\n transition after receiving recommendations from the agent.\n - user response model: characterizes how the user responds to the recommended\n slate, e.g document choice and engagement/satisfaction level with it.\n\"\"\"\n\nfrom absl import flags\nfrom gym import spaces\nimport numpy as np\nfrom recsim import choice_model\nfrom recsim import user\n\nfrom recs_ecosystem_creator_rl.environment import sampling_utils\n\nFLAGS = flags.FLAGS\n\n\nclass UserState(user.AbstractUserState):\n \"\"\"Class to represent users.\"\"\"\n\n def __init__(self, user_id, quality_sensitivity, topic_influence,\n observation_noise_std, viability_threshold, topic_dim,\n topic_preference, initial_satisfaction, satisfaction_decay):\n \"\"\"Constructor.\n\n Args:\n user_id: Int representing user id.\n quality_sensitivity: Float representing how sensitive the user is to the\n quality of document when generating rewards.\n topic_influence: Float within [0,1] representing how much topic_preferene\n changes as a response to clicked document.\n observation_noise_std: Float, standard deviation of truncated Guassian\n noise when generating user reward, noise is truncated within [-1, 1].\n viability_threshold: Float, the least satisfaction the user needs to have\n to stay in the platform.\n topic_dim: int representing number of topics,\n topic_preference: Float array of probability representing user's\n preference on topics.\n initial_satisfaction: Float representing user's initial satisfaction with\n the platform.\n satisfaction_decay: Float representing user's satisfaction decay rate.\n \"\"\"\n self.user_id = user_id\n\n # Transition hyper-parameters.\n self.quality_sensitivity = quality_sensitivity\n self.topic_influence = topic_influence\n self.observation_noise_std = observation_noise_std\n self.viability_threshold = viability_threshold\n self.satisfaction_decay = satisfaction_decay\n\n # State variables.\n self.topic_dim = topic_dim\n self.topic_preference = topic_preference\n self.satisfaction = initial_satisfaction\n\n def create_observation(self):\n \"\"\"Returns user id since user's state is not observable.\"\"\"\n # User state (topic_preference) is not observable.\n return int(self.user_id)\n\n @staticmethod\n def observation_space():\n return spaces.Discrete(np.inf)\n\n def score_document(self, doc_obs):\n \"\"\"Returns the user's affinity to the document.\"\"\"\n # Current document observation is document's topic.\n return np.dot(self.topic_preference, doc_obs['topic'])\n\n\nclass UserSampler(user.AbstractUserSampler):\n \"\"\"Generates a user with 
uniformly sampled topic preferences.\"\"\"\n\n def __init__(self,\n user_ctor=UserState,\n user_id=0,\n quality_sensitivity=0.3,\n topic_influence=0.2,\n topic_dim=10,\n observation_noise_std=0.1,\n initial_satisfaction=10,\n viability_threshold=0,\n satisfaction_decay=1.0,\n sampling_space='unit ball',\n **kwargs):\n self._state_parameters = {\n 'user_id': user_id,\n 'quality_sensitivity': quality_sensitivity,\n 'topic_dim': topic_dim,\n 'topic_influence': topic_influence,\n 'observation_noise_std': observation_noise_std,\n 'initial_satisfaction': initial_satisfaction,\n 'viability_threshold': viability_threshold,\n 'satisfaction_decay': satisfaction_decay,\n }\n self.sampling_space = sampling_space\n super(UserSampler, self).__init__(user_ctor, **kwargs)\n\n def sample_user(self):\n # Uniformly sample initial topic preference from a simplex of dimension\n # `topic_dim`.\n if self.sampling_space == 'unit ball':\n self._state_parameters[\n 'topic_preference'] = sampling_utils.sample_from_unit_ball(\n self._rng, self._state_parameters['topic_dim'])\n elif self.sampling_space == 'simplex':\n self._state_parameters[\n 'topic_preference'] = sampling_utils.sample_from_simplex(\n self._rng, self._state_parameters['topic_dim'])\n else:\n raise ValueError('Only support sampling from a simplex or a unit ball.')\n return self._user_ctor(**self._state_parameters)\n\n\nclass ResponseModel(user.AbstractResponse):\n \"\"\"User response class that records user's response to recommended slate.\"\"\"\n\n def __init__(self, clicked=False, reward=0.0):\n self.clicked = clicked\n self.reward = reward\n\n def create_observation(self):\n return {'click': int(self.clicked), 'reward': np.array(self.reward)}\n\n @staticmethod\n def response_space():\n return spaces.Dict({\n 'click':\n spaces.Discrete(2),\n 'reward':\n spaces.Box(low=0.0, high=np.inf, dtype=np.float32, shape=tuple())\n })\n\n\n# TODO(team): Add more details in the class docstring about the User Model.\nclass UserModel(user.AbstractUserModel):\n \"\"\"Class that represents an encoding of a user's dynamics including generating responses and state transitioning.\"\"\"\n\n def __init__(\n self,\n slate_size,\n user_sampler,\n response_model_ctor,\n choice_model_ctor=lambda: choice_model.MultinomialLogitChoiceModel({})):\n \"\"\"Initializes a UserModel.\n\n Args:\n slate_size: Number of items that the agent suggests.\n user_sampler: A UserSampler responsible for providing new users every time\n reset is called.\n response_model_ctor: A response_model class that generates user response\n to recommendations.\n choice_model_ctor: A function that returns a ChoiceModel that will\n determine which doc in the slate the user interacts with.\n \"\"\"\n super(UserModel, self).__init__(\n slate_size=slate_size,\n user_sampler=user_sampler,\n response_model_ctor=response_model_ctor,\n )\n self.choice_model = choice_model_ctor()\n\n def simulate_response(self, documents):\n \"\"\"Simulate user's response to a slate of documents with choice model.\n\n If the document is not clicked by the user, the default reward for this\n document is -1.\n If the document is clicked by the user, the reward is\n user.quality_sensitivity * document.quality + (1 -\n user.quality_sensitivity) * <user.topic_preference, document.topic> +\n noise.\n The noise is sampled from a truncated Gaussian within range [-1, 1],\n\n Args:\n documents: A list of Document objects.\n\n Returns:\n responses: A list of Response objects, one for each document.\n \"\"\"\n responses = 
[self._response_model_ctor() for _ in documents]\n\n # Score each slate item and select one.\n self.choice_model.score_documents(\n self._user_state, [doc.create_observation() for doc in documents])\n selected_index = self.choice_model.choose_item()\n # `choice_model.choose_item()` can return None if the \"None of the above\"\n # option is given sufficient weight to be chosen, (see e.g.,\n # choice_model.NormalizableChoiceModel.choose_item which always adds an\n # extra item to the slate which signifies choosing nothing from the slate.)\n # If None is returned, no item is clicked.\n if selected_index is not None:\n responses[selected_index].clicked = True\n responses[\n selected_index].reward = self._user_state.quality_sensitivity * documents[\n selected_index].quality + (\n 1 - self._user_state.quality_sensitivity) * np.dot(\n self._user_state.topic_preference,\n documents[selected_index].topic\n ) + sampling_utils.sample_from_truncated_normal(\n mean=0.0,\n std=self._user_state.observation_noise_std,\n clip_a=-1.0,\n clip_b=1.0) + 2.0 # Shift to positive.\n\n return responses\n\n def update_state(self, documents):\n \"\"\"Update user state and generate user response_observations.\n\n Use self.simulate_response to generate a list of Response object for each\n documents.\n\n User's total satisfaction firstly shrinks by rate satisfaction_decay.\n If no document is consumed, user's topic preference remains untouched, and\n the total satisfaction decreases by 1.\n\n If the user clicks one document, her satisfaction changes by the\n response.reward, and her topic_preference will be:\n 1. temporal_topic_preference <- topic_preference + topic_influence *\n response.reward * document.topic.\n 2. normalize the temporal_topic_preference to the topic_preference domain\n (unit ball),and set it to be the new user.topic_preference.\n Intuitively, the user topic preference will shift toward the document.topic\n if the response.reward is positive. Otherwise the user will decrease her\n preference on the document's topic.\n\n Args:\n documents: A list of Document objects in the recommended slate.\n\n Returns:\n A list of Response observations for the recommended documents.\n \"\"\"\n responses = self.simulate_response(documents)\n self._user_state.satisfaction *= self._user_state.satisfaction_decay\n click = False\n for doc, response in zip(documents, responses):\n if response.clicked:\n # Update user's satisfaction based on the clicked document.\n self._user_state.satisfaction += response.reward\n # Update user's topic preference based on the clicked document.\n topic_preference = self._user_state.topic_preference + self._user_state.topic_influence * response.reward * doc.topic\n # Normalize the topic_preference to the unit ball.\n topic_preference = topic_preference / np.sqrt(\n np.sum(topic_preference**2))\n self._user_state.topic_preference = topic_preference\n click = True\n break\n\n # If no click, user satisfaction decreases by 1.\n if not click:\n self._user_state.satisfaction -= 1\n return [response.create_observation() for response in responses]\n\n def score_document(self, doc_obs):\n return self._user_state.score_document(doc_obs)\n\n def get_user_id(self):\n return self._user_state.user_id\n\n def is_terminal(self):\n return self._user_state.satisfaction < self._user_state.viability_threshold\n",
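A short sketch of driving the user-model pieces directly; the document observation below is a hypothetical stand-in that only supplies the 'topic' field read by `score_document`.

import numpy as np

# Sample a user whose topic preference lies on the 10-dimensional unit ball.
sampler = UserSampler(user_id=0, topic_dim=10, sampling_space='unit ball')
user_state = sampler.sample_user()

# Hypothetical document observation: a one-hot "topic 3" document.
doc_obs = {'topic': np.eye(10)[3]}
print(user_state.score_document(doc_obs))  # dot(topic_preference, topic)
print(user_state.create_observation())     # just the integer user id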
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions for command line arguments.\"\"\"\n\nimport os\nimport sys\nimport tensorflow.compat.v2 as tf\n\n\ndef add_common_flags(parser):\n parser.add_argument(\n \"--tpu_ip\",\n type=str,\n default=None,\n help=\"Cloud TPU internal ip \"\n \"(see `gcloud compute tpus list`)\")\n parser.add_argument(\"--seed\", type=int, default=0, help=\"Random seed\")\n parser.add_argument(\n \"--weight_decay\",\n type=float,\n default=15.,\n help=\"Weight decay, equivalent to setting prior std\")\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=1.,\n help=\"Temperature of the posterior\")\n parser.add_argument(\n \"--init_checkpoint\",\n type=str,\n default=None,\n help=\"Checkpoint to use for initialization of the chain\")\n parser.add_argument(\n \"--tabulate_freq\",\n type=int,\n default=40,\n help=\"Frequency of tabulate table header prints\")\n parser.add_argument(\n \"--dir\",\n type=str,\n default=None,\n required=True,\n help=\"Directory for checkpoints and tensorboard logs\")\n parser.add_argument(\n \"--dataset_name\", type=str, default=\"cifar10\", help=\"Name of the dataset\")\n parser.add_argument(\n \"--subset_train_to\",\n type=int,\n default=None,\n help=\"Size of the subset of train data to use; \"\n \"full dataset is used by default\")\n parser.add_argument(\n \"--model_name\", type=str, default=\"lenet\", help=\"Name of the dataset\")\n parser.add_argument(\n \"--use_float64\",\n dest=\"use_float64\",\n action=\"store_true\",\n help=\"Use float64 precision (does not work on TPUs)\")\n\n\ndef add_sgd_flags(parser):\n\n parser.add_argument(\n \"--init_step_size\",\n type=float,\n default=1.e-6,\n help=\"Initial SGD step size\")\n parser.add_argument(\n \"--num_epochs\",\n type=int,\n default=300,\n help=\"Total number of SGD epochs iterations\")\n parser.add_argument(\"--batch_size\", type=int, default=80, help=\"Batch size\")\n parser.add_argument(\n \"--eval_freq\",\n type=int,\n default=10,\n help=\"Frequency of evaluation (epochs)\")\n parser.add_argument(\n \"--save_freq\",\n type=int,\n default=50,\n help=\"Frequency of checkpointing (epochs)\")\n parser.add_argument(\n \"--momentum_decay\",\n type=float,\n default=0.9,\n help=\"Momentum decay parameter for SGD\")\n\n\ndef save_cmd(dirname, tf_writer):\n command = \" \".join(sys.argv)\n with 
open(os.path.join(dirname, \"command.sh\"), \"w\") as f:\n f.write(command)\n f.write(\"\\n\")\n if tf_writer is not None:\n with tf_writer.as_default():\n tf.summary.text(\n \"command\", command, step=0, description=\"Command line arguments\")\n",
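These helpers are presumably attached to a standard `argparse.ArgumentParser` by the training scripts; a minimal sketch follows, with the output directory as a placeholder and no TF summary writer, so only command.sh is written.

import argparse
import os

parser = argparse.ArgumentParser(description='Training script flags')
add_common_flags(parser)
add_sgd_flags(parser)
args = parser.parse_args(['--dir', '/tmp/run0', '--dataset_name', 'cifar10'])

print(args.weight_decay, args.temperature, args.init_step_size, args.batch_size)

# Record the exact command line next to the checkpoints.
os.makedirs(args.dir, exist_ok=True)
save_cmd(args.dir, tf_writer=None)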
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for heuristically flying a camera through a generated scene.\"\"\"\n\nimport math\n\nimport geometry\nimport tensorflow as tf\n\n\ndef camera_with_look_direction(position, look_direction, down_direction):\n \"\"\"A camera pose specified by where it is and what direction it looks in.\n\n Args:\n position: [..., 3] position of camera in world.\n look_direction: [..., 3] direction of optical axis (need not be normalised).\n down_direction: [..., 3] a direction that should project to down (+ve Y).\n Returns:\n [..., 3, 4] Camera pose.\n \"\"\"\n # We construct world vectors that correspond to the three axis in camera\n # space.\n # look_direction is like Z, down_direction is like Y.\n # Y cross Z = X (right-hand rule).\n vector_z = tf.math.l2_normalize(look_direction, axis=-1)\n vector_x = tf.math.l2_normalize(\n tf.linalg.cross(down_direction, vector_z), axis=-1)\n vector_y = tf.linalg.cross(vector_z, vector_x)\n # With these three vectors and the pose, we can build the camera matrix:\n camera_to_world = tf.stack([vector_x, vector_y, vector_z, position], axis=-1)\n return geometry.mat34_pose_inverse(camera_to_world)\n\n\ndef skyline_balance(disparity, horizon=0.3, near_fraction=0.2):\n \"\"\"Computes movement parameters from a disparity image.\n\n Args:\n disparity: [H, W, 1] disparity image.\n horizon: how far down the image the horizon should ideally be.\n near_fraction: how much of the image should be \"near\".\n\n Returns:\n (x, y, h) where x and y are where in the image we want to be looking (as\n texture coordinates) and h is how much we want to move upwards.\n \"\"\"\n # Experiment shows that the skyline boundary is somewhere between disparity\n # 0.05 and disparity 0.1. 
So scale and clip to give a soft sky mask.\n  sky = tf.clip_by_value(20.0 * (0.1 - disparity), 0.0, 1.0)\n\n  # How much of the image is sky?\n  sky_fraction = tf.reduce_mean(sky)\n  y = 0.5 + sky_fraction - horizon\n\n  # The balance of sky in the left and right half of the image.\n  w2 = disparity.shape[-2] // 2\n  sky_left = tf.reduce_mean(sky[Ellipsis, :w2, :])\n  sky_right = tf.reduce_mean(sky[Ellipsis, w2:, :])\n  # Turn away from mountain:\n  epsilon = 1e-4\n  x = (sky_right + epsilon) / (sky_left + sky_right + 2 * epsilon)\n\n  # Now we try to measure how \"near the ground\" we are, by looking at how\n  # much of the image has disparity > 0.5 (ramping to max at 0.6)\n  ground = tf.clip_by_value(10.0 * (disparity - 0.5), 0.0, 1.0)\n  ground_fraction = tf.reduce_mean(ground)\n  h = horizon + (near_fraction - ground_fraction)\n  return x, y, h\n\n\ndef fly_dynamic(\n    intrinsics, initial_pose,\n    speed=0.2, lerp=0.05, movelerp=0.05,\n    horizon=0.3, near_fraction=0.2,\n    meander_x_period=100, meander_x_magnitude=0.0,\n    meander_y_period=100, meander_y_magnitude=0.0,\n    turn_function=None):\n  \"\"\"Return a function for flying a camera heuristically.\n\n  This flying function looks at the disparity as it goes and decides whether\n  to look more up/down or left/right, and also whether to try to fly further\n  away from or nearer to the ground.\n\n  Args:\n    intrinsics: [4] Camera intrinsics.\n    initial_pose: [3, 4] Initial camera pose.\n    speed: How far to move per step.\n    lerp: How fast to converge look direction to target.\n    movelerp: How fast to converge movement to target.\n    horizon: What fraction of the image should lie above the horizon.\n    near_fraction: How much of the image should be \"near\".\n    meander_x_period: Number of frames to produce a cyclic meander in the\n      horizontal direction\n    meander_x_magnitude: How far to meander horizontally\n    meander_y_period: Number of frames to produce a cyclic meander in the\n      vertical direction\n    meander_y_magnitude: How far to meander vertically\n    turn_function: A function which returns an x, y position to turn towards\n\n  Returns:\n    a function fly_step which takes an rgbd image and returns the pose for\n    the next camera. Call fly_step repeatedly to generate a series of poses.\n    This is a stateful function and will internally keep track of camera\n    position and velocity.
Can only operate in eager mode.\n \"\"\"\n # Where is the camera looking, and which way is down:\n camera_to_world = geometry.mat34_pose_inverse(initial_pose)\n look_dir = camera_to_world[:, 2]\n move_dir = look_dir # Begin by moving forwards.\n down = camera_to_world[:, 1]\n position = camera_to_world[:, 3]\n t = 0\n\n reverse = (speed < 0)\n\n def fly_step(rgbd):\n nonlocal camera_to_world\n nonlocal look_dir\n nonlocal move_dir\n nonlocal down\n nonlocal position\n nonlocal t\n\n if turn_function:\n (xoff, yoff) = turn_function(t)\n else:\n (xoff, yoff) = (0.0, 0.0)\n\n xoff += math.sin(t * 2.0 * math.pi/ meander_x_period) * meander_x_magnitude\n yoff += math.sin(t * 2.0 * math.pi/ meander_y_period) * meander_y_magnitude\n t = t + 1\n\n down = camera_to_world[:, 1] # Comment this out for fixed down\n disparity = rgbd[Ellipsis, 3:]\n x, y, h = skyline_balance(\n disparity, horizon=horizon, near_fraction=near_fraction)\n if reverse:\n h = 1.0 - h\n x = 1.0 - x\n look_uv = tf.stack([x + xoff, y + yoff])\n move_uv = tf.stack([0.5, h])\n uvs = tf.stack([look_uv, move_uv], axis=0)\n\n # Points in world\n points = geometry.mat34_transform(\n camera_to_world,\n geometry.texture_to_camera_coordinates(uvs, intrinsics))\n new_look_dir = tf.math.l2_normalize(points[0] - position)\n new_move_dir = tf.math.l2_normalize(points[1] - position)\n\n # Very simple smoothing\n look_dir = look_dir * (1.0 - lerp) + new_look_dir * lerp\n move_dir = move_dir * (1.0 - movelerp) + new_move_dir * movelerp\n position = position + move_dir * speed\n\n # Next pose\n pose = camera_with_look_direction(position, look_dir, down)\n camera_to_world = geometry.mat34_pose_inverse(pose)\n return pose\n\n return fly_step\n",
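A sketch of how `fly_dynamic` might be driven inside a render loop. The intrinsics layout and initial pose here are assumptions, and `render_rgbd` is a hypothetical stand-in for whatever produces an [H, W, 4] RGB-plus-disparity image for a given camera pose.

import tensorflow as tf

intrinsics = tf.constant([0.5, 0.5, 0.5, 0.5])    # assumed (fx, fy, cx, cy) layout
initial_pose = tf.constant([[1.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0]])  # identity [3, 4] pose

fly_step = fly_dynamic(intrinsics, initial_pose,
                       speed=0.2, meander_x_magnitude=0.1)

poses = []
rgbd = render_rgbd(initial_pose)   # hypothetical renderer, returns [H, W, 4]
for _ in range(100):
  pose = fly_step(rgbd)            # [3, 4] pose for the next frame
  rgbd = render_rgbd(pose)
  poses.append(pose)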
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main training script for Cascaded Nets.\"\"\"\nimport collections\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom ml_collections.config_flags import config_flags\nimport numpy as np\nimport torch\nfrom torch import optim\nfrom cascaded_networks.datasets.dataset_handler import DataHandler\nfrom cascaded_networks.models import densenet\nfrom cascaded_networks.models import resnet\nfrom cascaded_networks.modules import eval_handler\nfrom cascaded_networks.modules import losses\nfrom cascaded_networks.modules import train_handler\nfrom cascaded_networks.modules import utils\n\n# Setup Flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('gcs_path', None, 'gcs_path dir')\nflags.DEFINE_bool('hyper_param_sweep', None, 'conducting hyperparam sweep')\nflags.DEFINE_integer('n_gpus', None, 'Number of GPUs')\n\nconfig_flags.DEFINE_config_file(\n name='config',\n default=None,\n help_string='Path to the Training configuration.')\n\n\ndef main(_):\n config = FLAGS.config\n\n if config.debug:\n config.epochs = 5\n\n # Make reproducible\n utils.make_reproducible(config.random_seed)\n\n # Parse GCS bucket path\n gcs_subpath = config.local_output_dir\n\n # Setup output directory\n out_basename = f'td({config.lambda_val})' if config.cascaded else 'std'\n out_basename += f',seed_{config.random_seed}'\n if FLAGS.hyper_param_sweep:\n out_basename += f',bs={config.batch_size}'\n out_basename += f',lr={config.learning_rate}'\n out_basename += f',wd={config.weight_decay}'\n\n save_root = os.path.join(gcs_subpath, config.experiment_name, out_basename)\n logging.info('Saving experiment to %s', save_root)\n\n # Flag check\n if config.tdl_mode == 'EWS':\n assert config.tdl_alpha is not None, 'tdl_alpha not set'\n elif config.tdl_mode == 'noise':\n assert config.noise_var is not None, 'noise_var not set'\n utils.save_flags(FLAGS, save_root, config)\n\n # Device\n device = torch.device('cuda'\n if torch.cuda.is_available() and config.use_gpu\n else 'cpu')\n\n # Set dataset root\n dataset_root = '/tmp/dataset'\n if not os.path.exists(dataset_root):\n os.makedirs(dataset_root)\n\n # Data Handler\n data_dict = {\n 'dataset_name': config.dataset_name,\n 'data_root': dataset_root,\n 'val_split': config.val_split,\n 'split_idxs_root': 'split_idxs',\n 'noise_type': config.augmentation_noise_type,\n 'load_previous_splits': True,\n }\n data_handler = DataHandler(**data_dict)\n\n # Model\n model_dict = {\n 'seed': config.random_seed,\n 'num_classes': data_handler.num_classes,\n 'pretrained': False,\n 'cascaded': config.cascaded,\n 'lambda_val': config.lambda_val,\n 'tdl_alpha': config.tdl_alpha,\n 'tdl_mode': config.tdl_mode,\n 'noise_var': config.noise_var,\n 'bn_opts': {\n 'temporal_affine': config.bn_time_affine,\n 'temporal_stats': config.bn_time_stats,\n },\n 'imagenet': config.dataset_name == 'ImageNet2012',\n }\n\n # Model init op\n if 
config.model_key.startswith('resnet'):\n model_init_op = resnet\n elif config.model_key.startswith('densenet'):\n model_init_op = densenet\n\n # Initialize net\n net = model_init_op.__dict__[config.model_key](**model_dict).to(device)\n\n # Save model config\n model_dict['model_key'] = config.model_key\n utils.save_model_config(model_dict, save_root, config)\n\n # Optimizer\n optimizer = optim.SGD(net.parameters(),\n lr=config.learning_rate,\n momentum=config.momentum,\n nesterov=config.nesterov)\n\n # Scheduler\n lr_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=config.lr_milestones,\n gamma=config.lr_schedule_gamma)\n\n # Criterion\n criterion = losses.categorical_cross_entropy\n\n # Set Loaders\n train_loader = data_handler.build_loader('train', config)\n val_loader = data_handler.build_loader('val', config)\n test_loader = data_handler.build_loader('test', config)\n\n # train and eval functions\n train_fxn = train_handler.get_train_loop(net.timesteps,\n data_handler.num_classes,\n config)\n eval_fxn = eval_handler.get_eval_loop(net.timesteps,\n data_handler.num_classes,\n config)\n\n # Metrics container\n metrics = {\n 'train': collections.defaultdict(list),\n 'val': collections.defaultdict(list),\n 'test': collections.defaultdict(float),\n }\n\n for epoch_i in range(config.epochs):\n # Train net\n train_loss, train_acc = train_fxn(net, train_loader, criterion,\n optimizer, device)\n\n # Log train metrics\n metrics['train']['loss'].append((epoch_i, train_loss))\n metrics['train']['acc'].append((epoch_i, train_acc))\n\n # Update lr scheduler\n lr_scheduler.step()\n\n if epoch_i % config.eval_freq == 0:\n # Evaluate net\n val_loss, val_acc = eval_fxn(net, val_loader, criterion, device)\n\n # Log eval metrics\n metrics['val']['loss'].append((epoch_i, val_loss))\n metrics['val']['acc'].append((epoch_i, val_acc))\n\n if config.cascaded:\n train_loss_val = np.mean(train_loss, axis=0)[-1]\n train_acc_val = np.mean(train_acc, axis=0)[-1] * 100\n else:\n train_loss_val = np.mean(train_loss, axis=0)\n train_acc_val = np.mean(train_acc, axis=0) * 100\n\n logging.info('Epoch %d/%d -- Acc: %0.2f -- Loss: %0.6f',\n epoch_i+1, config.epochs, train_acc_val, train_loss_val)\n\n if epoch_i % config.upload_freq == 0:\n utils.save_model(net, optimizer, save_root, epoch_i, config)\n utils.save_metrics(metrics, save_root, config)\n\n # Evaluate test set\n test_loss, test_acc = eval_fxn(net, test_loader, criterion, device)\n metrics['test']['loss'] = test_loss\n metrics['test']['acc'] = test_acc\n\n # Save model and metrics\n utils.save_model(net, optimizer, save_root, epoch_i, config)\n utils.save_metrics(metrics, save_root, config)\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Build BERT Examples from text (source, target) pairs.\"\"\"\n\nimport collections\nimport itertools\nfrom typing import Mapping, MutableSequence, Optional, Sequence, Tuple\n\nimport frozendict\nimport tensorflow as tf\n\nfrom felix import felix_constants as constants\nfrom felix import insertion_converter\nfrom felix import pointing_converter\nfrom felix import tokenization\nfrom felix import utils\n\n\nclass BertExample:\n \"\"\"Class for training and inference examples for BERT.\n\n Attributes:\n features: A dictionary of features with numeral lists as values.\n features_float: A dictionary of features with float lists as values.\n scalar_features: A dictionary of features with scalar values.\n \"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n labels,\n point_indexes,\n labels_mask,\n input_tokens,\n label_tokens = None,\n source_text = None,\n target_text = None):\n \"\"\"Constructor for BERTExample.\n\n Args:\n input_ids: list of ids of source tokens.\n input_mask: list of 1s and 0s. 0 indicates a PAD token.\n segment_ids: List of segment ids for BERT.\n labels: list of added_phrases. If list is empty we assume we are at test\n time.\n point_indexes: list of target points.\n labels_mask: list of 1s and 0s. 0 indicates a PAD token.\n input_tokens: List of tokens (as text), For debug purposes.\n label_tokens: List of labels (as text). Optional. For debug purposes.\n source_text: Raw string of source input. Optional. For debug purposes.\n target_text: Raw string of target output. Optional. 
For debug purposes.\n \"\"\"\n if not labels:\n self.features = collections.OrderedDict([('input_ids', input_ids),\n ('input_mask', input_mask),\n ('segment_ids', segment_ids)])\n self.features_float = {}\n else:\n self.features = collections.OrderedDict([\n ('input_ids', input_ids),\n ('point_indexes', point_indexes),\n ('input_mask', input_mask),\n ('segment_ids', segment_ids),\n ('labels', labels),\n ])\n\n self.features_float = collections.OrderedDict([\n ('labels_mask', labels_mask),\n ])\n self.scalar_features = collections.OrderedDict()\n self.debug_features = collections.OrderedDict()\n self.debug_features['input_tokens'] = input_tokens\n if label_tokens is not None:\n self.debug_features['label_tokens'] = label_tokens\n if source_text is not None:\n self.debug_features['text_source'] = [source_text]\n if target_text is not None:\n self.debug_features['text_target'] = [target_text]\n\n def pad_to_max_length(self, max_seq_length, pad_token_id):\n \"\"\"Pad the feature vectors so that they all have max_seq_length.\n\n Args:\n max_seq_length: The length that features will have after padding.\n pad_token_id: input_ids feature is padded with this ID, other features\n with ID 0.\n \"\"\"\n\n for key, feature in itertools.chain(self.features.items(),\n self.features_float.items()):\n pad_len = max_seq_length - len(feature)\n pad_id = pad_token_id if key == 'input_ids' else 0\n\n feature.extend([pad_id] * pad_len)\n if len(feature) != max_seq_length:\n raise ValueError('{} has length {} (should be {}).'.format(\n key, len(feature), max_seq_length))\n\n def _int_feature(self, values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n\n def _float_feature(self, values):\n return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n\n def to_tf_example(self):\n \"\"\"Returns this object as a tf.Example.\"\"\"\n\n tf_features = collections.OrderedDict([\n (key, self._int_feature(val)) for key, val in self.features.items()\n ])\n # Add scalar integer features.\n for key, value in self.scalar_features.items():\n tf_features[key] = self._int_feature([value])\n\n # Add label mask feature.\n for key, value in self.features_float.items():\n tf_features[key] = self._float_feature(value)\n\n # Add debug fields.\n for key, value in self.debug_features.items():\n tf_features[key] = tf.train.Feature(\n bytes_list=tf.train.BytesList(\n value=[element.encode('utf8') for element in value]))\n\n return tf.train.Example(features=tf.train.Features(feature=tf_features))\n\n\nclass BertExampleBuilder:\n \"\"\"Builder class for BertExample objects.\n\n Attributes:\n label_map: Mapping from tags to tag IDs.\n tokenizer: A tokenization.FullTokenizer, which converts between strings and\n lists of tokens.\n \"\"\"\n\n def __init__(self,\n label_map,\n max_seq_length,\n do_lower_case,\n converter,\n use_open_vocab,\n vocab_file = None,\n converter_insertion = None,\n special_glue_string_for_sources = None):\n \"\"\"Initializes an instance of BertExampleBuilder.\n\n Args:\n label_map: Mapping from tags to tag IDs.\n max_seq_length: Maximum sequence length.\n do_lower_case: Whether to lower case the input text. Should be True for\n uncased models and False for cased models.\n converter: Converter from text targets to points.\n use_open_vocab: Should MASK be inserted or phrases. Currently only True is\n supported.\n vocab_file: Path to BERT vocabulary file.\n converter_insertion: Converter for building an insertion example based on\n the tagger output. 
Optional.\n special_glue_string_for_sources: If there are multiple sources, this\n string is used to combine them into one string. The empty string is a\n valid value. Optional.\n \"\"\"\n self.label_map = label_map\n inverse_label_map = {}\n for label, label_id in label_map.items():\n if label_id in inverse_label_map:\n raise ValueError(\n 'Multiple labels with the same ID: {}'.format(label_id))\n inverse_label_map[label_id] = label\n self._inverse_label_map = frozendict.frozendict(inverse_label_map)\n self.tokenizer = tokenization.FullTokenizer(\n vocab_file, do_lower_case=do_lower_case)\n self._max_seq_length = max_seq_length\n self._converter = converter\n self._pad_id = self._get_pad_id()\n self._do_lower_case = do_lower_case\n self._use_open_vocab = use_open_vocab\n self._converter_insertion = converter_insertion\n if special_glue_string_for_sources is not None:\n self._special_glue_string_for_sources = special_glue_string_for_sources\n else:\n self._special_glue_string_for_sources = ' '\n\n def build_bert_example(\n self,\n sources,\n target = None,\n is_test_time = False\n ):\n \"\"\"Constructs a BERT tagging and insertion examples.\n\n Args:\n sources: List of source texts.\n target: Target text or None when building an example during inference. If\n the target is None then we don't calculate gold labels or tags, this is\n equivaltn to setting is_test_time to True.\n is_test_time: Controls whether the dataset is to be used at test time.\n Unlike setting target = None to indicate test time, this flags allows\n for saving the target in the tfrecord.\n\n Returns:\n A tuple with:\n 1. BertExample for the tagging model or None if there's a tag not found in\n self.label_map or conversion from text to tags was infeasible.\n 2. FeedDict for the insertion model or None if the BertExample or the\n insertion conversion failed.\n \"\"\"\n\n merged_sources = self._special_glue_string_for_sources.join(sources)\n original_source = merged_sources\n merged_sources = merged_sources.strip()\n if self._do_lower_case:\n merged_sources = merged_sources.lower()\n # [SEP] Should always be uppercase.\n merged_sources = merged_sources.replace(constants.SEP.lower(),\n constants.SEP)\n tokens = self._split_to_wordpieces(merged_sources.split())\n tokens = self._truncate_list(tokens)\n\n input_tokens = [constants.CLS] + tokens + [constants.SEP]\n input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)\n\n input_mask = [1] * len(input_ids)\n segment_ids = [0] * len(input_ids)\n if not target or is_test_time:\n example = BertExample(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n labels=[],\n point_indexes=[],\n labels_mask=[],\n input_tokens=input_tokens,\n source_text=original_source,\n target_text=target)\n example.pad_to_max_length(self._max_seq_length, self._pad_id)\n return example, None\n\n if self._do_lower_case:\n target = target.lower()\n\n output_tokens = self._split_to_wordpieces(target.split())\n\n output_tokens = self._truncate_list(output_tokens)\n output_tokens = [constants.CLS] + output_tokens + [constants.SEP]\n points = self._converter.compute_points(' '.join(input_tokens).split(),\n ' '.join(output_tokens))\n if not points:\n return None, None\n\n labels = [t.added_phrase for t in points]\n\n point_indexes = [t.point_index for t in points]\n point_indexes_set = set(point_indexes)\n try:\n new_labels = []\n for i, added_phrase in enumerate(labels):\n if i not in point_indexes_set:\n new_labels.append(self.label_map['DELETE'])\n elif not added_phrase:\n 
new_labels.append(self.label_map['KEEP'])\n else:\n if self._use_open_vocab:\n new_labels.append(self.label_map['KEEP|' +\n str(len(added_phrase.split()))])\n else:\n new_labels.append(self.label_map['KEEP|' + str(added_phrase)])\n labels = new_labels\n except KeyError:\n # added_phrase is not in label_map.\n return None, None\n\n if not labels:\n return None, None\n\n label_tokens = [\n self._inverse_label_map.get(label_id, constants.PAD)\n for label_id in labels\n ]\n label_counter = collections.Counter(labels)\n label_weight = {\n label: len(labels) / count / len(label_counter)\n for label, count in label_counter.items()\n }\n # Weight the labels inversely proportional to their frequency.\n labels_mask = [label_weight[label] for label in labels]\n example = BertExample(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n labels=labels,\n point_indexes=point_indexes,\n labels_mask=labels_mask,\n input_tokens=input_tokens,\n label_tokens=label_tokens,\n source_text=merged_sources,\n target_text=target)\n example.pad_to_max_length(self._max_seq_length, self._pad_id)\n\n insertion_example = None\n if self._converter_insertion is not None:\n insertion_example = self._converter_insertion.create_insertion_example(\n input_tokens, labels, point_indexes, output_tokens)\n\n return example, insertion_example\n\n def _split_to_wordpieces(self, tokens):\n \"\"\"Splits tokens to WordPieces.\n\n Args:\n tokens: Tokens to be split.\n\n Returns:\n List of WordPieces.\n \"\"\"\n bert_tokens = [] # Original tokens split into wordpieces.\n special_tokens = {'<::::>', constants.SEP.lower(), constants.CLS.lower()}\n for token in tokens:\n\n # Don't tokenize special tokens.\n if token.lower() not in special_tokens:\n pieces = self.tokenizer.tokenize(token)\n else:\n pieces = [token]\n\n bert_tokens.extend(pieces)\n\n return bert_tokens\n\n def _truncate_list(self, x):\n \"\"\"Returns truncated version of x according to the self._max_seq_length.\"\"\"\n # Save two slots for the first [CLS] token and the last [SEP] token.\n return x[:self._max_seq_length - 2]\n\n def _get_pad_id(self):\n \"\"\"Returns the ID of the [PAD] token (or 0 if it's not in the vocab).\"\"\"\n try:\n return self.tokenizer.convert_tokens_to_ids([constants.PAD])[0]\n except KeyError:\n return 0\n"
] | [
[
"numpy.random.RandomState",
"tensorflow.compat.v1.random.uniform"
],
[
"tensorflow.function",
"tensorflow.Variable",
"tensorflow.compat.v1.VariableScope"
],
[
"tensorflow.reduce_mean",
"tensorflow.zeros_like",
"tensorflow.compat.v2.summary.scalar",
"numpy.mean",
"numpy.float64",
"numpy.zeros"
],
[
"tensorflow.compat.v2.config.experimental_run_functions_eagerly",
"tensorflow.compat.v2.random.set_seed",
"numpy.random.seed",
"tensorflow.compat.v2.debugging.set_log_device_placement"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.math.log",
"numpy.abs",
"numpy.arange",
"matplotlib.pyplot.subplots",
"tensorflow.compat.v2.expand_dims",
"numpy.zeros_like",
"tensorflow.compat.v2.ones",
"matplotlib.pyplot.close",
"tensorflow.compat.v2.reduce_sum",
"numpy.argmax",
"numpy.array",
"tensorflow.compat.v2.reduce_logsumexp"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.linspace",
"numpy.clip",
"numpy.logspace",
"numpy.abs",
"numpy.sort",
"numpy.roots",
"numpy.sign",
"numpy.random.normal",
"numpy.argmin",
"numpy.random.laplace",
"numpy.exp",
"numpy.zeros",
"numpy.empty"
],
[
"tensorflow.compat.v1.assign_sub",
"tensorflow.compat.v1.math.is_finite",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.raw_ops.OutfeedEnqueueTuple",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compiler.xla.experimental.xla_sharding.xla_sharding.split",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.tpu.cross_replica_sum",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.tpu.core",
"tensorflow.compat.v1.stop_gradient",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.raw_ops.InfeedDequeueTuple",
"tensorflow.compat.v1.nn.log_softmax",
"tensorflow.compat.v1.nn.softmax",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.losses.softmax_cross_entropy",
"tensorflow.python.tpu.tpu_feed._PartitionedInfeedQueue",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.less_equal",
"tensorflow.compat.v1.argmax",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.clip_by_global_norm"
],
[
"tensorflow.gfile.Open"
],
[
"tensorflow.io.gfile.exists",
"numpy.mean"
],
[
"pandas.concat",
"numpy.isnan",
"pandas.DataFrame",
"numpy.mean",
"numpy.nanmean",
"numpy.zeros_like",
"numpy.array",
"numpy.where"
],
[
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.shape"
],
[
"tensorflow.boolean_mask",
"tensorflow.sparse.to_dense",
"tensorflow.stack",
"tensorflow.io.parse_single_example",
"tensorflow.io.VarLenFeature",
"tensorflow.io.FixedLenFeature",
"tensorflow.data.Dataset.list_files",
"tensorflow.math.greater",
"tensorflow.one_hot"
],
[
"pandas.DataFrame"
],
[
"numpy.random.RandomState"
],
[
"numpy.dot",
"numpy.array",
"numpy.sum"
],
[
"tensorflow.compat.v2.summary.text"
],
[
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"tensorflow.stack",
"tensorflow.math.l2_normalize",
"tensorflow.linalg.cross"
],
[
"torch.optim.lr_scheduler.MultiStepLR",
"numpy.mean",
"torch.cuda.is_available"
],
[
"tensorflow.train.Features"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ishine/malaya-speech | [
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18"
] | [
"pretrained-model/stt/hubert/conformer-tiny-ctc.py",
"session/speaker-change/finetune-vggvox-v2.py",
"pretrained-model/stt/jasper/medium-jasper-ctc.py",
"malaya_speech/train/model/fastsplit/model.py",
"malaya_speech/train/model/fastsplit/loss.py",
"malaya_speech/train/model/pix2pix/discriminator.py",
"pretrained-model/vocoder/universal-melgan/universal-melgan-1024.py",
"pretrained-model/vocoder/mbmelgan/mbmelgan-female-generator.py",
"pretrained-model/multispeaker-separation/fastsep-4-mel.py",
"session/multispeaker-count/speakernet.py"
] | [
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\nimport pyroomacoustics as pra\nimport numpy as np\nfrom pydub import AudioSegment\nfrom sklearn.utils import shuffle\nfrom glob import glob\nimport random\nimport json\nfrom malaya_speech.train.model.conformer.model import Model as ConformerModel\nfrom malaya_speech.train.model import hubert, ctc\nimport malaya_speech.train as train\nimport malaya_speech.config\nimport malaya_speech.augmentation.waveform as augmentation\nimport malaya_speech\nimport tensorflow as tf\nimport os\nimport string\n\n\nsr = 16000\nmaxlen = 18\nminlen_text = 1\nprob_aug = 0.95\n\nunique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']\n\n\ndef augment_room(y, scale=1.0):\n corners = np.array(\n [[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]\n ).T\n room = pra.Room.from_corners(\n corners,\n fs=sr,\n materials=pra.Material(0.2, 0.15),\n ray_tracing=True,\n air_absorption=True,\n )\n room.extrude(3.5, materials=pra.Material(0.2, 0.15))\n room.set_ray_tracing(\n receiver_radius=0.5, n_rays=1000, energy_thres=1e-5\n )\n room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)\n R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])\n room.add_microphone(R)\n room.simulate()\n return room.mic_array.signals[0]\n\n\ndef random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):\n y_aug = sample.copy()\n dyn_change = np.random.uniform(low=low, high=high)\n y_aug[np.abs(y_aug) >= threshold] = (\n y_aug[np.abs(y_aug) >= threshold] * dyn_change\n )\n return np.clip(y_aug, -1, 1)\n\n\ndef add_uniform_noise(\n sample, power=0.01, return_noise=False, scale=False\n):\n y_noise = sample.copy()\n noise_amp = power * np.random.uniform() * np.amax(y_noise)\n noise = noise_amp * np.random.normal(size=y_noise.shape[0])\n y_noise = y_noise + noise\n if scale:\n y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)\n if return_noise:\n if scale:\n noise = noise / (np.max(np.abs(y_noise)) + 1e-9)\n return y_noise, noise\n else:\n return y_noise\n\n\ndef calc(signal, add_uniform=True):\n choice = random.randint(0, 10)\n print('choice', choice)\n if choice == 0:\n x = augmentation.sox_augment_high(\n signal,\n min_bass_gain=random.randint(25, 50),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=1,\n )\n if choice == 1:\n x = augmentation.sox_augment_high(\n signal,\n min_bass_gain=random.randint(25, 70),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=0,\n )\n if choice == 2:\n x = augmentation.sox_augment_low(\n signal,\n min_bass_gain=random.randint(5, 30),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=random.randint(0, 1),\n )\n if choice == 3:\n x = augmentation.sox_augment_combine(\n signal,\n min_bass_gain_high=random.randint(25, 70),\n min_bass_gain_low=random.randint(5, 30),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 90),\n )\n if choice == 4:\n x = augmentation.sox_reverb(\n signal,\n reverberance=random.randint(10, 80),\n hf_damping=10,\n room_scale=random.randint(10, 90),\n )\n if choice == 5:\n x = random_amplitude_threshold(\n signal, threshold=random.uniform(0.35, 0.8)\n )\n if choice == 6:\n x = augmentation.lowpass_filter(\n signal, sr=sr, cutoff=random.randint(200, 551)\n )\n if choice == 7:\n x = augmentation.highpass_filter(\n signal, sr=sr, cutoff=random.randint(551, 1653)\n )\n if choice == 8:\n x = 
augmentation.bandpass_filter(\n signal,\n sr=sr,\n cutoff_low=random.randint(200, 551),\n cutoff_high=random.randint(551, 1653),\n )\n if choice == 9:\n x = augment_room(signal)\n if choice == 10:\n x = signal\n\n if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:\n x = random_amplitude_threshold(\n x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)\n )\n\n if random.gauss(0.5, 0.14) > 0.6 and add_uniform:\n x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))\n\n return x\n\n\ndef mp3_to_wav(file, sr=sr):\n audio = AudioSegment.from_file(file)\n audio = audio.set_frame_rate(sr).set_channels(1)\n sample = np.array(audio.get_array_of_samples())\n return malaya_speech.astype.int_to_float(sample), sr\n\n\ndef generate(file):\n with open(file) as fopen:\n dataset = json.load(fopen)\n audios, cleaned_texts = dataset['X'], dataset['Y']\n while True:\n audios, cleaned_texts = shuffle(audios, cleaned_texts)\n for i in range(len(audios)):\n try:\n if audios[i].endswith('.mp3'):\n # print('found mp3', audios[i])\n wav_data, _ = mp3_to_wav(audios[i])\n else:\n wav_data, _ = malaya_speech.load(audios[i], sr=sr)\n\n if len(cleaned_texts[i]) < minlen_text:\n # print(f'skipped text too short {audios[i]}')\n continue\n\n if (len(wav_data) / sr) > maxlen:\n continue\n\n t = [unique_vocab.index(c) for c in cleaned_texts[i]]\n\n yield {\n 'waveforms': wav_data,\n 'waveforms_length': [len(wav_data)],\n 'targets': t,\n 'targets_length': [len(t)],\n }\n except Exception as e:\n print(e)\n\n\ndef get_dataset(\n file,\n batch_size=12,\n shuffle_size=20,\n thread_count=24,\n maxlen_feature=1800,\n):\n def get():\n dataset = tf.data.Dataset.from_generator(\n generate,\n {\n 'waveforms': tf.float32,\n 'waveforms_length': tf.int32,\n 'targets': tf.int32,\n 'targets_length': tf.int32,\n },\n output_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'waveforms_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n 'targets_length': tf.TensorShape([None]),\n },\n args=(file,),\n )\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'waveforms_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n 'targets_length': tf.TensorShape([None]),\n },\n padding_values={\n 'waveforms': tf.constant(0, dtype=tf.float32),\n 'waveforms_length': tf.constant(0, dtype=tf.int32),\n 'targets': tf.constant(0, dtype=tf.int32),\n 'targets_length': tf.constant(0, dtype=tf.int32),\n },\n )\n return dataset\n\n return get\n\n\nclass Encoder:\n def __init__(self, config):\n self.config = config\n self.encoder = ConformerModel(**self.config)\n\n def __call__(self, x, input_mask, training=True):\n return self.encoder(x, training=training)\n\n\ntotal_steps = 2000000\n\n\ndef model_fn(features, labels, mode, params):\n config_conformer = malaya_speech.config.conformer_tiny_encoder_config\n config_conformer['subsampling']['type'] = 'none'\n config_conformer['dropout'] = 0.0\n encoder = Encoder(config_conformer)\n cfg = hubert.HuBERTConfig(\n extractor_mode='layer_norm',\n dropout=0.0,\n attention_dropout=0.0,\n encoder_layerdrop=0.0,\n dropout_input=0.0,\n dropout_features=0.0,\n final_dim=128,\n )\n model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])\n X = features['waveforms']\n X_len = features['waveforms_length'][:, 0]\n targets = features['targets']\n targets_int32 = tf.cast(targets, tf.int32)\n targets_length = features['targets_length'][:, 0]\n r = 
model(X, padding_mask=X_len, features_only=True, mask=False)\n logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)\n seq_lens = tf.reduce_sum(\n tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1\n )\n mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(\n logits, seq_lens, targets_int32, targets_length\n )\n loss = mean_error\n accuracy = ctc.metrics.ctc_sequence_accuracy(\n logits, seq_lens, targets_int32, targets_length,\n )\n\n tf.identity(loss, 'train_loss')\n tf.identity(accuracy, name='train_accuracy')\n\n tf.summary.scalar('train_accuracy', accuracy)\n\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'\n\n assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(\n variables, init_checkpoint\n )\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = train.optimizer.adamw.create_optimizer(\n loss,\n init_lr=5e-5,\n num_train_steps=total_steps,\n num_warmup_steps=100000,\n end_learning_rate=0.0,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n clip_norm=1.0,\n )\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={\n 'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(\n logits, seq_lens, targets_int32, targets_length\n )\n },\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\ntrain_dataset = get_dataset('bahasa-asr-train-combined.json')\ndev_dataset = get_dataset('bahasa-asr-test.json')\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir='hubert-conformer-tiny-ctc-char',\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=20000,\n max_steps=total_steps,\n eval_fn=dev_dataset,\n train_hooks=train_hooks,\n)\n",
"import os\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../gcs/mesolitica-storage.json'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nimport tensorflow as tf\nimport malaya_speech.train as train\nimport malaya_speech.train.model.vggvox_v2 as vggvox_v2\nimport malaya_speech\nfrom glob import glob\nimport librosa\nimport numpy as np\n\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):\n linear = librosa.stft(\n wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length\n ) # linear spectrogram\n return linear.T\n\n\ndef load_data(\n wav,\n win_length=400,\n sr=16000,\n hop_length=50,\n n_fft=512,\n spec_len=250,\n mode='train',\n):\n linear_spect = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n freq, time = mag_T.shape\n if mode == 'train':\n if time > spec_len:\n randtime = np.random.randint(0, time - spec_len)\n spec_mag = mag_T[:, randtime: randtime + spec_len]\n else:\n spec_mag = np.pad(mag_T, ((0, 0), (0, spec_len - time)), 'constant')\n else:\n spec_mag = mag_T\n # preprocessing, subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n return (spec_mag - mu) / (std + 1e-5)\n\n\nDIMENSION = 257\n\n\ndef calc(v):\n\n r = load_data(v, mode='eval')\n return r\n\n\ndef preprocess_inputs(example):\n s = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)\n\n s = tf.reshape(s, (DIMENSION, -1, 1))\n example['inputs'] = s\n\n return example\n\n\ndef parse(serialized_example):\n\n data_fields = {\n 'inputs': tf.VarLenFeature(tf.float32),\n 'targets': tf.VarLenFeature(tf.int64),\n }\n features = tf.parse_single_example(\n serialized_example, features=data_fields\n )\n for k in features.keys():\n features[k] = features[k].values\n\n features = preprocess_inputs(features)\n\n keys = list(features.keys())\n for k in keys:\n if k not in ['inputs', 'targets']:\n features.pop(k, None)\n\n return features\n\n\ndef get_dataset(files, batch_size=32, shuffle_size=1024, thread_count=24):\n def get():\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(parse, num_parallel_calls=thread_count)\n dataset = dataset.shuffle(shuffle_size)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'inputs': tf.TensorShape([DIMENSION, None, 1]),\n 'targets': tf.TensorShape([None]),\n },\n padding_values={\n 'inputs': tf.constant(0, dtype=tf.float32),\n 'targets': tf.constant(0, dtype=tf.int64),\n },\n )\n dataset = dataset.repeat()\n return dataset\n\n return get\n\n\nlearning_rate = 1e-5\ninit_checkpoint = '../vggvox-speaker-identification/v2/vggvox.ckpt'\n\n\ndef model_fn(features, labels, mode, params):\n Y = tf.cast(features['targets'][:, 0], tf.int32)\n model = vggvox_v2.Model(features['inputs'], num_class=2, mode='train')\n\n logits = model.logits\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=Y\n )\n )\n\n tf.identity(loss, 'train_loss')\n\n accuracy = tf.metrics.accuracy(\n labels=Y, predictions=tf.argmax(logits, axis=1)\n )\n\n tf.identity(accuracy[1], name='train_accuracy')\n tf.summary.scalar('train_accuracy', accuracy[1])\n\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n variables = [v for v in variables if 'prediction' not in v.name]\n\n assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(\n variables, init_checkpoint\n )\n\n tf.train.init_from_checkpoint(init_checkpoint, 
assignment_map)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={'accuracy': accuracy},\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\n\nfiles = tf.io.gfile.glob(\n 'gs://mesolitica-general/speaker-change/data/*.tfrecords'\n)\ntrain_dataset = get_dataset(files)\n\nsave_directory = 'output-vggvox-v2-speaker-change'\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir=save_directory,\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=25000,\n max_steps=300000,\n train_hooks=train_hooks,\n)\n",
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '2,3'\n\nimport tensorflow as tf\nimport malaya_speech\nimport malaya_speech.augmentation.waveform as augmentation\nimport malaya_speech.augmentation.spectrogram as mask_augmentation\nimport malaya_speech.train.model.medium_jasper as jasper\nimport malaya_speech.train.model.ctc as ctc\nimport malaya_speech.train as train\nfrom malaya_speech.train.model.quartznet import layer, abstract\nimport numpy as np\nimport random\nfrom glob import glob\nimport json\n\nwith open('malaya-speech-sst-vocab.json') as fopen:\n unique_vocab = json.load(fopen) + ['{', '}', '[']\n\nparameters = {\n 'optimizer_params': {},\n 'lr_policy_params': {\n 'learning_rate': 1e-4,\n 'min_lr': 1e-6,\n 'warmup_steps': 0,\n 'decay_steps': 500_000,\n },\n}\n\n\ndef learning_rate_scheduler(global_step):\n return train.schedule.cosine_decay(\n global_step, **parameters['lr_policy_params']\n )\n\n\nfeaturizer = malaya_speech.tf_featurization.STTFeaturizer(\n normalize_per_feature=True\n)\nn_mels = featurizer.num_feature_bins\n\n\ndef mel_augmentation(features):\n\n features = mask_augmentation.mask_frequency(features, width_freq_mask=15)\n features = mask_augmentation.mask_time(\n features, width_time_mask=int(features.shape[0] * 0.05)\n )\n return features\n\n\ndef preprocess_inputs(example):\n s = featurizer.vectorize(example['waveforms'])\n s = tf.reshape(s, (-1, n_mels))\n s = tf.compat.v1.numpy_function(mel_augmentation, [s], tf.float32)\n mel_fbanks = tf.reshape(s, (-1, n_mels))\n length = tf.cast(tf.shape(mel_fbanks)[0], tf.int32)\n length = tf.expand_dims(length, 0)\n example['inputs'] = mel_fbanks\n example['inputs_length'] = length\n\n return example\n\n\ndef parse(serialized_example):\n\n data_fields = {\n 'waveforms': tf.VarLenFeature(tf.float32),\n 'targets': tf.VarLenFeature(tf.int64),\n }\n features = tf.parse_single_example(\n serialized_example, features=data_fields\n )\n for k in features.keys():\n features[k] = features[k].values\n\n features = preprocess_inputs(features)\n\n keys = list(features.keys())\n for k in keys:\n if k not in ['waveforms', 'inputs', 'inputs_length', 'targets']:\n features.pop(k, None)\n\n return features\n\n\ndef get_dataset(\n path,\n batch_size=32,\n shuffle_size=32,\n thread_count=24,\n maxlen_feature=1800,\n):\n def get():\n files = glob(path)\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.shuffle(shuffle_size)\n dataset = dataset.repeat()\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n dataset = dataset.map(parse, num_parallel_calls=thread_count)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'inputs': tf.TensorShape([None, n_mels]),\n 'inputs_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n },\n padding_values={\n 'waveforms': tf.constant(0, dtype=tf.float32),\n 'inputs': tf.constant(0, dtype=tf.float32),\n 'inputs_length': tf.constant(0, dtype=tf.int32),\n 'targets': tf.constant(0, dtype=tf.int64),\n },\n )\n return dataset\n\n return get\n\n\ndef model_fn(features, labels, mode, params):\n\n model = jasper.Model(\n features['inputs'], features['inputs_length'][:, 0], training=True\n )\n logits = tf.layers.dense(model.logits['outputs'], len(unique_vocab) + 1)\n seq_lens = model.logits['src_length']\n\n targets_int32 = tf.cast(features['targets'], tf.int32)\n\n mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(\n logits, targets_int32, seq_lens\n )\n\n loss = mean_error\n accuracy = 
ctc.metrics.ctc_sequence_accuracy(\n logits, targets_int32, seq_lens\n )\n\n tf.identity(loss, 'train_loss')\n tf.identity(accuracy, name='train_accuracy')\n\n tf.summary.scalar('train_accuracy', accuracy)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = train.optimizer.optimize_loss(\n loss,\n tf.train.AdamOptimizer,\n parameters['optimizer_params'],\n learning_rate_scheduler,\n summaries=['learning_rate', 'loss_scale'],\n larc_params=parameters.get('larc_params', None),\n loss_scaling=parameters.get('loss_scaling', 1.0),\n loss_scaling_params=parameters.get('loss_scaling_params', None),\n )\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={\n 'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(\n logits, targets_int32, seq_lens\n )\n },\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\ntrain_dataset = get_dataset(\n '../speech-bahasa/bahasa-asr/data/bahasa-asr-train-*'\n)\ndev_dataset = get_dataset(\n '../speech-bahasa/bahasa-asr-test/data/bahasa-asr-dev-*'\n)\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir='asr-medium-jasper-ctc',\n num_gpus=2,\n log_step=1,\n save_checkpoint_step=5000,\n max_steps=parameters['lr_policy_params']['decay_steps'],\n eval_fn=dev_dataset,\n train_hooks=train_hooks,\n)\n",
"import tensorflow as tf\nfrom ..fastspeech.model import TFFastSpeechEncoder\nimport numpy as np\n\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super(Encoder, self).__init__(name='Encoder', **kwargs)\n self.config = config\n self.encoder = TFFastSpeechEncoder(config, name='encoder')\n self.position_embeddings = tf.convert_to_tensor(\n self._sincos_embedding()\n )\n\n def call(self, x, attention_mask, training=True):\n input_shape = tf.shape(x)\n seq_length = input_shape[1]\n\n position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[\n tf.newaxis, :\n ]\n inputs = tf.cast(position_ids, tf.int32)\n position_embeddings = tf.gather(self.position_embeddings, inputs)\n x = x + tf.cast(position_embeddings, x.dtype)\n f = self.encoder([x, attention_mask], training=training)[0]\n return f\n\n def _sincos_embedding(self):\n position_enc = np.array(\n [\n [\n pos\n / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)\n for i in range(self.config.hidden_size)\n ]\n for pos in range(self.config.max_position_embeddings + 1)\n ]\n )\n\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n\n # pad embedding.\n position_enc[0] = 0.0\n\n return position_enc\n\n\nclass Model(tf.keras.Model):\n def __init__(\n self, config, O, C, kernel_size=5, masking=False, **kwargs\n ):\n super(Model, self).__init__(name='fastvc', **kwargs)\n self.encoder = Encoder(config.encoder_self_attention_params)\n self.decoder = Encoder(config.decoder_self_attention_params)\n self.encoder_dense = tf.keras.layers.Conv1D(\n config.encoder_self_attention_params.hidden_size,\n kernel_size=kernel_size,\n strides=1,\n use_bias=False,\n padding='SAME',\n )\n self.mel_dense = tf.keras.layers.Dense(\n units=config.num_mels, dtype=tf.float32, name='mel_before'\n )\n self.dim = O\n self.C = C\n self.masking = masking\n\n def call(self, x, mel_lengths, training=True, **kwargs):\n original = x\n T_mix = tf.shape(x)[1]\n batch_size = tf.shape(x)[0]\n max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)\n attention_mask = tf.sequence_mask(\n lengths=mel_lengths, maxlen=max_length, dtype=tf.float32\n )\n attention_mask.set_shape((None, None))\n x = tf.concat([x] * self.C, axis=2)\n x = self.encoder_dense(x)\n encoded = self.encoder(x, attention_mask, training=training)\n decoder_output = self.decoder(\n encoded, attention_mask, training=training\n )\n decoder_output = tf.reshape(\n decoder_output, (batch_size, T_mix, self.C, self.dim)\n )\n mel_before = self.mel_dense(decoder_output)\n if self.masking:\n mel_before = tf.nn.tanh(mel_before)\n tiled = tf.tile(tf.expand_dims(original, 2), [1, 1, self.C, 1])\n return tiled * mel_before\n else:\n return mel_before\n",
"import tensorflow as tf\nimport numpy as np\nfrom itertools import permutations\nfrom tensorflow.python.ops import weights_broadcast_ops\n\nEPS = 1e-8\n\n\ndef cal_abs_with_pit(\n source, estimate_source, source_lengths, C, method=tf.abs\n):\n # estimate_source = B, T, S, D\n # source = B, S, T, D\n\n # estimate_source = B, S, T, D\n estimate_source = tf.transpose(estimate_source, perm=[0, 2, 1, 3])\n\n mask = tf.cast(\n tf.sequence_mask(source_lengths, tf.reduce_max(source_lengths)),\n source.dtype,\n )\n mask = tf.expand_dims(mask, 1)\n mask = tf.expand_dims(mask, -1)\n # estimate_source *= mask\n\n targets = tf.expand_dims(source, 1)\n est_targets = tf.expand_dims(estimate_source, 2)\n pw_loss = method(targets - est_targets)\n # pair_wise_abs = tf.reduce_mean(pw_loss, axis = [3, 4])\n\n losses = pw_loss\n m = tf.expand_dims(mask, 1)\n weights = tf.cast(m, dtype=tf.float32)\n weighted_losses = tf.multiply(losses, weights)\n total_loss = tf.reduce_sum(weighted_losses, axis=[3, 4])\n present = tf.where(\n tf.equal(weights, 0.0), tf.zeros_like(weights), tf.ones_like(weights)\n )\n present = weights_broadcast_ops.broadcast_weights(present, losses)\n present = tf.reduce_sum(present, axis=[3, 4])\n pair_wise_abs = tf.div_no_nan(total_loss, present)\n\n v_perms = tf.constant(list(permutations(range(C))))\n perms_one_hot = tf.one_hot(v_perms, C)\n\n abs_set = tf.einsum('bij,pij->bp', pair_wise_abs, perms_one_hot)\n min_abs = tf.reduce_min(abs_set, axis=1, keepdims=True)\n\n return min_abs, abs_set\n\n\ndef calculate_loss(\n source,\n estimate_source,\n source_lengths,\n C,\n method=tf.abs,\n return_set=False,\n):\n min_abs, abs_set = cal_abs_with_pit(\n source, estimate_source, source_lengths, C, method=method\n )\n if return_set:\n return tf.reduce_mean(min_abs), abs_set\n else:\n return tf.reduce_mean(min_abs)\n",
"import tensorflow as tf\nfrom .layer import *\n\n\nclass Discriminator:\n def __init__(self, inputs, targets, ndf=64):\n n_layers = 3\n layers = []\n input = tf.concat([inputs, targets], axis=3)\n with tf.variable_scope('layer_1'):\n convolved = discrim_conv(input, ndf, stride=2)\n rectified = lrelu(convolved, 0.2)\n layers.append(rectified)\n\n for i in range(n_layers):\n with tf.variable_scope('layer_%d' % (len(layers) + 1)):\n out_channels = ndf * min(2 ** (i + 1), 8)\n stride = 1 if i == n_layers - 1 else 2\n convolved = discrim_conv(\n layers[-1], out_channels, stride=stride\n )\n normalized = batchnorm(convolved)\n rectified = lrelu(normalized, 0.2)\n layers.append(rectified)\n with tf.variable_scope('layer_%d' % (len(layers) + 1)):\n convolved = discrim_conv(rectified, out_channels=1, stride=1)\n output = tf.sigmoid(convolved)\n layers.append(output)\n\n self.logits = layers[-1]\n",
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nimport tensorflow as tf\nimport numpy as np\nfrom glob import glob\nfrom itertools import cycle\nimport malaya_speech\nimport malaya_speech.train\nfrom malaya_speech.train.model import universal_melgan as melgan\nfrom malaya_speech.train.model import melgan as melgan_loss\nimport malaya_speech.config\nfrom malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss\nimport random\n\nmels = glob('output-universal/mels/*.npy')\nmels.extend(glob('speech-augmentation/mels/*.npy'))\nrandom.shuffle(mels)\nfile_cycle = cycle(mels)\n\n\ndef generate(batch_max_steps=8192, hop_size=256):\n while True:\n f = next(file_cycle)\n mel = np.load(f)\n audio = np.load(f.replace('mels', 'audios'))\n\n batch_max_frames = batch_max_steps // hop_size\n if len(audio) < len(mel) * hop_size:\n audio = np.pad(audio, [[0, len(mel) * hop_size - len(audio)]])\n\n if len(mel) > batch_max_frames:\n interval_start = 0\n interval_end = len(mel) - batch_max_frames\n start_frame = random.randint(interval_start, interval_end)\n start_step = start_frame * hop_size\n audio = audio[start_step: start_step + batch_max_steps]\n mel = mel[start_frame: start_frame + batch_max_frames, :]\n else:\n audio = np.pad(audio, [[0, batch_max_steps - len(audio)]])\n mel = np.pad(mel, [[0, batch_max_frames - len(mel)], [0, 0]])\n\n yield {'mel': mel, 'audio': audio}\n\n\ndataset = tf.data.Dataset.from_generator(\n generate,\n {'mel': tf.float32, 'audio': tf.float32},\n output_shapes={\n 'mel': tf.TensorShape([None, 80]),\n 'audio': tf.TensorShape([None]),\n },\n)\ndataset = dataset.shuffle(32)\ndataset = dataset.padded_batch(\n 32,\n padded_shapes={\n 'audio': tf.TensorShape([None]),\n 'mel': tf.TensorShape([None, 80]),\n },\n padding_values={\n 'audio': tf.constant(0, dtype=tf.float32),\n 'mel': tf.constant(0, dtype=tf.float32),\n },\n)\n\nfeatures = dataset.make_one_shot_iterator().get_next()\n\nmelgan_config = malaya_speech.config.universal_melgan_config\nmelgan_config['melgan_generator_params']['filters'] = 1024\ngenerator = melgan.Generator(\n melgan.GeneratorConfig(**melgan_config['melgan_generator_params']),\n name='universalmelgan-generator',\n)\ndiscriminator = melgan.MultiScaleDiscriminator(\n melgan.WaveFormDiscriminatorConfig(\n **melgan_config['melgan_waveform_discriminator_params']\n ),\n melgan.STFTDiscriminatorConfig(\n **melgan_config['melgan_stft_discriminator_params']\n ),\n name='universalmelgan-discriminator',\n)\n\nmels_loss = melgan_loss.loss.TFMelSpectrogram()\n\nmse_loss = tf.keras.losses.MeanSquaredError()\nmae_loss = tf.keras.losses.MeanAbsoluteError()\n\n\ndef compute_per_example_generator_losses(audios, outputs):\n y_hat = outputs\n p_hat = discriminator(y_hat)\n p = discriminator(tf.expand_dims(audios, 2))\n\n adv_loss = 0.0\n for i in range(len(p_hat)):\n adv_loss += mse_loss(tf.ones_like(p_hat[i][-1]), p_hat[i][-1])\n adv_loss /= i + 1\n\n fm_loss = 0.0\n for i in range(len(p_hat)):\n for j in range(len(p_hat[i]) - 1):\n fm_loss += mae_loss(p[i][j], p_hat[i][j])\n fm_loss /= (i + 1) * (j + 1)\n adv_loss += 10 * fm_loss\n\n per_example_losses = adv_loss\n\n a = calculate_2d_loss(audios, tf.squeeze(y_hat, -1), loss_fn=mels_loss)\n\n dict_metrics_losses = {\n 'adversarial_loss': adv_loss,\n 'fm_loss': fm_loss,\n 'gen_loss': adv_loss,\n 'mels_spectrogram_loss': tf.reduce_mean(a),\n }\n\n return per_example_losses, dict_metrics_losses\n\n\ndef compute_per_example_discriminator_losses(audios, gen_outputs):\n y_hat = gen_outputs\n y = 
tf.expand_dims(audios, 2)\n p = discriminator(y)\n p_hat = discriminator(y_hat)\n\n real_loss = 0.0\n fake_loss = 0.0\n for i in range(len(p)):\n real_loss += mse_loss(tf.ones_like(p[i][-1]), p[i][-1])\n fake_loss += mse_loss(tf.zeros_like(p_hat[i][-1]), p_hat[i][-1])\n\n real_loss /= i + 1\n fake_loss /= i + 1\n dis_loss = real_loss + fake_loss\n\n per_example_losses = dis_loss\n\n dict_metrics_losses = {\n 'real_loss': real_loss,\n 'fake_loss': fake_loss,\n 'dis_loss': dis_loss,\n }\n\n return per_example_losses, dict_metrics_losses\n\n\ny_hat = generator(features['mel'], training=True)\naudios = features['audio']\nper_example_losses, generator_losses = compute_per_example_generator_losses(\n audios, y_hat\n)\ngenerator_loss = tf.reduce_mean(per_example_losses)\n\ny_hat = generator(features['mel'], training=True)\naudios = features['audio']\nper_example_losses, discriminator_losses = compute_per_example_discriminator_losses(\n audios, y_hat\n)\ndiscriminator_loss = tf.reduce_mean(per_example_losses)\n\nfor k, v in generator_losses.items():\n tf.summary.scalar(k, v)\n\nfor k, v in discriminator_losses.items():\n tf.summary.scalar(k, v)\n\nsummaries = tf.summary.merge_all()\n\nt_vars = tf.trainable_variables()\nd_vars = [\n var\n for var in t_vars\n if var.name.startswith('universalmelgan-discriminator')\n]\ng_vars = [\n var for var in t_vars if var.name.startswith('universalmelgan-generator')\n]\n\nd_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(\n discriminator_loss, var_list=d_vars\n)\ng_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(\n generator_loss, var_list=g_vars\n)\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\nsaver = tf.train.Saver()\n\ncheckpoint = 5000\nwrite_tensorboard = 100\nepoch = 1_000_000\npath = 'universal-melgan-1024'\n\nwriter = tf.summary.FileWriter(f'./{path}')\n\nckpt_path = tf.train.latest_checkpoint(path)\nif ckpt_path:\n saver.restore(sess, ckpt_path)\n\nfor i in range(epoch):\n g_loss, _ = sess.run([generator_loss, g_optimizer])\n d_loss, _ = sess.run([discriminator_loss, d_optimizer])\n s = sess.run(summaries)\n writer.add_summary(s, i)\n\n if i % checkpoint == 0:\n saver.save(sess, f'{path}/model.ckpt', global_step=i)\n\n if i % write_tensorboard == 0:\n writer.add_summary(s, i)\n\n print(i, g_loss, d_loss)\n",
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\nimport tensorflow as tf\nimport numpy as np\nfrom glob import glob\nfrom itertools import cycle\n\nmels = glob('../speech-bahasa/output-female-v2/mels/*.npy')\nfile_cycle = cycle(mels)\nf = next(file_cycle)\n\nimport random\n\n\ndef generate(batch_max_steps=8192, hop_size=256):\n while True:\n f = next(file_cycle)\n mel = np.load(f)\n audio = np.load(f.replace('mels', 'audios'))\n\n batch_max_frames = batch_max_steps // hop_size\n if len(audio) < len(mel) * hop_size:\n audio = np.pad(audio, [[0, len(mel) * hop_size - len(audio)]])\n\n if len(mel) > batch_max_frames:\n interval_start = 0\n interval_end = len(mel) - batch_max_frames\n start_frame = random.randint(interval_start, interval_end)\n start_step = start_frame * hop_size\n audio = audio[start_step: start_step + batch_max_steps]\n mel = mel[start_frame: start_frame + batch_max_frames, :]\n else:\n audio = np.pad(audio, [[0, batch_max_steps - len(audio)]])\n mel = np.pad(mel, [[0, batch_max_frames - len(mel)], [0, 0]])\n\n yield {'mel': mel, 'audio': audio}\n\n\ndataset = tf.data.Dataset.from_generator(\n generate,\n {'mel': tf.float32, 'audio': tf.float32},\n output_shapes={\n 'mel': tf.TensorShape([None, 80]),\n 'audio': tf.TensorShape([None]),\n },\n)\ndataset = dataset.shuffle(32)\ndataset = dataset.padded_batch(\n 32,\n padded_shapes={\n 'audio': tf.TensorShape([None]),\n 'mel': tf.TensorShape([None, 80]),\n },\n padding_values={\n 'audio': tf.constant(0, dtype=tf.float32),\n 'mel': tf.constant(0, dtype=tf.float32),\n },\n)\n\nfeatures = dataset.make_one_shot_iterator().get_next()\nfeatures\n\nimport malaya_speech\nimport malaya_speech.train\nfrom malaya_speech.train.model import melgan, mb_melgan\nfrom malaya_speech.train.model import stft\nimport malaya_speech.config\nfrom malaya_speech.train.loss import calculate_2d_loss, calculate_3d_loss\n\nmb_melgan_config = malaya_speech.config.mb_melgan_config\ngenerator = melgan.Generator(\n mb_melgan.GeneratorConfig(**mb_melgan_config['melgan_generator_params']),\n name='mb_melgan-generator',\n)\npqmf = mb_melgan.PQMF(\n mb_melgan.GeneratorConfig(**mb_melgan_config['melgan_generator_params']),\n dtype=tf.float32,\n name='pqmf',\n)\n\nsub_band_stft_loss = stft.loss.MultiResolutionSTFT(\n **mb_melgan_config['subband_stft_loss_params']\n)\n\nfull_band_stft_loss = stft.loss.MultiResolutionSTFT(\n **mb_melgan_config['stft_loss_params']\n)\n\ny_mb_hat = generator(features['mel'], training=True)\naudios = features['audio']\ny_hat = pqmf.synthesis(y_mb_hat)\n\ny_mb = pqmf.analysis(tf.expand_dims(audios, -1))\ny_mb = tf.transpose(y_mb, (0, 2, 1))\ny_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))\n\ny_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))\ny_mb_hat = tf.reshape(y_mb_hat, (-1, tf.shape(y_mb_hat)[-1]))\nsub_sc_loss, sub_mag_loss = calculate_2d_loss(\n y_mb, y_mb_hat, sub_band_stft_loss\n)\n\nsub_sc_loss = tf.reduce_mean(tf.reshape(sub_sc_loss, [-1, pqmf.subbands]), -1)\nsub_mag_loss = tf.reduce_mean(tf.reshape(sub_mag_loss, [-1, pqmf.subbands]), -1)\nfull_sc_loss, full_mag_loss = calculate_2d_loss(\n audios, tf.squeeze(y_hat, -1), full_band_stft_loss\n)\n\ngenerator_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (\n full_sc_loss + full_mag_loss\n)\ngenerator_loss = tf.reduce_mean(generator_loss)\ng_optimizer = tf.train.AdamOptimizer(0.0001, beta1=0.5, beta2=0.9).minimize(\n generator_loss\n)\n\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\nsaver = tf.train.Saver()\n\ncheckpoint = 10000\nepoch = 
200_000\npath = 'mbmelgan-female'\n\nckpt_path = tf.train.latest_checkpoint(path)\nif ckpt_path:\n saver.restore(sess, ckpt_path)\n print(f'restoring checkpoint from {ckpt_path}')\n\nfor i in range(0, epoch):\n g_loss, _ = sess.run([generator_loss, g_optimizer])\n\n if i % checkpoint == 0:\n saver.save(sess, f'{path}/model.ckpt', global_step=i)\n\n print(i, g_loss)\n\nsaver.save(sess, f'{path}/model.ckpt', global_step=epoch)\n",
"import os\nimport warnings\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nwarnings.filterwarnings('ignore')\n\nimport tensorflow as tf\nimport malaya_speech\nimport numpy as np\nimport IPython.display as ipd\nimport matplotlib.pyplot as plt\nimport malaya_speech.augmentation.waveform as augmentation\nfrom malaya_speech.train.model import fastsplit, fastspeech, fastvc\nfrom malaya_speech.train.model import sepformer_old as sepformer\nfrom malaya_speech.utils import tf_featurization\nimport malaya_speech.train as train\nimport random\nimport pickle\nfrom glob import glob\nfrom sklearn.utils import shuffle\n\nsr = 22050\nspeakers_size = 4\n\n\ndef get_data(combined_path, speakers_size=4, sr=22050):\n with open(combined_path, 'rb') as fopen:\n combined = pickle.load(fopen)\n y = []\n for i in range(speakers_size):\n with open(combined_path.replace('combined', str(i)), 'rb') as fopen:\n y_ = pickle.load(fopen)\n y.append(y_)\n return combined, y\n\n\ndef to_mel(y):\n mel = malaya_speech.featurization.universal_mel(y)\n mel[mel <= np.log(1e-2)] = np.log(1e-2)\n return mel\n\n\ndef generate():\n combined = glob('split-speaker-22k-train/combined/*.pkl')\n while True:\n combined = shuffle(combined)\n for i in range(len(combined)):\n x, y = get_data(combined[i])\n yield {'combined': x, 'y': y, 'length': [len(x)]}\n\n\ndef get_dataset(batch_size=8):\n def get():\n dataset = tf.data.Dataset.from_generator(\n generate,\n {'combined': tf.float32, 'y': tf.float32, 'length': tf.int32},\n output_shapes={\n 'combined': tf.TensorShape([None, 80]),\n 'y': tf.TensorShape([speakers_size, None, 80]),\n 'length': tf.TensorShape([None]),\n },\n )\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'combined': tf.TensorShape([None, 80]),\n 'y': tf.TensorShape([speakers_size, None, 80]),\n 'length': tf.TensorShape([None]),\n },\n padding_values={\n 'combined': tf.constant(np.log(1e-2), dtype=tf.float32),\n 'y': tf.constant(np.log(1e-2), dtype=tf.float32),\n 'length': tf.constant(0, dtype=tf.int32),\n },\n )\n return dataset\n\n return get\n\n\ntotal_steps = 10000000\n\n\ndef model_fn(features, labels, mode, params):\n lengths = features['length'][:, 0]\n config = malaya_speech.config.fastspeech_config\n dim = 256\n config['encoder_hidden_size'] = dim\n config['decoder_hidden_size'] = dim\n config['encoder_num_hidden_layers'] = 4\n config['encoder_num_attention_heads'] = 4\n config = fastspeech.Config(vocab_size=1, **config)\n\n def transformer(): return sepformer.Encoder_FastSpeech(\n config.encoder_self_attention_params\n )\n def decoder(): return fastvc.Decoder(config.decoder_self_attention_params)\n model = sepformer.Model_Mel(\n transformer, transformer, decoder, activation=None\n )\n logits = model(features['combined'], lengths)\n outputs = tf.transpose(logits, [1, 2, 0, 3])\n loss = fastsplit.calculate_loss(\n features['y'], outputs, lengths, C=speakers_size\n )\n tf.identity(loss, 'total_loss')\n tf.summary.scalar('total_loss', loss)\n\n global_step = tf.train.get_or_create_global_step()\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = train.optimizer.adamw.create_optimizer(\n loss,\n init_lr=0.0001,\n num_train_steps=total_steps,\n num_warmup_steps=100000,\n end_learning_rate=0.00001,\n weight_decay_rate=0.001,\n beta_1=0.9,\n beta_2=0.98,\n epsilon=1e-6,\n clip_norm=1.0,\n )\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n 
mode=tf.estimator.ModeKeys.EVAL, loss=loss\n )\n\n return estimator_spec\n\n\ntrain_hooks = [tf.train.LoggingTensorHook(['total_loss'], every_n_iter=1)]\ntrain_dataset = get_dataset()\n\nsave_directory = 'split-speaker-sepformer-mel'\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir=save_directory,\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=3000,\n max_steps=total_steps,\n train_hooks=train_hooks,\n eval_step=0,\n)\n",
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\nimport tensorflow as tf\nimport malaya_speech.train as train\nimport malaya_speech.train.model.speakernet as speakernet\nimport malaya_speech.augmentation.waveform as augmentation\nimport malaya_speech\nimport librosa\nimport numpy as np\nfrom glob import glob\nfrom collections import defaultdict\nfrom itertools import cycle\nfrom multiprocessing import Pool\nimport itertools\nimport pandas as pd\nimport random\n\n\ndef chunks(l, n):\n for i in range(0, len(l), n):\n yield (l[i: i + n], i // n)\n\n\ndef multiprocessing(strings, function, cores=6, returned=True):\n df_split = chunks(strings, len(strings) // cores)\n pool = Pool(cores)\n print('initiate pool map')\n pooled = pool.map(function, df_split)\n print('gather from pool')\n pool.close()\n pool.join()\n print('closed pool')\n\n if returned:\n return list(itertools.chain(*pooled))\n\n\nlibrispeech = glob('../speech-bahasa/LibriSpeech/*/*/*/*.flac')\n\n\ndef get_speaker_librispeech(file):\n return file.split('/')[-1].split('-')[0]\n\n\nspeakers = defaultdict(list)\nfor f in librispeech:\n speakers[get_speaker_librispeech(f)].append(f)\n\nvctk = glob('vtck/**/*.flac', recursive=True)\nvctk_speakers = defaultdict(list)\nfor f in vctk:\n s = f.split('/')[-1].split('_')[0]\n vctk_speakers[s].append(f)\n\nfiles = glob('../speech-bahasa/ST-CMDS-20170001_1-OS/*.wav')\nspeakers_mandarin = defaultdict(list)\nfor f in files:\n speakers_mandarin[f[:-9]].append(f)\nlen(speakers_mandarin)\n\nspeakers_malay = {}\nspeakers_malay['salina'] = glob(\n '/home/husein/speech-bahasa/salina/output-wav-salina/*.wav'\n)\nmale = glob('/home/husein/speech-bahasa/turki/output-wav-turki/*.wav')\nmale.extend(\n glob('/home/husein/speech-bahasa/output-wav-dari-pasentran-ke-istana/*.wav')\n)\nspeakers_malay['male'] = male\nspeakers_malay['haqkiem'] = glob('/home/husein/speech-bahasa/haqkiem/*.wav')\nspeakers_malay['khalil'] = glob('/home/husein/speech-bahasa/tolong-sebut/*.wav')\nspeakers_malay['mas'] = glob(\n '/home/husein/speech-bahasa/sebut-perkataan-woman/*.wav'\n)\nhusein = glob('/home/husein/speech-bahasa/audio-wattpad/*.wav')\nhusein.extend(glob('/home/husein/speech-bahasa/audio-iium/*.wav'))\nhusein.extend(glob('/home/husein/speech-bahasa/audio/*.wav'))\nhusein.extend(glob('/home/husein/speech-bahasa/sebut-perkataan-man/*.wav'))\nspeakers_malay['husein'] = husein\n\ndf_nepali = pd.read_csv(\n '/home/husein/speech-bahasa/nepali_0/asr_nepali/utt_spk_text.tsv',\n sep='\\t',\n header=None,\n)\nasr_nepali = glob('/home/husein/speech-bahasa/*/asr_nepali/data/*/*.flac')\nasr_nepali_replaced = {\n f.split('/')[-1].replace('.flac', ''): f for f in asr_nepali\n}\ndf_nepali = df_nepali[df_nepali[0].isin(asr_nepali_replaced.keys())]\n\nspeakers_nepali = defaultdict(list)\nfor i in range(len(df_nepali)):\n speakers_nepali[df_nepali.iloc[i, 1]].append(\n asr_nepali_replaced[df_nepali.iloc[i, 0]]\n )\n\nsr = 16000\n\ns = {\n **speakers,\n **vctk_speakers,\n **speakers_malay,\n **speakers_mandarin,\n **speakers_nepali,\n}\n\nkeys = list(s.keys())\n\n\ndef random_speakers(n):\n ks = random.sample(keys, n)\n r = []\n for k in ks:\n r.append(random.choice(s[k]))\n return r\n\n\ndef read_wav(f):\n return malaya_speech.load(f, sr=sr)\n\n\ndef random_sampling(s, length):\n return augmentation.random_sampling(s, sr=sr, length=length)\n\n\ndef combine_speakers(files, n=5):\n w_samples = random.sample(files, n)\n w_samples = [\n random_sampling(\n read_wav(f)[0], length=random.randint(500, max(10000 // n, 5000))\n )\n for f 
in w_samples\n ]\n left = w_samples[0].copy() * random.uniform(0.5, 1.0)\n\n combined = None\n\n for i in range(1, n):\n right = w_samples[i].copy() * random.uniform(0.5, 1.0)\n overlap = random.uniform(0.1, 0.9)\n len_overlap = int(overlap * len(right))\n minus = len(left) - len_overlap\n if minus < 0:\n minus = 0\n padded_right = np.pad(right, (minus, 0))\n left = np.pad(left, (0, len(padded_right) - len(left)))\n\n left = left + padded_right\n\n left = left / np.max(np.abs(left))\n return left\n\n\nlabels = [\n '0 speaker',\n '1 speaker',\n '2 speakers',\n '3 speakers',\n '4 speakers',\n '5 speakers',\n '6 speakers',\n '7 speakers',\n '8 speakers',\n '9 speakers',\n '10 speakers',\n 'more than 10 speakers',\n]\n\n\ndef parallel(f):\n count = random.randint(0, 11)\n if count > 10:\n count = random.randint(11, 15)\n while True:\n try:\n if count > 0:\n combined = combine_speakers(random_speakers(count), count)\n else:\n combined = combine_speakers(noises, random.randint(1, 10))\n break\n except Exception as e:\n print(e)\n pass\n if count >= (len(labels) - 1):\n print(count)\n count = len(labels) - 1\n\n return combined, [count]\n\n\ndef loop(files):\n files = files[0]\n results = []\n for f in files:\n for _ in range(3):\n results.append(parallel(f))\n return results\n\n\ndef generate(batch_size=10, repeat=6):\n fs = [i for i in range(batch_size)]\n while True:\n results = multiprocessing(fs, loop, cores=len(fs))\n for _ in range(repeat):\n random.shuffle(results)\n for r in results:\n if not np.isnan(r[0]).any() and not np.isnan(r[1]).any():\n yield {'inputs': r[0], 'targets': r[1]}\n\n\nconfig = malaya_speech.config.speakernet_featurizer_config\nnew_config = {'frame_ms': 20, 'stride_ms': 12.0}\nfeaturizer = malaya_speech.featurization.SpeakerNetFeaturizer(\n **{**config, **new_config}\n)\n\nDIMENSION = 64\n\n\ndef calc(v):\n r = featurizer(v)\n return r\n\n\ndef preprocess_inputs(example):\n s = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)\n\n s = tf.reshape(s, (-1, DIMENSION))\n length = tf.cast(tf.shape(s)[0], tf.int32)\n length = tf.expand_dims(length, 0)\n example['inputs'] = s\n example['inputs_length'] = length\n\n return example\n\n\ndef get_dataset(batch_size=32, shuffle_size=256, thread_count=6):\n def get():\n dataset = tf.data.Dataset.from_generator(\n generate,\n {'inputs': tf.float32, 'targets': tf.int32},\n output_shapes={\n 'inputs': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n },\n )\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n dataset = dataset.map(\n preprocess_inputs, num_parallel_calls=thread_count\n )\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'inputs': tf.TensorShape([None, DIMENSION]),\n 'inputs_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n },\n padding_values={\n 'inputs': tf.constant(0, dtype=tf.float32),\n 'inputs_length': tf.constant(0, dtype=tf.int32),\n 'targets': tf.constant(0, dtype=tf.int32),\n },\n )\n return dataset\n\n return get\n\n\ndef model_fn(features, labels, mode, params):\n learning_rate = 1e-5\n init_checkpoint = '../speakernet/model.ckpt'\n Y = tf.cast(features['targets'][:, 0], tf.int32)\n\n model = speakernet.Model(\n features['inputs'],\n features['inputs_length'][:, 0],\n num_class=len(labels),\n mode='train',\n )\n logits = model.logits\n\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=Y\n )\n )\n\n tf.identity(loss, 'train_loss')\n\n accuracy = tf.metrics.accuracy(\n labels=Y, 
predictions=tf.argmax(logits, axis=1)\n )\n\n tf.identity(accuracy[1], name='train_accuracy')\n\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n variables = [v for v in variables if 'dense_2' not in v.name]\n\n assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(\n variables, init_checkpoint\n )\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={'accuracy': accuracy},\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\n\n\ntrain_dataset = get_dataset()\n\nsave_directory = 'output-speakernet-speaker-count'\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir=save_directory,\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=25000,\n max_steps=300_000,\n train_hooks=train_hooks,\n)\n"
] | [
[
"tensorflow.train.LoggingTensorHook",
"numpy.amax",
"tensorflow.TensorShape",
"numpy.abs",
"tensorflow.constant",
"numpy.clip",
"tensorflow.get_collection",
"sklearn.utils.shuffle",
"tensorflow.cast",
"tensorflow.identity",
"tensorflow.train.init_from_checkpoint",
"numpy.random.normal",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.logical_not",
"numpy.random.uniform",
"numpy.array",
"tensorflow.summary.scalar"
],
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.cast",
"tensorflow.train.init_from_checkpoint",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"numpy.random.randint",
"tensorflow.compat.v1.numpy_function",
"numpy.pad",
"tensorflow.get_collection",
"tensorflow.data.TFRecordDataset",
"tensorflow.io.gfile.glob",
"tensorflow.train.get_or_create_global_step",
"numpy.std",
"tensorflow.parse_single_example",
"tensorflow.argmax",
"tensorflow.TensorShape",
"tensorflow.identity",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.VarLenFeature",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.estimator.EstimatorSpec"
],
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.data.TFRecordDataset",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.identity",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.VarLenFeature",
"tensorflow.parse_single_example",
"tensorflow.summary.scalar",
"tensorflow.compat.v1.numpy_function"
],
[
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.range",
"tensorflow.shape",
"numpy.power",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.cast",
"tensorflow.reshape",
"numpy.cos",
"tensorflow.nn.tanh",
"numpy.sin",
"tensorflow.expand_dims",
"tensorflow.gather",
"tensorflow.sequence_mask"
],
[
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.ones_like",
"tensorflow.einsum",
"tensorflow.zeros_like",
"tensorflow.reduce_min",
"tensorflow.one_hot",
"tensorflow.python.ops.weights_broadcast_ops.broadcast_weights",
"tensorflow.div_no_nan"
],
[
"tensorflow.variable_scope",
"tensorflow.sigmoid",
"tensorflow.concat"
],
[
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.keras.losses.MeanAbsoluteError",
"tensorflow.squeeze",
"tensorflow.trainable_variables",
"numpy.load",
"tensorflow.train.Saver",
"tensorflow.TensorShape",
"tensorflow.InteractiveSession",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.train.latest_checkpoint",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.ones_like",
"tensorflow.expand_dims"
],
[
"tensorflow.TensorShape",
"tensorflow.train.latest_checkpoint",
"tensorflow.transpose",
"tensorflow.InteractiveSession",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"numpy.load"
],
[
"numpy.log",
"tensorflow.train.LoggingTensorHook",
"tensorflow.transpose",
"tensorflow.TensorShape",
"tensorflow.constant",
"sklearn.utils.shuffle",
"tensorflow.identity",
"tensorflow.train.get_or_create_global_step",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.summary.scalar"
],
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.cast",
"tensorflow.train.init_from_checkpoint",
"tensorflow.train.AdamOptimizer",
"tensorflow.compat.v1.numpy_function",
"pandas.read_csv",
"numpy.pad",
"tensorflow.get_collection",
"tensorflow.train.get_or_create_global_step",
"tensorflow.argmax",
"tensorflow.TensorShape",
"tensorflow.shape",
"numpy.isnan",
"tensorflow.identity",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.constant",
"numpy.abs",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.estimator.EstimatorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
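Editor's note: the malaya-speech training scripts recorded in the row above optimise a multi-scale least-squares discriminator objective (the `mse_loss(tf.ones_like(p[i][-1]), p[i][-1])` pattern summed over scales and divided by the scale count). The sketch below restates that averaging in plain NumPy so the per-scale bookkeeping is easy to see; the function name, shapes, and toy values are illustrative and are not taken from the recorded repository.

```python
# Minimal sketch of a multi-scale least-squares (LSGAN) discriminator loss.
import numpy as np

def lsgan_discriminator_loss(real_outputs, fake_outputs):
    """real_outputs / fake_outputs: lists of per-scale discriminator outputs."""
    # real outputs are pushed toward 1, fake outputs toward 0, then averaged over scales
    real_loss = np.mean([np.mean((1.0 - p) ** 2) for p in real_outputs])
    fake_loss = np.mean([np.mean(p_hat ** 2) for p_hat in fake_outputs])
    return real_loss + fake_loss, real_loss, fake_loss

# toy usage: three discriminator scales, batch of 4, 16 output frames each
real = [np.random.uniform(0.8, 1.0, size=(4, 16)) for _ in range(3)]
fake = [np.random.uniform(0.0, 0.2, size=(4, 16)) for _ in range(3)]
print(lsgan_discriminator_loss(real, fake))
```

Dividing by the number of scales keeps each discriminator's contribution comparable no matter how many resolutions are used.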
fab464654/SSD_on_ActiveVisionDataset | [
"1bc6f0745241d0b45c3f257c6fb09ea0435c993e"
] | [
"train.py"
] | [
"import time\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nfrom model import SSD300, MultiBoxLoss\nfrom datasets import PascalVOCDataset\nfrom utils import *\n\n# Data parameters\ndata_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT' # folder with data files\nkeep_difficult = True # use objects considered difficult to detect?\n\n# Model parameters\n# Not too many here since the SSD300 has a very specific structure\nn_classes = len(label_map) # number of different types of objects\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Learning parameters\ncheckpoint = \"google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar\" # path to model checkpoint, None if none\nbatch_size = 9 # batch size\niterations = 120000 # number of iterations to train\nworkers = 4 # number of workers for loading data in the DataLoader\nprint_freq = 5 # print training status every __ batches\nlr = 5e-4 # learning rate\ndecay_lr_at = [80000, 100000] # decay learning rate after these many iterations\ndecay_lr_to = 0.1 # decay learning rate to this fraction of the existing learning rate\nmomentum = 0.9 # momentum\nweight_decay = 5e-4 # weight decay\ngrad_clip = None # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MuliBox loss calculation\n\ncudnn.benchmark = True\n\n\ndef main():\n \"\"\"\n Training.\n \"\"\"\n global start_epoch, label_map, epoch, checkpoint, decay_lr_at\n\n # Initialize model or load checkpoint\n if checkpoint is None:\n start_epoch = 0\n model = SSD300(n_classes=n_classes)\n # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo\n biases = list()\n not_biases = list()\n for param_name, param in model.named_parameters():\n if param.requires_grad:\n if param_name.endswith('.bias'):\n biases.append(param)\n else:\n not_biases.append(param)\n optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],\n lr=lr, momentum=momentum, weight_decay=weight_decay)\n\n else:\n checkpoint = torch.load(checkpoint)\n start_epoch = checkpoint['epoch'] + 1\n print('\\nLoaded checkpoint from epoch %d.\\n' % start_epoch)\n model = checkpoint['model']\n optimizer = checkpoint['optimizer']\n\n # Move to default device\n model = model.to(device)\n criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)\n\n #import active_vision_dataset_processing.data_loading\n import transforms, active_vision_dataset\n\n #Include all instances\n pick_trans = transforms.PickInstances(range(34))\n\n TRAIN_PATH = \"./google_drive/MyDrive/ColabNotebooks/Project/trainDataset\"\n\n \n train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,\n target_transform=pick_trans,\n scene_list=['Home_001_1', \n 'Home_002_1',\n 'Home_003_1', \n 'Home_004_1',\n 'Home_005_1',\n 'Home_006_1',\n 'Home_007_1',\n 'Home_008_1',\n 'Home_014_1',\n 'Home_011_1',\n 'Home_010_1',\n 'Office_001_1'],\n fraction_of_no_box=-1)\n \n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=active_vision_dataset.collate\n )\n \"\"\"\n #I TRY TO USE THE DEFAULT DATASET LOADER::::::::::::::\n\n # Custom dataloaders\n train_dataset = PascalVOCDataset(data_folder,\n split='train',\n keep_difficult=keep_difficult)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,\n 
collate_fn=train_dataset.collate_fn, num_workers=workers,\n pin_memory=True) # note that we're passing the collate function here\n \"\"\"\n\n # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)\n # To convert iterations to epochs, divide iterations by the number of iterations per epoch\n # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations\n epochs = iterations // (len(train_dataset) // 32)\n decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]\n\n # Epochs\n for epoch in range(start_epoch, epochs):\n \n # Decay learning rate at particular epochs\n if epoch in decay_lr_at:\n adjust_learning_rate(optimizer, decay_lr_to)\n\n # One epoch's training\n train(train_loader=train_loader,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch)\n\n # Save checkpoint\n save_checkpoint(epoch, model, optimizer)\n \n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n \n \n \"\"\"\n One epoch's training.\n\n :param train_loader: DataLoader for training data\n :param model: model\n :param criterion: MultiBox loss\n :param optimizer: optimizer\n :param epoch: epoch number\n \"\"\"\n model.train() # training mode enables dropout\n\n batch_time = AverageMeter() # forward prop. + back prop. time\n data_time = AverageMeter() # data loading time\n losses = AverageMeter() # loss\n\n start = time.time()\n\n import numpy as np\n # Batches\n for i, (images, labels) in enumerate(train_loader):\n\n #CHECK / REMOVE THIS CODE!\n data_time.update(time.time() - start)\n #print(len(images))\n #print(labels)\n # Move to default device\n data = images\n a = np.asarray(data)\n #print(a.shape)\n #a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)\n \n\n #image = torch.from_numpy(a) \n #image = image.permute(0,3,1,2)\n #print(image.shape)\n\n #Pre-processing: \n from torchvision import transforms as transf\n preprocess = transf.Compose([\n transf.ToPILImage(),\n transf.Resize(300),\n transf.CenterCrop(300),\n transf.ToTensor(), \n transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n \n for j in range(batch_size): \n \n if j == 0: \n input_tensor = preprocess(images[j])\n input_tensor = input_tensor.unsqueeze(0)\n input_batch = input_tensor\n else:\n input_tensor = preprocess(images[j])\n #print(input_tensor)\n input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n #print(input_tensor.shape)\n input_batch = torch.cat((input_batch, input_tensor), 0)\n #print(\"shape images: \",input_batch.shape) \n\n \n \n # In the Active Vision Dataset we have this formatting:\n # [xmin ymin xmax ymax instance_id difficulty]\n \n \"\"\" From the Tutorial: \nSince the number of objects in any given image can vary, we can't use a fixed \nsize tensor for storing the bounding boxes for the entire batch of N images.\n\nTherefore, ground truth bounding boxes fed to the model must be a list of \nlength N, where each element of the list is a Float tensor of dimensions\nN_o, 4, where N_o is the number of objects present in that particular image.\n\nTherefore, ground truth labels fed to the model must be a list of length N, \nwhere each element of the list is a Long tensor of dimensions N_o, where N_o \nis the number of objects present in that particular image.\n \"\"\"\n #Prints to test\n #print(j)\n box_id_diff = [b for b in labels[j][0]] \n box = [l[0:4] for l in box_id_diff]\n\n #print('before:',box) 
#To check\n\n #Boundary coordinates as requested\n for k in range(len(box)): \n box[k][0] = box[k][0]/1920.0\n box[k][2] = box[k][2]/1920.0 \n box[k][1] = box[k][1]/1080.0\n box[k][3] = box[k][3]/1080.0 \n\n #print('after:',box) #To check\n \n box_tensor = torch.FloatTensor(box).to(device)\n\n #Done with the parameter in AVD method\n \"\"\" \n #Check if there are objects in the images\n if j == 0: \n start = True\n \n if len(box_tensor) > 0:\n if start == True:\n box_list = box_tensor\n start = False\n elif start == False:\n box_list = [box_list, box_tensor] \n #box_list = torch.cat((box_list,box_tensor),0) \n else:\n start = True\n \"\"\"\n \n #print(box_tensor) #To check\n\n if j == 0: \n box_list = [box_tensor]\n else:\n box_list.append(box_tensor) \n\n label = [l[4] for l in box_id_diff]\n label_tensor = torch.LongTensor(label).to(device)\n if j == 0: \n label_list = [label_tensor]\n else:\n label_list.append(label_tensor) \n\n \n #print(box_id_diff[0][0:4])\n \n \"\"\"\n if len(box_id_diff.size())-1 != 0:\n if j == 0: \n box = box_id_diff[0][0:4]\n print(\"asad:\",box)\n #box = box.unsqueeze(0)\n boxes = box\n else:\n box = [l[0:4] for l in box_id_diff]\n\n #box = box.unsqueeze(0) # create a mini-batch as expected by the model\n #print(input_tensor.shape)\n boxes = torch.cat((boxes, box), 0)\n print(\"boxes:\", boxes)\n \"\"\"\n #box = torch.split(box_id_diff, 2)\n #print(box)\n \"\"\"\n if not labels[j][0]:\n labels = [] \n print(\"coasc\") \n else: \n labels = [l.to(device) for l in torch.tensor(labels[j][0][4])]\n \"\"\"\n \n #print(\"list of boxes:\",box_list)\n #print(\"list of labels:\", label_list)\n\n images = input_batch.to(device) # (batch_size (N), 3, 300, 300)\n #print(images.shape)\n boxes = box_list\n labels = label_list\n\n # Forward prop. \n predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)\n\n #Prints to check the dimensions\n #print(predicted_locs.shape) #correct \n #print(predicted_scores.shape) #correct \n\n # Loss\n loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar\n\n # Backward prop.\n optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients, if necessary\n if grad_clip is not None:\n clip_gradient(optimizer, grad_clip)\n\n # Update model\n optimizer.step()\n\n losses.update(loss.item(), images.size(0))\n batch_time.update(time.time() - start)\n\n start = time.time()\n\n \n # Print status\n if i % print_freq == 0: \n print('Epoch: [{0}][{1}/{2}]\\t' \n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(epoch, i, len(train_loader), loss=losses))\n \"\"\"\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(epoch, i, len(train_loader),\n batch_time=batch_time,\n data_time=data_time, loss=losses))\n \"\"\" \n del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
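Editor's note: the `train.py` record above rescales Active Vision Dataset boxes by hard-coded 1920x1080 image dimensions before building `box_tensor`. The short sketch below shows that same pixel-to-fractional conversion in isolation; `normalize_boxes` and the sample coordinates are illustrative stand-ins, not code from the repository.

```python
# Minimal sketch: convert [xmin, ymin, xmax, ymax] pixel boxes to [0, 1] fractions.
import torch

def normalize_boxes(boxes_xyxy, width, height):
    """boxes_xyxy: FloatTensor of shape (N, 4) in pixel coordinates."""
    scale = torch.tensor([width, height, width, height], dtype=torch.float32)
    return boxes_xyxy / scale

boxes = torch.tensor([[192.0, 108.0, 960.0, 540.0]])
print(normalize_boxes(boxes, 1920, 1080))  # tensor([[0.1000, 0.1000, 0.5000, 0.5000]])
```

Passing the image width and height explicitly avoids the hard-coded constants and makes the step reusable at other resolutions.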
1nadequacy/dm_control | [
"a55474768cf0a6d570fe4a376802630027ad5f01"
] | [
"dm_control/rl/specs_test.py"
] | [
"# Copyright 2017 The dm_control Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for specs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Internal dependencies.\n\nfrom absl.testing import absltest\nfrom dm_control.rl import specs as array_spec\nimport numpy as np\nimport six\n\n\nclass ArraySpecTest(absltest.TestCase):\n\n def testShapeTypeError(self):\n with self.assertRaises(TypeError):\n array_spec.ArraySpec(32, np.int32)\n\n def testDtypeTypeError(self):\n with self.assertRaises(TypeError):\n array_spec.ArraySpec((1, 2, 3), \"32\")\n\n def testStringDtype(self):\n array_spec.ArraySpec((1, 2, 3), \"int32\")\n\n def testNumpyDtype(self):\n array_spec.ArraySpec((1, 2, 3), np.int32)\n\n def testDtype(self):\n spec = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertEqual(np.int32, spec.dtype)\n\n def testShape(self):\n spec = array_spec.ArraySpec([1, 2, 3], np.int32)\n self.assertEqual((1, 2, 3), spec.shape)\n\n def testEqual(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertEqual(spec_1, spec_2)\n\n def testNotEqualDifferentShape(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualDifferentDtype(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64)\n spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualOtherClass(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = None\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = ()\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n def testIsUnhashable(self):\n spec = array_spec.ArraySpec(shape=(1, 2, 3), dtype=np.int32)\n with self.assertRaisesRegexp(TypeError, \"unhashable type\"):\n hash(spec)\n\n def testValidateDtype(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n spec.validate(np.zeros((1, 2), dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.zeros((1, 2), dtype=np.float32))\n\n def testValidateShape(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n spec.validate(np.zeros((1, 2), dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.zeros((1, 2, 3), dtype=np.int32))\n\n def testGenerateValue(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n test_value = spec.generate_value()\n spec.validate(test_value)\n\n\nclass BoundedArraySpecTest(absltest.TestCase):\n\n def testInvalidMinimum(self):\n with six.assertRaisesRegex(self, ValueError, \"not compatible\"):\n array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1))\n\n def testInvalidMaximum(self):\n with six.assertRaisesRegex(self, ValueError, \"not compatible\"):\n array_spec.BoundedArraySpec((3, 5), 
np.uint8, 0, (1, 1, 1))\n\n def testMinMaxAttributes(self):\n spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))\n self.assertEqual(type(spec.minimum), np.ndarray)\n self.assertEqual(type(spec.maximum), np.ndarray)\n\n def testNotWriteable(self):\n spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))\n with six.assertRaisesRegex(self, ValueError, \"read-only\"):\n spec.minimum[0] = -1\n with six.assertRaisesRegex(self, ValueError, \"read-only\"):\n spec.maximum[0] = 100\n\n def testEqualBroadcastingBounds(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=0.0, maximum=1.0)\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertEqual(spec_1, spec_2)\n\n def testNotEqualDifferentMinimum(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualOtherClass(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])\n spec_2 = array_spec.ArraySpec((1, 2), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = None\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = ()\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n def testNotEqualDifferentMaximum(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=0.0, maximum=2.0)\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertNotEqual(spec_1, spec_2)\n\n def testIsUnhashable(self):\n spec = array_spec.BoundedArraySpec(\n shape=(1, 2), dtype=np.int32, minimum=0.0, maximum=2.0)\n with self.assertRaisesRegexp(TypeError, \"unhashable type\"):\n hash(spec)\n\n def testRepr(self):\n as_string = repr(array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=101.0, maximum=73.0))\n self.assertIn(\"101\", as_string)\n self.assertIn(\"73\", as_string)\n\n def testValidateBounds(self):\n spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)\n spec.validate(np.array([[5, 6], [8, 10]], dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.array([[5, 6], [8, 11]], dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.array([[4, 6], [8, 10]], dtype=np.int32))\n\n def testGenerateValue(self):\n spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)\n test_value = spec.generate_value()\n spec.validate(test_value)\n\n def testScalarBounds(self):\n spec = array_spec.BoundedArraySpec((), np.float, minimum=0.0, maximum=1.0)\n\n self.assertIsInstance(spec.minimum, np.ndarray)\n self.assertIsInstance(spec.maximum, np.ndarray)\n\n # Sanity check that numpy compares correctly to a scalar for an empty shape.\n self.assertEqual(0.0, spec.minimum)\n self.assertEqual(1.0, spec.maximum)\n\n # Check that the spec doesn't fail its own input validation.\n _ = array_spec.BoundedArraySpec(\n spec.shape, spec.dtype, spec.minimum, spec.maximum)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
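Editor's note: the `specs_test.py` record above exercises `ArraySpec`/`BoundedArraySpec` validation (shape, dtype, and bound checks, plus `generate_value`). The NumPy-only sketch below mirrors what those tests assert; it is a simplified stand-in written for illustration, not the dm_control implementation.

```python
# Minimal sketch of the checks the BoundedArraySpec tests exercise.
import numpy as np

def validate_bounded(value, shape, dtype, minimum, maximum):
    """Raise ValueError unless `value` matches shape, dtype and bounds."""
    value = np.asarray(value)
    if value.shape != tuple(shape):
        raise ValueError(f"expected shape {shape}, got {value.shape}")
    if value.dtype != np.dtype(dtype):
        raise ValueError(f"expected dtype {dtype}, got {value.dtype}")
    if np.any(value < minimum) or np.any(value > maximum):
        raise ValueError("value out of bounds")
    return value

# an in-bounds array passes; out-of-bounds or wrong-dtype inputs raise,
# matching the assertRaises cases in the recorded test file
validate_bounded(np.array([[5, 6], [8, 10]], dtype=np.int32), (2, 2), np.int32, 5, 10)
```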
JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images | [
"344d64ad2fe9d790c49e8005b3abee219d362278",
"344d64ad2fe9d790c49e8005b3abee219d362278"
] | [
"Model_test.py",
"Model Perfomance Comparasion.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 2 17:32:52 2021\r\n\r\n@author: jiangyt\r\n\"\"\"\r\n\r\nfrom Tools import *\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Input, BatchNormalization\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, add, AveragePooling2D, ZeroPadding2D, GlobalAveragePooling2D\r\nfrom tensorflow.keras.models import Model, Sequential\r\n\r\n\"\"\"\r\nWeight Dict\r\n\"\"\"\r\nWeight = {'Resnet50_448':\"./model_checkpoints/ResNet50_448_checkpoints/20218131038.h5\",\r\n 'MobileNet_224':\"./model_checkpoints/MobileNet_224_checkpoints/202189956.h5\",\r\n 'Xception_448':\"./model_checkpoints/Xception_448_checkpoints/2021810951.h5\",\r\n 'EfficientNet_B0_320':\"./model_checkpoints/EfficientNetB0_320_checkpoints/2021871045.h5\",\r\n 'DenseNet121_448':\"./model_checkpoints/DenseNet121_448_checkpoints/2021891655.h5\"}\r\n\r\n\"\"\"\r\nLoad model\r\n\"\"\"\r\ndf = pd.read_excel('./AI-Physician Comparasion Dataset.xlsx')\r\n# df = pd.read_csv('/home/joe/Project/Breast_new/20210805_b_m_Xception_train/df_test_small.csv')\r\n\r\n\"\"\"\r\nEval each model\r\n\"\"\"\r\nfor key in Weight.keys():\r\n if key == 'Resnet50_448':\r\n from tensorflow.keras.applications.resnet50 import preprocess_input\r\n backbone_model= keras.applications.resnet50.ResNet50(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n elif key == 'MobileNet_224':\r\n from tensorflow.keras.applications.mobilenet import preprocess_input\r\n backbone_model= keras.applications.mobilenet.MobileNet(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(224, 224, 3), pooling=None, classes=2)\r\n elif key == 'Xception_448':\r\n from tensorflow.keras.applications.xception import preprocess_input\r\n backbone_model= keras.applications.xception.Xception(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n elif key == 'EfficientNet_B0_320':\r\n from tensorflow.keras.applications.efficientnet import preprocess_input\r\n backbone_model= keras.applications.efficientnet.EfficientNetB0(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(320, 320, 3), pooling=None, classes=2)\r\n elif key == 'DenseNet121_448':\r\n from tensorflow.keras.applications.densenet import preprocess_input\r\n\r\n backbone_model = keras.applications.densenet.DenseNet121(include_top=False, weights=\"imagenet\",\r\n input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n else:\r\n print('Error: No model weight find')\r\n test_model = Sequential()\r\n test_model.add(backbone_model)\r\n test_model.add(GlobalAveragePooling2D())\r\n test_model.add(Dense(2, activation='softmax', name='fc1'))\r\n test_model.load_weights(Weight[key])\r\n\r\n test_model.summary()\r\n\r\n y_true = []\r\n y_pred = []\r\n\r\n for i in range(len(df)):\r\n y_true.append(df['malignancy'][i])\r\n x = Image.open(df['path'][i])\r\n x = np.array(x)\r\n x = zero_pad(x,int(key.split('_')[-1]))\r\n x = preprocess_input(x)\r\n x = x.reshape(1,x.shape[0],x.shape[1],x.shape[2])\r\n y_pred.append(test_model.predict(x))\r\n \r\n\r\n y_pred = np.array(y_pred)\r\n y_pred = y_pred.reshape(y_pred.shape[0],2)\r\n y_pred_1 = y_pred[:,1]\r\n\r\n thresh_0=get_auc(0, np.array(y_true), np.array(y_pred_1), 'Malignancy', plot=False)\r\n y_pred_comp_lvl=[1 if y>thresh_0 else 0 for y in y_pred_1]\r\n cm_comp=confusion_matrix(y_true, y_pred_comp_lvl)\r\n\r\n fig, 
axes = plt.subplots(nrows=2, ncols=2)\r\n fig.tight_layout(pad=2, w_pad=2.)\r\n fig.set_figheight(8)\r\n fig.set_figwidth(7)\r\n thresh_0=get_auc(axes[0, 0], np.array(y_true), np.array(y_pred_1), 'Performance of {}'.format(key))\r\n thresh_AP=get_precision_recall(axes[0, 1], np.array(y_true), np.array(y_pred_1), 'Malignancy=0 vs 1')\r\n plot_confusion_matrix(axes[1, 0], cm_comp, [\"0\", \"1\"], title='Malignancy', normalize=False)\r\n plot_confusion_matrix(axes[1, 1], cm_comp, [\"0\", \"1\"], title='Malignancy (normalized)')\r\n print('f1 score is: {:.3f}'.format(f1_score(y_true, y_pred_comp_lvl)))\r\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 16 10:55:17 2021\n\n@author: joe\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.stats as st\nfrom sklearn import metrics\n\nclass DelongTest():\n def __init__(self,preds1,preds2,label,threshold=0.05):\n '''\n preds1:the output of model1\n preds2:the output of model2\n label :the actual label\n '''\n self._preds1=preds1\n self._preds2=preds2\n self._label=label\n self.threshold=threshold\n self._show_result()\n\n\n def _auc(self,X, Y)->float:\n return 1/(len(X)*len(Y)) * sum([self._kernel(x, y) for x in X for y in Y])\n\n def _kernel(self,X, Y)->float:\n '''\n Mann-Whitney statistic\n '''\n return .5 if Y==X else int(Y < X)\n\n def _structural_components(self,X, Y)->list:\n V10 = [1/len(Y) * sum([self._kernel(x, y) for y in Y]) for x in X]\n V01 = [1/len(X) * sum([self._kernel(x, y) for x in X]) for y in Y]\n return V10, V01\n\n def _get_S_entry(self,V_A, V_B, auc_A, auc_B)->float:\n return 1/(len(V_A)-1) * sum([(a-auc_A)*(b-auc_B) for a,b in zip(V_A, V_B)])\n \n def _z_score(self,var_A, var_B, covar_AB, auc_A, auc_B):\n return (auc_A - auc_B)/((var_A + var_B - 2*covar_AB )**(.5)+ 1e-8)\n\n def _group_preds_by_label(self,preds, actual)->list:\n X = [p for (p, a) in zip(preds, actual) if a]\n Y = [p for (p, a) in zip(preds, actual) if not a]\n return X, Y\n\n def _compute_z_p(self):\n X_A, Y_A = self._group_preds_by_label(self._preds1, self._label)\n X_B, Y_B = self._group_preds_by_label(self._preds2, self._label)\n\n V_A10, V_A01 = self._structural_components(X_A, Y_A)\n V_B10, V_B01 = self._structural_components(X_B, Y_B)\n\n auc_A = self._auc(X_A, Y_A)\n auc_B = self._auc(X_B, Y_B)\n\n # Compute entries of covariance matrix S (covar_AB = covar_BA)\n var_A = (self._get_S_entry(V_A10, V_A10, auc_A, auc_A) * 1/len(V_A10)+ self._get_S_entry(V_A01, V_A01, auc_A, auc_A) * 1/len(V_A01))\n var_B = (self._get_S_entry(V_B10, V_B10, auc_B, auc_B) * 1/len(V_B10)+ self._get_S_entry(V_B01, V_B01, auc_B, auc_B) * 1/len(V_B01))\n covar_AB = (self._get_S_entry(V_A10, V_B10, auc_A, auc_B) * 1/len(V_A10)+ self._get_S_entry(V_A01, V_B01, auc_A, auc_B) * 1/len(V_A01))\n\n # Two tailed test\n z = self._z_score(var_A, var_B, covar_AB, auc_A, auc_B)\n p = st.norm.sf(abs(z))*2\n\n return z,p\n\n def _show_result(self):\n z,p=self._compute_z_p()\n print(f\"z score = {z:.5f};\\np value = {p:.5f};\")\n if p < self.threshold :print(\"There is a significant difference\")\n else: print(\"There is NO significant difference\")\n\nimport pandas as pd\nimport seaborn as sns\n\n\n\ndf = pd.read_csv('./model and physicians performance on AI-Physician Comparasion set/AI-Physician Comparasion result.csv')\n\nphys_name = ['senior-1','senior-2','junior-1','junior-2','entry-1','entry-2']\nphys = df.columns.tolist()[6:12]\ncols = df.columns.tolist()[1:]\nAIs = df.columns.tolist()[1:6]\n\nd_frame = []\n\nfor AI in AIs:\n d = []\n\n for AI2 in AIs:\n Delong = DelongTest(np.array(df[AI]), np.array(df[AI2]), np.array(df['y_true']))\n z,p = Delong._compute_z_p()\n d.append(p)\n \n d_frame.append(d)\n \nd_frame = pd.DataFrame(d_frame).T\nd_frame.columns = AIs\nd_frame.index = AIs\n\nplt.figure()\nsns.heatmap(data=d_frame,annot=True,cmap='RdBu_r')\nplt.xticks(rotation=30)\nplt.yticks(rotation=0)\nplt.title('p value of DeLong test(error of AIs)')\n\n\n\n"
] | [
[
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.applications.efficientnet.EfficientNetB0",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.densenet.DenseNet121",
"tensorflow.keras.applications.mobilenet.MobileNet",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.applications.densenet.preprocess_input"
],
[
"pandas.read_csv",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
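Editor's note: the second file in the row above implements a DeLong test whose `_auc`/`_kernel` methods compute the Mann-Whitney estimate of AUC pair by pair. The sketch below shows the same estimate vectorised with NumPy on toy scores; the helper name and data are illustrative additions, not part of the recorded script.

```python
# Minimal sketch of the Mann-Whitney AUC estimate underlying the DeLong test.
import numpy as np

def mann_whitney_auc(scores, labels):
    """AUC as the probability a positive outranks a negative (ties count 0.5)."""
    pos = scores[labels == 1]
    neg = scores[labels == 0]
    greater = (pos[:, None] > neg[None, :]).astype(float)
    ties = (pos[:, None] == neg[None, :]).astype(float)
    return float((greater + 0.5 * ties).mean())

scores = np.array([0.9, 0.8, 0.3, 0.2, 0.6])
labels = np.array([1, 1, 0, 0, 1])
print(mann_whitney_auc(scores, labels))  # 1.0: every positive scores above every negative
```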
arnoyu-hub/COMP0016miemie | [
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea"
] | [
"venv/Lib/site-packages/pandas/tests/series/indexing/test_get.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_at_time.py",
"venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_filter.py",
"venv/Lib/site-packages/sklearn/base.py",
"venv/Lib/site-packages/pandas/tests/series/methods/test_append.py",
"venv/Lib/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py",
"venv/Lib/site-packages/pandas/core/groupby/ops.py",
"venv/Lib/site-packages/pandas/tests/util/test_assert_series_equal.py",
"venv/Lib/site-packages/sklearn/linear_model/_coordinate_descent.py",
"venv/Lib/site-packages/pandas/core/indexes/range.py",
"venv/Lib/site-packages/sklearn/feature_selection/_mutual_info.py",
"venv/Lib/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py",
"venv/Lib/site-packages/pandas/tests/extension/test_extension.py",
"venv/Lib/site-packages/pandas/tests/indexing/test_chaining_and_caching.py",
"venv/Lib/site-packages/mpl_toolkits/axes_grid1/inset_locator.py",
"venv/Lib/site-packages/pandas/core/arrays/boolean.py",
"venv/Lib/site-packages/matplotlib/tri/tricontour.py",
"venv/Lib/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py",
"venv/Lib/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py",
"venv/Lib/site-packages/pandas/tests/strings/test_extract.py",
"venv/Lib/site-packages/pandas/tests/indexes/test_setops.py",
"venv/Lib/site-packages/pandas/tests/io/pytables/test_categorical.py",
"venv/Lib/site-packages/pandas/tests/extension/test_string.py",
"venv/Lib/site-packages/sklearn/manifold/_spectral_embedding.py",
"venv/Lib/site-packages/sklearn/neighbors/tests/test_neighbors.py",
"venv/Lib/site-packages/pandas/tests/series/methods/test_unique.py",
"venv/Lib/site-packages/sklearn/metrics/_classification.py",
"venv/Lib/site-packages/pandas/core/array_algos/putmask.py",
"venv/Lib/site-packages/sklearn/metrics/cluster/setup.py",
"venv/Lib/site-packages/sklearn/utils/tests/test_metaestimators.py",
"venv/Lib/site-packages/pandas/tests/series/methods/test_rename.py",
"venv/Lib/site-packages/pandas/tests/groupby/test_value_counts.py"
] | [
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import Series\r\nimport pandas._testing as tm\r\n\r\n\r\ndef test_get():\r\n # GH 6383\r\n s = Series(\r\n np.array(\r\n [\r\n 43,\r\n 48,\r\n 60,\r\n 48,\r\n 50,\r\n 51,\r\n 50,\r\n 45,\r\n 57,\r\n 48,\r\n 56,\r\n 45,\r\n 51,\r\n 39,\r\n 55,\r\n 43,\r\n 54,\r\n 52,\r\n 51,\r\n 54,\r\n ]\r\n )\r\n )\r\n\r\n result = s.get(25, 0)\r\n expected = 0\r\n assert result == expected\r\n\r\n s = Series(\r\n np.array(\r\n [\r\n 43,\r\n 48,\r\n 60,\r\n 48,\r\n 50,\r\n 51,\r\n 50,\r\n 45,\r\n 57,\r\n 48,\r\n 56,\r\n 45,\r\n 51,\r\n 39,\r\n 55,\r\n 43,\r\n 54,\r\n 52,\r\n 51,\r\n 54,\r\n ]\r\n ),\r\n index=pd.Float64Index(\r\n [\r\n 25.0,\r\n 36.0,\r\n 49.0,\r\n 64.0,\r\n 81.0,\r\n 100.0,\r\n 121.0,\r\n 144.0,\r\n 169.0,\r\n 196.0,\r\n 1225.0,\r\n 1296.0,\r\n 1369.0,\r\n 1444.0,\r\n 1521.0,\r\n 1600.0,\r\n 1681.0,\r\n 1764.0,\r\n 1849.0,\r\n 1936.0,\r\n ]\r\n ),\r\n )\r\n\r\n result = s.get(25, 0)\r\n expected = 43\r\n assert result == expected\r\n\r\n # GH 7407\r\n # with a boolean accessor\r\n df = pd.DataFrame({\"i\": [0] * 3, \"b\": [False] * 3})\r\n vc = df.i.value_counts()\r\n result = vc.get(99, default=\"Missing\")\r\n assert result == \"Missing\"\r\n\r\n vc = df.b.value_counts()\r\n result = vc.get(False, default=\"Missing\")\r\n assert result == 3\r\n\r\n result = vc.get(True, default=\"Missing\")\r\n assert result == \"Missing\"\r\n\r\n\r\ndef test_get_nan():\r\n # GH 8569\r\n s = pd.Float64Index(range(10)).to_series()\r\n assert s.get(np.nan) is None\r\n assert s.get(np.nan, default=\"Missing\") == \"Missing\"\r\n\r\n\r\ndef test_get_nan_multiple():\r\n # GH 8569\r\n # ensure that fixing \"test_get_nan\" above hasn't broken get\r\n # with multiple elements\r\n s = pd.Float64Index(range(10)).to_series()\r\n\r\n idx = [2, 30]\r\n assert s.get(idx) is None\r\n\r\n idx = [2, np.nan]\r\n assert s.get(idx) is None\r\n\r\n # GH 17295 - all missing keys\r\n idx = [20, 30]\r\n assert s.get(idx) is None\r\n\r\n idx = [np.nan, np.nan]\r\n assert s.get(idx) is None\r\n\r\n\r\ndef test_get_with_default():\r\n # GH#7725\r\n d0 = [\"a\", \"b\", \"c\", \"d\"]\r\n d1 = np.arange(4, dtype=\"int64\")\r\n others = [\"e\", 10]\r\n\r\n for data, index in ((d0, d1), (d1, d0)):\r\n s = Series(data, index=index)\r\n for i, d in zip(index, data):\r\n assert s.get(i) == d\r\n assert s.get(i, d) == d\r\n assert s.get(i, \"z\") == d\r\n for other in others:\r\n assert s.get(other, \"z\") == \"z\"\r\n assert s.get(other, other) == other\r\n\r\n\r\[email protected](\r\n \"arr\",\r\n [np.random.randn(10), tm.makeDateIndex(10, name=\"a\").tz_localize(tz=\"US/Eastern\")],\r\n)\r\ndef test_get2(arr):\r\n # TODO: better name, possibly split\r\n # GH#21260\r\n ser = Series(arr, index=[2 * i for i in range(len(arr))])\r\n assert ser.get(4) == ser.iloc[2]\r\n\r\n result = ser.get([4, 6])\r\n expected = ser.iloc[[2, 3]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ser.get(slice(2))\r\n expected = ser.iloc[[0, 1]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n assert ser.get(-1) is None\r\n assert ser.get(ser.index.max() + 1) is None\r\n\r\n ser = Series(arr[:6], index=list(\"abcdef\"))\r\n assert ser.get(\"c\") == ser.iloc[2]\r\n\r\n result = ser.get(slice(\"b\", \"d\"))\r\n expected = ser.iloc[[1, 2, 3]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ser.get(\"Z\")\r\n assert result is None\r\n\r\n assert ser.get(4) == ser.iloc[4]\r\n assert ser.get(-1) == ser.iloc[-1]\r\n assert ser.get(len(ser)) is None\r\n\r\n 
# GH#21257\r\n ser = Series(arr)\r\n ser2 = ser[::2]\r\n assert ser2.get(1) is None\r\n\r\n\r\ndef test_getitem_get(string_series, object_series):\r\n for obj in [string_series, object_series]:\r\n idx = obj.index[5]\r\n\r\n assert obj[idx] == obj.get(idx)\r\n assert obj[idx] == obj[5]\r\n\r\n assert string_series.get(-1) == string_series.get(string_series.index[-1])\r\n assert string_series[5] == string_series.get(string_series.index[5])\r\n\r\n\r\ndef test_get_none():\r\n # GH#5652\r\n s1 = Series(dtype=object)\r\n s2 = Series(dtype=object, index=list(\"abc\"))\r\n for s in [s1, s2]:\r\n result = s.get(None)\r\n assert result is None\r\n",
"from datetime import time\r\n\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nfrom pandas._libs.tslibs import timezones\r\n\r\nfrom pandas import (\r\n DataFrame,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestAtTime:\r\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\r\n def test_localized_at_time(self, tzstr, frame_or_series):\r\n tz = timezones.maybe_get_tz(tzstr)\r\n\r\n rng = date_range(\"4/16/2012\", \"5/1/2012\", freq=\"H\")\r\n ts = frame_or_series(np.random.randn(len(rng)), index=rng)\r\n\r\n ts_local = ts.tz_localize(tzstr)\r\n\r\n result = ts_local.at_time(time(10, 0))\r\n expected = ts.at_time(time(10, 0)).tz_localize(tzstr)\r\n tm.assert_equal(result, expected)\r\n assert timezones.tz_compare(result.index.tz, tz)\r\n\r\n def test_at_time(self, frame_or_series):\r\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\r\n ts = DataFrame(np.random.randn(len(rng), 2), index=rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n rs = ts.at_time(rng[1])\r\n assert (rs.index.hour == rng[1].hour).all()\r\n assert (rs.index.minute == rng[1].minute).all()\r\n assert (rs.index.second == rng[1].second).all()\r\n\r\n result = ts.at_time(\"9:30\")\r\n expected = ts.at_time(time(9, 30))\r\n tm.assert_equal(result, expected)\r\n\r\n def test_at_time_midnight(self, frame_or_series):\r\n # midnight, everything\r\n rng = date_range(\"1/1/2000\", \"1/31/2000\")\r\n ts = DataFrame(np.random.randn(len(rng), 3), index=rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n\r\n result = ts.at_time(time(0, 0))\r\n tm.assert_equal(result, ts)\r\n\r\n def test_at_time_nonexistent(self, frame_or_series):\r\n # time doesn't exist\r\n rng = date_range(\"1/1/2012\", freq=\"23Min\", periods=384)\r\n ts = DataFrame(np.random.randn(len(rng)), rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n rs = ts.at_time(\"16:00\")\r\n assert len(rs) == 0\r\n\r\n @pytest.mark.parametrize(\r\n \"hour\", [\"1:00\", \"1:00AM\", time(1), time(1, tzinfo=pytz.UTC)]\r\n )\r\n def test_at_time_errors(self, hour):\r\n # GH#24043\r\n dti = date_range(\"2018\", periods=3, freq=\"H\")\r\n df = DataFrame(list(range(len(dti))), index=dti)\r\n if getattr(hour, \"tzinfo\", None) is None:\r\n result = df.at_time(hour)\r\n expected = df.iloc[1:2]\r\n tm.assert_frame_equal(result, expected)\r\n else:\r\n with pytest.raises(ValueError, match=\"Index must be timezone\"):\r\n df.at_time(hour)\r\n\r\n def test_at_time_tz(self):\r\n # GH#24043\r\n dti = date_range(\"2018\", periods=3, freq=\"H\", tz=\"US/Pacific\")\r\n df = DataFrame(list(range(len(dti))), index=dti)\r\n result = df.at_time(time(4, tzinfo=pytz.timezone(\"US/Eastern\")))\r\n expected = df.iloc[1:2]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_at_time_raises(self, frame_or_series):\r\n # GH#20725\r\n obj = DataFrame([[1, 2, 3], [4, 5, 6]])\r\n if frame_or_series is not DataFrame:\r\n obj = obj[0]\r\n msg = \"Index must be DatetimeIndex\"\r\n with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex\r\n obj.at_time(\"00:00\")\r\n\r\n @pytest.mark.parametrize(\"axis\", [\"index\", \"columns\", 0, 1])\r\n def test_at_time_axis(self, axis):\r\n # issue 8839\r\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\r\n ts = DataFrame(np.random.randn(len(rng), len(rng)))\r\n ts.index, ts.columns = rng, rng\r\n\r\n indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]\r\n\r\n if axis in [\"index\", 0]:\r\n expected = 
ts.loc[indices, :]\r\n elif axis in [\"columns\", 1]:\r\n expected = ts.loc[:, indices]\r\n\r\n result = ts.at_time(\"9:30\", axis=axis)\r\n\r\n # Without clearing freq, result has freq 1440T and expected 5T\r\n result.index = result.index._with_freq(None)\r\n expected.index = expected.index._with_freq(None)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_at_time_datetimeindex(self):\r\n index = date_range(\"2012-01-01\", \"2012-01-05\", freq=\"30min\")\r\n df = DataFrame(np.random.randn(len(index), 5), index=index)\r\n akey = time(12, 0, 0)\r\n ainds = [24, 72, 120, 168]\r\n\r\n result = df.at_time(akey)\r\n expected = df.loc[akey]\r\n expected2 = df.iloc[ainds]\r\n tm.assert_frame_equal(result, expected)\r\n tm.assert_frame_equal(result, expected2)\r\n assert len(result) == 4\r\n",
"import operator\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n Categorical,\r\n DataFrame,\r\n Series,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\nfrom pandas.tests.arrays.categorical.common import TestCategorical\r\n\r\n\r\nclass TestCategoricalOpsWithFactor(TestCategorical):\r\n def test_categories_none_comparisons(self):\r\n factor = Categorical([\"a\", \"b\", \"b\", \"a\", \"a\", \"c\", \"c\", \"c\"], ordered=True)\r\n tm.assert_categorical_equal(factor, self.factor)\r\n\r\n def test_comparisons(self):\r\n result = self.factor[self.factor == \"a\"]\r\n expected = self.factor[np.asarray(self.factor) == \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor != \"a\"]\r\n expected = self.factor[np.asarray(self.factor) != \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor < \"c\"]\r\n expected = self.factor[np.asarray(self.factor) < \"c\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor > \"a\"]\r\n expected = self.factor[np.asarray(self.factor) > \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor >= \"b\"]\r\n expected = self.factor[np.asarray(self.factor) >= \"b\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor <= \"b\"]\r\n expected = self.factor[np.asarray(self.factor) <= \"b\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n n = len(self.factor)\r\n\r\n other = self.factor[np.random.permutation(n)]\r\n result = self.factor == other\r\n expected = np.asarray(self.factor) == np.asarray(other)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = self.factor == \"d\"\r\n expected = np.zeros(len(self.factor), dtype=bool)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n # comparisons with categoricals\r\n cat_rev = Categorical([\"a\", \"b\", \"c\"], categories=[\"c\", \"b\", \"a\"], ordered=True)\r\n cat_rev_base = Categorical(\r\n [\"b\", \"b\", \"b\"], categories=[\"c\", \"b\", \"a\"], ordered=True\r\n )\r\n cat = Categorical([\"a\", \"b\", \"c\"], ordered=True)\r\n cat_base = Categorical([\"b\", \"b\", \"b\"], categories=cat.categories, ordered=True)\r\n\r\n # comparisons need to take categories ordering into account\r\n res_rev = cat_rev > cat_rev_base\r\n exp_rev = np.array([True, False, False])\r\n tm.assert_numpy_array_equal(res_rev, exp_rev)\r\n\r\n res_rev = cat_rev < cat_rev_base\r\n exp_rev = np.array([False, False, True])\r\n tm.assert_numpy_array_equal(res_rev, exp_rev)\r\n\r\n res = cat > cat_base\r\n exp = np.array([False, False, True])\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n # Only categories with same categories can be compared\r\n msg = \"Categoricals can only be compared if 'categories' are the same\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_rev\r\n\r\n cat_rev_base2 = Categorical([\"b\", \"b\", \"b\"], categories=[\"c\", \"b\", \"a\", \"d\"])\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > cat_rev_base2\r\n\r\n # Only categories with same ordering information can be compared\r\n cat_unorderd = cat.set_ordered(False)\r\n assert not (cat > cat).any()\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_unorderd\r\n\r\n # comparison (in both directions) with Series will raise\r\n s = Series([\"b\", \"b\", \"b\"])\r\n msg = (\r\n \"Cannot compare a Categorical for op __gt__ with type 
\"\r\n r\"<class 'numpy\\.ndarray'>\"\r\n )\r\n with pytest.raises(TypeError, match=msg):\r\n cat > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > s\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat_rev\r\n\r\n # comparison with numpy.array will raise in both direction, but only on\r\n # newer numpy versions\r\n a = np.array([\"b\", \"b\", \"b\"])\r\n with pytest.raises(TypeError, match=msg):\r\n cat > a\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > a\r\n\r\n # Make sure that unequal comparison take the categories order in\r\n # account\r\n cat_rev = Categorical(list(\"abc\"), categories=list(\"cba\"), ordered=True)\r\n exp = np.array([True, False, False])\r\n res = cat_rev > \"b\"\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n # check that zero-dim array gets unboxed\r\n res = cat_rev > np.array(\"b\")\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n\r\nclass TestCategoricalOps:\r\n def test_compare_frame(self):\r\n # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame\r\n data = [\"a\", \"b\", 2, \"a\"]\r\n cat = Categorical(data)\r\n\r\n df = DataFrame(cat)\r\n\r\n result = cat == df.T\r\n expected = DataFrame([[True, True, True, True]])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = cat[::-1] != df.T\r\n expected = DataFrame([[False, True, True, False]])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_compare_frame_raises(self, all_compare_operators):\r\n # alignment raises unless we transpose\r\n op = getattr(operator, all_compare_operators)\r\n cat = Categorical([\"a\", \"b\", 2, \"a\"])\r\n df = DataFrame(cat)\r\n msg = \"Unable to coerce to Series, length must be 1: given 4\"\r\n with pytest.raises(ValueError, match=msg):\r\n op(cat, df)\r\n\r\n def test_datetime_categorical_comparison(self):\r\n dt_cat = Categorical(date_range(\"2014-01-01\", periods=3), ordered=True)\r\n tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))\r\n tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))\r\n\r\n def test_reflected_comparison_with_scalars(self):\r\n # GH8658\r\n cat = Categorical([1, 2, 3], ordered=True)\r\n tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))\r\n tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))\r\n\r\n def test_comparison_with_unknown_scalars(self):\r\n # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057\r\n # and following comparisons with scalars not in categories should raise\r\n # for unequal comps, but not for equal/not equal\r\n cat = Categorical([1, 2, 3], ordered=True)\r\n\r\n msg = \"Invalid comparison between dtype=category and int\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat < 4\r\n with pytest.raises(TypeError, match=msg):\r\n cat > 4\r\n with pytest.raises(TypeError, match=msg):\r\n 4 < cat\r\n with pytest.raises(TypeError, match=msg):\r\n 4 > cat\r\n\r\n tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))\r\n tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))\r\n\r\n def test_comparison_with_tuple(self):\r\n cat = Categorical(np.array([\"foo\", (0, 1), 3, (0, 1)], dtype=object))\r\n\r\n result = cat == \"foo\"\r\n expected = np.array([True, False, False, False], dtype=bool)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = cat == (0, 1)\r\n expected = np.array([False, True, False, True], dtype=bool)\r\n tm.assert_numpy_array_equal(result, 
expected)\r\n\r\n result = cat != (0, 1)\r\n tm.assert_numpy_array_equal(result, ~expected)\r\n\r\n def test_comparison_of_ordered_categorical_with_nan_to_scalar(\r\n self, compare_operators_no_eq_ne\r\n ):\r\n # https://github.com/pandas-dev/pandas/issues/26504\r\n # BUG: fix ordered categorical comparison with missing values (#26504 )\r\n # and following comparisons with scalars in categories with missing\r\n # values should be evaluated as False\r\n\r\n cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)\r\n scalar = 2\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)\r\n actual = getattr(cat, compare_operators_no_eq_ne)(scalar)\r\n tm.assert_numpy_array_equal(actual, expected)\r\n\r\n def test_comparison_of_ordered_categorical_with_nan_to_listlike(\r\n self, compare_operators_no_eq_ne\r\n ):\r\n # https://github.com/pandas-dev/pandas/issues/26504\r\n # and following comparisons of missing values in ordered Categorical\r\n # with listlike should be evaluated as False\r\n\r\n cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)\r\n other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)\r\n actual = getattr(cat, compare_operators_no_eq_ne)(other)\r\n tm.assert_numpy_array_equal(actual, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"data,reverse,base\",\r\n [(list(\"abc\"), list(\"cba\"), list(\"bbb\")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],\r\n )\r\n def test_comparisons(self, data, reverse, base):\r\n cat_rev = Series(Categorical(data, categories=reverse, ordered=True))\r\n cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))\r\n cat = Series(Categorical(data, ordered=True))\r\n cat_base = Series(\r\n Categorical(base, categories=cat.cat.categories, ordered=True)\r\n )\r\n s = Series(base)\r\n a = np.array(base)\r\n\r\n # comparisons need to take categories ordering into account\r\n res_rev = cat_rev > cat_rev_base\r\n exp_rev = Series([True, False, False])\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n\r\n res_rev = cat_rev < cat_rev_base\r\n exp_rev = Series([False, False, True])\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n\r\n res = cat > cat_base\r\n exp = Series([False, False, True])\r\n tm.assert_series_equal(res, exp)\r\n\r\n scalar = base[1]\r\n res = cat > scalar\r\n exp = Series([False, False, True])\r\n exp2 = cat.values > scalar\r\n tm.assert_series_equal(res, exp)\r\n tm.assert_numpy_array_equal(res.values, exp2)\r\n res_rev = cat_rev > scalar\r\n exp_rev = Series([True, False, False])\r\n exp_rev2 = cat_rev.values > scalar\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n tm.assert_numpy_array_equal(res_rev.values, exp_rev2)\r\n\r\n # Only categories with same categories can be compared\r\n msg = \"Categoricals can only be compared if 'categories' are the same\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_rev\r\n\r\n # categorical cannot be compared to Series or numpy array, and also\r\n # not the other way around\r\n msg = (\r\n \"Cannot compare a Categorical for op __gt__ with type \"\r\n r\"<class 'numpy\\.ndarray'>\"\r\n )\r\n with pytest.raises(TypeError, match=msg):\r\n cat > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat > a\r\n with 
pytest.raises(TypeError, match=msg):\r\n cat_rev > a\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat_rev\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n a < cat\r\n with pytest.raises(TypeError, match=msg):\r\n a < cat_rev\r\n\r\n @pytest.mark.parametrize(\r\n \"ctor\",\r\n [\r\n lambda *args, **kwargs: Categorical(*args, **kwargs),\r\n lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),\r\n ],\r\n )\r\n def test_unordered_different_order_equal(self, ctor):\r\n # https://github.com/pandas-dev/pandas/issues/16014\r\n c1 = ctor([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"a\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 == c2).all()\r\n\r\n c1 = ctor([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"b\", \"a\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 != c2).all()\r\n\r\n c1 = ctor([\"a\", \"a\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"b\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 != c2).all()\r\n\r\n c1 = ctor([\"a\", \"a\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"a\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n result = c1 == c2\r\n tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))\r\n\r\n def test_unordered_different_categories_raises(self):\r\n c1 = Categorical([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = Categorical([\"a\", \"c\"], categories=[\"c\", \"a\"], ordered=False)\r\n\r\n with pytest.raises(TypeError, match=(\"Categoricals can only be compared\")):\r\n c1 == c2\r\n\r\n def test_compare_different_lengths(self):\r\n c1 = Categorical([], categories=[\"a\", \"b\"])\r\n c2 = Categorical([], categories=[\"a\"])\r\n\r\n msg = \"Categoricals can only be compared if 'categories' are the same.\"\r\n with pytest.raises(TypeError, match=msg):\r\n c1 == c2\r\n\r\n def test_compare_unordered_different_order(self):\r\n # https://github.com/pandas-dev/pandas/issues/16603#issuecomment-\r\n # 349290078\r\n a = Categorical([\"a\"], categories=[\"a\", \"b\"])\r\n b = Categorical([\"b\"], categories=[\"b\", \"a\"])\r\n assert not a.equals(b)\r\n\r\n def test_numeric_like_ops(self):\r\n\r\n df = DataFrame({\"value\": np.random.randint(0, 10000, 100)})\r\n labels = [f\"{i} - {i + 499}\" for i in range(0, 10000, 500)]\r\n cat_labels = Categorical(labels, labels)\r\n\r\n df = df.sort_values(by=[\"value\"], ascending=True)\r\n df[\"value_group\"] = pd.cut(\r\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\r\n )\r\n\r\n # numeric ops should not succeed\r\n for op, str_rep in [\r\n (\"__add__\", r\"\\+\"),\r\n (\"__sub__\", \"-\"),\r\n (\"__mul__\", r\"\\*\"),\r\n (\"__truediv__\", \"/\"),\r\n ]:\r\n msg = f\"Series cannot perform the operation {str_rep}|unsupported operand\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(df, op)(df)\r\n\r\n # reduction ops should not succeed (unless specifically defined, e.g.\r\n # min/max)\r\n s = df[\"value_group\"]\r\n for op in [\"kurt\", \"skew\", \"var\", \"std\", \"mean\", \"sum\", \"median\"]:\r\n msg = f\"'Categorical' does not implement reduction '{op}'\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(s, op)(numeric_only=False)\r\n\r\n # mad technically works because it takes always the numeric data\r\n\r\n # numpy ops\r\n s = Series(Categorical([1, 2, 3, 4]))\r\n with pytest.raises(\r\n TypeError, match=\"'Categorical' does not 
implement reduction 'sum'\"\r\n ):\r\n np.sum(s)\r\n\r\n # numeric ops on a Series\r\n for op, str_rep in [\r\n (\"__add__\", r\"\\+\"),\r\n (\"__sub__\", \"-\"),\r\n (\"__mul__\", r\"\\*\"),\r\n (\"__truediv__\", \"/\"),\r\n ]:\r\n msg = f\"Series cannot perform the operation {str_rep}|unsupported operand\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(s, op)(2)\r\n\r\n # invalid ufunc\r\n msg = \"Object with dtype category cannot perform the numpy op log\"\r\n with pytest.raises(TypeError, match=msg):\r\n np.log(s)\r\n",
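The row above is a pandas test module for comparison operators on Categorical. A small sketch of the ordering rules it exercises (illustrative only; behaviour as asserted in the tests above):

import pandas as pd

# categories listed from "largest" to "smallest": here c > b > a
cat = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
print(cat > "b")  # [ True False False] -- ordering comes from `categories`

try:
    cat > "z"  # inequality against a scalar that is not a category
except TypeError as err:
    print("rejected:", err)

# equality against an unknown scalar does not raise, it is simply all-False
print(cat == "z")  # [False False False]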
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDataFrameFilter:\r\n def test_filter(self, float_frame, float_string_frame):\r\n # Items\r\n filtered = float_frame.filter([\"A\", \"B\", \"E\"])\r\n assert len(filtered.columns) == 2\r\n assert \"E\" not in filtered\r\n\r\n filtered = float_frame.filter([\"A\", \"B\", \"E\"], axis=\"columns\")\r\n assert len(filtered.columns) == 2\r\n assert \"E\" not in filtered\r\n\r\n # Other axis\r\n idx = float_frame.index[0:4]\r\n filtered = float_frame.filter(idx, axis=\"index\")\r\n expected = float_frame.reindex(index=idx)\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n # like\r\n fcopy = float_frame.copy()\r\n fcopy[\"AA\"] = 1\r\n\r\n filtered = fcopy.filter(like=\"A\")\r\n assert len(filtered.columns) == 2\r\n assert \"AA\" in filtered\r\n\r\n # like with ints in column names\r\n df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, \"_A\", \"_B\"])\r\n filtered = df.filter(like=\"_\")\r\n assert len(filtered.columns) == 2\r\n\r\n # regex with ints in column names\r\n # from PR #10384\r\n df = DataFrame(0.0, index=[0, 1, 2], columns=[\"A1\", 1, \"B\", 2, \"C\"])\r\n expected = DataFrame(\r\n 0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)\r\n )\r\n filtered = df.filter(regex=\"^[0-9]+$\")\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, \"0\", 1, \"1\"])\r\n # shouldn't remove anything\r\n filtered = expected.filter(regex=\"^[0-9]+$\")\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n # pass in None\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter()\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter(items=None)\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter(axis=1)\r\n\r\n # test mutually exclusive arguments\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\", like=\"bbi\")\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\", axis=1)\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\")\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], like=\"bbi\", axis=0)\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], like=\"bbi\")\r\n\r\n # objects\r\n filtered = float_string_frame.filter(like=\"foo\")\r\n assert \"foo\" in filtered\r\n\r\n # unicode columns, won't ascii-encode\r\n df = float_frame.rename(columns={\"B\": \"\\u2202\"})\r\n filtered = df.filter(like=\"C\")\r\n assert \"C\" in filtered\r\n\r\n def test_filter_regex_search(self, float_frame):\r\n fcopy = float_frame.copy()\r\n fcopy[\"AA\"] = 1\r\n\r\n # regex\r\n filtered = fcopy.filter(regex=\"[A]+\")\r\n assert len(filtered.columns) == 2\r\n assert \"AA\" in filtered\r\n\r\n # doesn't have to be at beginning\r\n df = DataFrame(\r\n {\"aBBa\": [1, 2], \"BBaBB\": [1, 2], \"aCCa\": [1, 2], \"aCCaBB\": [1, 2]}\r\n )\r\n\r\n result = df.filter(regex=\"BB\")\r\n exp = df[[x for x in df.columns if \"BB\" in x]]\r\n tm.assert_frame_equal(result, exp)\r\n\r\n @pytest.mark.parametrize(\r\n \"name,expected\",\r\n [\r\n (\"a\", DataFrame({\"a\": [1, 2]})),\r\n (\"a\", 
DataFrame({\"a\": [1, 2]})),\r\n (\"あ\", DataFrame({\"あ\": [3, 4]})),\r\n ],\r\n )\r\n def test_filter_unicode(self, name, expected):\r\n # GH13101\r\n df = DataFrame({\"a\": [1, 2], \"あ\": [3, 4]})\r\n\r\n tm.assert_frame_equal(df.filter(like=name), expected)\r\n tm.assert_frame_equal(df.filter(regex=name), expected)\r\n\r\n @pytest.mark.parametrize(\"name\", [\"a\", \"a\"])\r\n def test_filter_bytestring(self, name):\r\n # GH13101\r\n df = DataFrame({b\"a\": [1, 2], b\"b\": [3, 4]})\r\n expected = DataFrame({b\"a\": [1, 2]})\r\n\r\n tm.assert_frame_equal(df.filter(like=name), expected)\r\n tm.assert_frame_equal(df.filter(regex=name), expected)\r\n\r\n def test_filter_corner(self):\r\n empty = DataFrame()\r\n\r\n result = empty.filter([])\r\n tm.assert_frame_equal(result, empty)\r\n\r\n result = empty.filter(like=\"foo\")\r\n tm.assert_frame_equal(result, empty)\r\n\r\n def test_filter_regex_non_string(self):\r\n # GH#5798 trying to filter on non-string columns should drop,\r\n # not raise\r\n df = DataFrame(np.random.random((3, 2)), columns=[\"STRING\", 123])\r\n result = df.filter(regex=\"STRING\")\r\n expected = df[[\"STRING\"]]\r\n tm.assert_frame_equal(result, expected)\r\n",
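The row above tests DataFrame.filter. A short sketch of its three mutually exclusive selection modes, using invented toy data:

import pandas as pd

df = pd.DataFrame({"one": [1], "two": [2], "three": [3]}, index=["mouse"])

print(df.filter(items=["one", "three"]).columns.tolist())  # ['one', 'three']
print(df.filter(like="t").columns.tolist())                # ['two', 'three']
print(df.filter(regex="e$").columns.tolist())              # ['one', 'three']

# the same keywords can act on the row labels instead of the columns
print(df.filter(like="ou", axis="index").index.tolist())   # ['mouse']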
"\"\"\"Base classes for all estimators.\"\"\"\r\n\r\n# Author: Gael Varoquaux <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport copy\r\nimport warnings\r\nfrom collections import defaultdict\r\nimport platform\r\nimport inspect\r\nimport re\r\n\r\nimport numpy as np\r\n\r\nfrom . import __version__\r\nfrom ._config import get_config\r\nfrom .utils import _IS_32BIT\r\nfrom .utils._tags import (\r\n _DEFAULT_TAGS,\r\n _safe_tags,\r\n)\r\nfrom .utils.validation import check_X_y\r\nfrom .utils.validation import check_array\r\nfrom .utils.validation import _check_y\r\nfrom .utils.validation import _num_features\r\nfrom .utils.validation import _check_feature_names_in\r\nfrom .utils._estimator_html_repr import estimator_html_repr\r\nfrom .utils.validation import _get_feature_names\r\n\r\n\r\ndef clone(estimator, *, safe=True):\r\n \"\"\"Constructs a new unfitted estimator with the same parameters.\r\n\r\n Clone does a deep copy of the model in an estimator\r\n without actually copying attached data. It yields a new estimator\r\n with the same parameters that has not been fitted on any data.\r\n\r\n If the estimator's `random_state` parameter is an integer (or if the\r\n estimator doesn't have a `random_state` parameter), an *exact clone* is\r\n returned: the clone and the original estimator will give the exact same\r\n results. Otherwise, *statistical clone* is returned: the clone might\r\n yield different results from the original estimator. More details can be\r\n found in :ref:`randomness`.\r\n\r\n Parameters\r\n ----------\r\n estimator : {list, tuple, set} of estimator instance or a single \\\r\n estimator instance\r\n The estimator or group of estimators to be cloned.\r\n\r\n safe : bool, default=True\r\n If safe is False, clone will fall back to a deep copy on objects\r\n that are not estimators.\r\n\r\n \"\"\"\r\n estimator_type = type(estimator)\r\n # XXX: not handling dictionaries\r\n if estimator_type in (list, tuple, set, frozenset):\r\n return estimator_type([clone(e, safe=safe) for e in estimator])\r\n elif not hasattr(estimator, \"get_params\") or isinstance(estimator, type):\r\n if not safe:\r\n return copy.deepcopy(estimator)\r\n else:\r\n if isinstance(estimator, type):\r\n raise TypeError(\r\n \"Cannot clone object. 
\"\r\n + \"You should provide an instance of \"\r\n + \"scikit-learn estimator instead of a class.\"\r\n )\r\n else:\r\n raise TypeError(\r\n \"Cannot clone object '%s' (type %s): \"\r\n \"it does not seem to be a scikit-learn \"\r\n \"estimator as it does not implement a \"\r\n \"'get_params' method.\" % (repr(estimator), type(estimator))\r\n )\r\n\r\n klass = estimator.__class__\r\n new_object_params = estimator.get_params(deep=False)\r\n for name, param in new_object_params.items():\r\n new_object_params[name] = clone(param, safe=False)\r\n new_object = klass(**new_object_params)\r\n params_set = new_object.get_params(deep=False)\r\n\r\n # quick sanity check of the parameters of the clone\r\n for name in new_object_params:\r\n param1 = new_object_params[name]\r\n param2 = params_set[name]\r\n if param1 is not param2:\r\n raise RuntimeError(\r\n \"Cannot clone object %s, as the constructor \"\r\n \"either does not set or modifies parameter %s\" % (estimator, name)\r\n )\r\n return new_object\r\n\r\n\r\ndef _pprint(params, offset=0, printer=repr):\r\n \"\"\"Pretty print the dictionary 'params'\r\n\r\n Parameters\r\n ----------\r\n params : dict\r\n The dictionary to pretty print\r\n\r\n offset : int, default=0\r\n The offset in characters to add at the begin of each line.\r\n\r\n printer : callable, default=repr\r\n The function to convert entries to strings, typically\r\n the builtin str or repr\r\n\r\n \"\"\"\r\n # Do a multi-line justified repr:\r\n options = np.get_printoptions()\r\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\r\n params_list = list()\r\n this_line_length = offset\r\n line_sep = \",\\n\" + (1 + offset // 2) * \" \"\r\n for i, (k, v) in enumerate(sorted(params.items())):\r\n if type(v) is float:\r\n # use str for representing floating point numbers\r\n # this way we get consistent representation across\r\n # architectures and versions.\r\n this_repr = \"%s=%s\" % (k, str(v))\r\n else:\r\n # use repr of the rest\r\n this_repr = \"%s=%s\" % (k, printer(v))\r\n if len(this_repr) > 500:\r\n this_repr = this_repr[:300] + \"...\" + this_repr[-100:]\r\n if i > 0:\r\n if this_line_length + len(this_repr) >= 75 or \"\\n\" in this_repr:\r\n params_list.append(line_sep)\r\n this_line_length = len(line_sep)\r\n else:\r\n params_list.append(\", \")\r\n this_line_length += 2\r\n params_list.append(this_repr)\r\n this_line_length += len(this_repr)\r\n\r\n np.set_printoptions(**options)\r\n lines = \"\".join(params_list)\r\n # Strip trailing space to avoid nightmare in doctests\r\n lines = \"\\n\".join(l.rstrip(\" \") for l in lines.split(\"\\n\"))\r\n return lines\r\n\r\n\r\nclass BaseEstimator:\r\n \"\"\"Base class for all estimators in scikit-learn.\r\n\r\n Notes\r\n -----\r\n All estimators should specify all the parameters that can be set\r\n at the class level in their ``__init__`` as explicit keyword\r\n arguments (no ``*args`` or ``**kwargs``).\r\n \"\"\"\r\n\r\n @classmethod\r\n def _get_param_names(cls):\r\n \"\"\"Get parameter names for the estimator\"\"\"\r\n # fetch the constructor or the original constructor before\r\n # deprecation wrapping if any\r\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\r\n if init is object.__init__:\r\n # No explicit constructor to introspect\r\n return []\r\n\r\n # introspect the constructor arguments to find the model parameters\r\n # to represent\r\n init_signature = inspect.signature(init)\r\n # Consider the constructor parameters excluding 'self'\r\n parameters = [\r\n p\r\n for p in 
init_signature.parameters.values()\r\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\r\n ]\r\n for p in parameters:\r\n if p.kind == p.VAR_POSITIONAL:\r\n raise RuntimeError(\r\n \"scikit-learn estimators should always \"\r\n \"specify their parameters in the signature\"\r\n \" of their __init__ (no varargs).\"\r\n \" %s with constructor %s doesn't \"\r\n \" follow this convention.\" % (cls, init_signature)\r\n )\r\n # Extract and sort argument names excluding 'self'\r\n return sorted([p.name for p in parameters])\r\n\r\n def get_params(self, deep=True):\r\n \"\"\"\r\n Get parameters for this estimator.\r\n\r\n Parameters\r\n ----------\r\n deep : bool, default=True\r\n If True, will return the parameters for this estimator and\r\n contained subobjects that are estimators.\r\n\r\n Returns\r\n -------\r\n params : dict\r\n Parameter names mapped to their values.\r\n \"\"\"\r\n out = dict()\r\n for key in self._get_param_names():\r\n value = getattr(self, key)\r\n if deep and hasattr(value, \"get_params\"):\r\n deep_items = value.get_params().items()\r\n out.update((key + \"__\" + k, val) for k, val in deep_items)\r\n out[key] = value\r\n return out\r\n\r\n def set_params(self, **params):\r\n \"\"\"\r\n Set the parameters of this estimator.\r\n\r\n The method works on simple estimators as well as on nested objects\r\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\r\n parameters of the form ``<component>__<parameter>`` so that it's\r\n possible to update each component of a nested object.\r\n\r\n Parameters\r\n ----------\r\n **params : dict\r\n Estimator parameters.\r\n\r\n Returns\r\n -------\r\n self : estimator instance\r\n Estimator instance.\r\n \"\"\"\r\n if not params:\r\n # Simple optimization to gain speed (inspect is slow)\r\n return self\r\n valid_params = self.get_params(deep=True)\r\n\r\n nested_params = defaultdict(dict) # grouped by prefix\r\n for key, value in params.items():\r\n key, delim, sub_key = key.partition(\"__\")\r\n if key not in valid_params:\r\n raise ValueError(\r\n \"Invalid parameter %s for estimator %s. \"\r\n \"Check the list of available parameters \"\r\n \"with `estimator.get_params().keys()`.\" % (key, self)\r\n )\r\n\r\n if delim:\r\n nested_params[key][sub_key] = value\r\n else:\r\n setattr(self, key, value)\r\n valid_params[key] = value\r\n\r\n for key, sub_params in nested_params.items():\r\n valid_params[key].set_params(**sub_params)\r\n\r\n return self\r\n\r\n def __repr__(self, N_CHAR_MAX=700):\r\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\r\n # characters to render. 
We pass it as an optional parameter to ease\r\n # the tests.\r\n\r\n from .utils._pprint import _EstimatorPrettyPrinter\r\n\r\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\r\n\r\n # use ellipsis for sequences with a lot of elements\r\n pp = _EstimatorPrettyPrinter(\r\n compact=True,\r\n indent=1,\r\n indent_at_name=True,\r\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,\r\n )\r\n\r\n repr_ = pp.pformat(self)\r\n\r\n # Use bruteforce ellipsis when there are a lot of non-blank characters\r\n n_nonblank = len(\"\".join(repr_.split()))\r\n if n_nonblank > N_CHAR_MAX:\r\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\r\n regex = r\"^(\\s*\\S){%d}\" % lim\r\n # The regex '^(\\s*\\S){%d}' % n\r\n # matches from the start of the string until the nth non-blank\r\n # character:\r\n # - ^ matches the start of string\r\n # - (pattern){n} matches n repetitions of pattern\r\n # - \\s*\\S matches a non-blank char following zero or more blanks\r\n left_lim = re.match(regex, repr_).end()\r\n right_lim = re.match(regex, repr_[::-1]).end()\r\n\r\n if \"\\n\" in repr_[left_lim:-right_lim]:\r\n # The left side and right side aren't on the same line.\r\n # To avoid weird cuts, e.g.:\r\n # categoric...ore',\r\n # we need to start the right side with an appropriate newline\r\n # character so that it renders properly as:\r\n # categoric...\r\n # handle_unknown='ignore',\r\n # so we add [^\\n]*\\n which matches until the next \\n\r\n regex += r\"[^\\n]*\\n\"\r\n right_lim = re.match(regex, repr_[::-1]).end()\r\n\r\n ellipsis = \"...\"\r\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\r\n # Only add ellipsis if it results in a shorter repr\r\n repr_ = repr_[:left_lim] + \"...\" + repr_[-right_lim:]\r\n\r\n return repr_\r\n\r\n def __getstate__(self):\r\n try:\r\n state = super().__getstate__()\r\n except AttributeError:\r\n state = self.__dict__.copy()\r\n\r\n if type(self).__module__.startswith(\"sklearn.\"):\r\n return dict(state.items(), _sklearn_version=__version__)\r\n else:\r\n return state\r\n\r\n def __setstate__(self, state):\r\n if type(self).__module__.startswith(\"sklearn.\"):\r\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\r\n if pickle_version != __version__:\r\n warnings.warn(\r\n \"Trying to unpickle estimator {0} from version {1} when \"\r\n \"using version {2}. This might lead to breaking code or \"\r\n \"invalid results. Use at your own risk. \"\r\n \"For more info please refer to:\\n\"\r\n \"https://scikit-learn.org/stable/modules/model_persistence\"\r\n \".html#security-maintainability-limitations\".format(\r\n self.__class__.__name__, pickle_version, __version__\r\n ),\r\n UserWarning,\r\n )\r\n try:\r\n super().__setstate__(state)\r\n except AttributeError:\r\n self.__dict__.update(state)\r\n\r\n def _more_tags(self):\r\n return _DEFAULT_TAGS\r\n\r\n def _get_tags(self):\r\n collected_tags = {}\r\n for base_class in reversed(inspect.getmro(self.__class__)):\r\n if hasattr(base_class, \"_more_tags\"):\r\n # need the if because mixins might not have _more_tags\r\n # but might do redundant work in estimators\r\n # (i.e. 
calling more tags on BaseEstimator multiple times)\r\n more_tags = base_class._more_tags(self)\r\n collected_tags.update(more_tags)\r\n return collected_tags\r\n\r\n def _check_n_features(self, X, reset):\r\n \"\"\"Set the `n_features_in_` attribute, or check against it.\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\r\n The input samples.\r\n reset : bool\r\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\r\n If False and the attribute exists, then check that it is equal to\r\n `X.shape[1]`. If False and the attribute does *not* exist, then\r\n the check is skipped.\r\n .. note::\r\n It is recommended to call reset=True in `fit` and in the first\r\n call to `partial_fit`. All other methods that validate `X`\r\n should set `reset=False`.\r\n \"\"\"\r\n try:\r\n n_features = _num_features(X)\r\n except TypeError as e:\r\n if not reset and hasattr(self, \"n_features_in_\"):\r\n raise ValueError(\r\n \"X does not contain any features, but \"\r\n f\"{self.__class__.__name__} is expecting \"\r\n f\"{self.n_features_in_} features\"\r\n ) from e\r\n # If the number of features is not defined and reset=True,\r\n # then we skip this check\r\n return\r\n\r\n if reset:\r\n self.n_features_in_ = n_features\r\n return\r\n\r\n if not hasattr(self, \"n_features_in_\"):\r\n # Skip this check if the expected number of expected input features\r\n # was not recorded by calling fit first. This is typically the case\r\n # for stateless transformers.\r\n return\r\n\r\n if n_features != self.n_features_in_:\r\n raise ValueError(\r\n f\"X has {n_features} features, but {self.__class__.__name__} \"\r\n f\"is expecting {self.n_features_in_} features as input.\"\r\n )\r\n\r\n def _check_feature_names(self, X, *, reset):\r\n \"\"\"Set or check the `feature_names_in_` attribute.\r\n\r\n .. versionadded:: 1.0\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, dataframe} of shape (n_samples, n_features)\r\n The input samples.\r\n\r\n reset : bool\r\n Whether to reset the `feature_names_in_` attribute.\r\n If False, the input will be checked for consistency with\r\n feature names of data provided when reset was last True.\r\n .. note::\r\n It is recommended to call `reset=True` in `fit` and in the first\r\n call to `partial_fit`. 
All other methods that validate `X`\r\n should set `reset=False`.\r\n \"\"\"\r\n\r\n if reset:\r\n feature_names_in = _get_feature_names(X)\r\n if feature_names_in is not None:\r\n self.feature_names_in_ = feature_names_in\r\n elif hasattr(self, \"feature_names_in_\"):\r\n # Delete the attribute when the estimator is fitted on a new dataset\r\n # that has no feature names.\r\n delattr(self, \"feature_names_in_\")\r\n return\r\n\r\n fitted_feature_names = getattr(self, \"feature_names_in_\", None)\r\n X_feature_names = _get_feature_names(X)\r\n\r\n if fitted_feature_names is None and X_feature_names is None:\r\n # no feature names seen in fit and in X\r\n return\r\n\r\n if X_feature_names is not None and fitted_feature_names is None:\r\n warnings.warn(\r\n f\"X has feature names, but {self.__class__.__name__} was fitted without\"\r\n \" feature names\"\r\n )\r\n return\r\n\r\n if X_feature_names is None and fitted_feature_names is not None:\r\n warnings.warn(\r\n \"X does not have valid feature names, but\"\r\n f\" {self.__class__.__name__} was fitted with feature names\"\r\n )\r\n return\r\n\r\n # validate the feature names against the `feature_names_in_` attribute\r\n if len(fitted_feature_names) != len(X_feature_names) or np.any(\r\n fitted_feature_names != X_feature_names\r\n ):\r\n message = (\r\n \"The feature names should match those that were \"\r\n \"passed during fit. Starting version 1.2, an error will be raised.\\n\"\r\n )\r\n fitted_feature_names_set = set(fitted_feature_names)\r\n X_feature_names_set = set(X_feature_names)\r\n\r\n unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)\r\n missing_names = sorted(fitted_feature_names_set - X_feature_names_set)\r\n\r\n def add_names(names):\r\n output = \"\"\r\n max_n_names = 5\r\n for i, name in enumerate(names):\r\n if i >= max_n_names:\r\n output += \"- ...\\n\"\r\n break\r\n output += f\"- {name}\\n\"\r\n return output\r\n\r\n if unexpected_names:\r\n message += \"Feature names unseen at fit time:\\n\"\r\n message += add_names(unexpected_names)\r\n\r\n if missing_names:\r\n message += \"Feature names seen at fit time, yet now missing:\\n\"\r\n message += add_names(missing_names)\r\n\r\n if not missing_names and not missing_names:\r\n message += (\r\n \"Feature names must be in the same order as they were in fit.\\n\"\r\n )\r\n\r\n warnings.warn(message, FutureWarning)\r\n\r\n def _validate_data(\r\n self,\r\n X=\"no_validation\",\r\n y=\"no_validation\",\r\n reset=True,\r\n validate_separately=False,\r\n **check_params,\r\n ):\r\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix, dataframe} of shape \\\r\n (n_samples, n_features), default='no validation'\r\n The input samples.\r\n If `'no_validation'`, no validation is performed on `X`. This is\r\n useful for meta-estimator which can delegate input validation to\r\n their underlying estimator(s). In that case `y` must be passed and\r\n the only accepted `check_params` are `multi_output` and\r\n `y_numeric`.\r\n\r\n y : array-like of shape (n_samples,), default='no_validation'\r\n The targets.\r\n\r\n - If `None`, `check_array` is called on `X`. If the estimator's\r\n requires_y tag is True, then an error will be raised.\r\n - If `'no_validation'`, `check_array` is called on `X` and the\r\n estimator's requires_y tag is ignored. This is a default\r\n placeholder and is never meant to be explicitly set. 
In that case\r\n `X` must be passed.\r\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\r\n checked with either `check_array` or `check_X_y` depending on\r\n `validate_separately`.\r\n\r\n reset : bool, default=True\r\n Whether to reset the `n_features_in_` attribute.\r\n If False, the input will be checked for consistency with data\r\n provided when reset was last True.\r\n .. note::\r\n It is recommended to call reset=True in `fit` and in the first\r\n call to `partial_fit`. All other methods that validate `X`\r\n should set `reset=False`.\r\n validate_separately : False or tuple of dicts, default=False\r\n Only used if y is not None.\r\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\r\n to be used for calling check_array() on X and y respectively.\r\n **check_params : kwargs\r\n Parameters passed to :func:`sklearn.utils.check_array` or\r\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\r\n is not False.\r\n\r\n Returns\r\n -------\r\n out : {ndarray, sparse matrix} or tuple of these\r\n The validated input. A tuple is returned if both `X` and `y` are\r\n validated.\r\n \"\"\"\r\n self._check_feature_names(X, reset=reset)\r\n\r\n if y is None and self._get_tags()[\"requires_y\"]:\r\n raise ValueError(\r\n f\"This {self.__class__.__name__} estimator \"\r\n \"requires y to be passed, but the target y is None.\"\r\n )\r\n\r\n no_val_X = isinstance(X, str) and X == \"no_validation\"\r\n no_val_y = y is None or isinstance(y, str) and y == \"no_validation\"\r\n\r\n if no_val_X and no_val_y:\r\n raise ValueError(\"Validation should be done on X, y or both.\")\r\n elif not no_val_X and no_val_y:\r\n X = check_array(X, **check_params)\r\n out = X\r\n elif no_val_X and not no_val_y:\r\n y = _check_y(y, **check_params)\r\n out = y\r\n else:\r\n if validate_separately:\r\n # We need this because some estimators validate X and y\r\n # separately, and in general, separately calling check_array()\r\n # on X and y isn't equivalent to just calling check_X_y()\r\n # :(\r\n check_X_params, check_y_params = validate_separately\r\n X = check_array(X, **check_X_params)\r\n y = check_array(y, **check_y_params)\r\n else:\r\n X, y = check_X_y(X, y, **check_params)\r\n out = X, y\r\n\r\n if not no_val_X and check_params.get(\"ensure_2d\", True):\r\n self._check_n_features(X, reset=reset)\r\n\r\n return out\r\n\r\n @property\r\n def _repr_html_(self):\r\n \"\"\"HTML representation of estimator.\r\n\r\n This is redundant with the logic of `_repr_mimebundle_`. 
The latter\r\n should be favorted in the long term, `_repr_html_` is only\r\n implemented for consumers who do not interpret `_repr_mimbundle_`.\r\n \"\"\"\r\n if get_config()[\"display\"] != \"diagram\":\r\n raise AttributeError(\r\n \"_repr_html_ is only defined when the \"\r\n \"'display' configuration option is set to \"\r\n \"'diagram'\"\r\n )\r\n return self._repr_html_inner\r\n\r\n def _repr_html_inner(self):\r\n \"\"\"This function is returned by the @property `_repr_html_` to make\r\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\r\n on `get_config()[\"display\"]`.\r\n \"\"\"\r\n return estimator_html_repr(self)\r\n\r\n def _repr_mimebundle_(self, **kwargs):\r\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\r\n output = {\"text/plain\": repr(self)}\r\n if get_config()[\"display\"] == \"diagram\":\r\n output[\"text/html\"] = estimator_html_repr(self)\r\n return output\r\n\r\n\r\nclass ClassifierMixin:\r\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"classifier\"\r\n\r\n def score(self, X, y, sample_weight=None):\r\n \"\"\"\r\n Return the mean accuracy on the given test data and labels.\r\n\r\n In multi-label classification, this is the subset accuracy\r\n which is a harsh metric since you require for each sample that\r\n each label set be correctly predicted.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\r\n True labels for `X`.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n score : float\r\n Mean accuracy of ``self.predict(X)`` wrt. `y`.\r\n \"\"\"\r\n from .metrics import accuracy_score\r\n\r\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\r\n\r\n def _more_tags(self):\r\n return {\"requires_y\": True}\r\n\r\n\r\nclass RegressorMixin:\r\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"regressor\"\r\n\r\n def score(self, X, y, sample_weight=None):\r\n \"\"\"Return the coefficient of determination of the prediction.\r\n\r\n The coefficient of determination :math:`R^2` is defined as\r\n :math:`(1 - \\\\frac{u}{v})`, where :math:`u` is the residual\r\n sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`\r\n is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.\r\n The best possible score is 1.0 and it can be negative (because the\r\n model can be arbitrarily worse). A constant model that always predicts\r\n the expected value of `y`, disregarding the input features, would get\r\n a :math:`R^2` score of 0.0.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples. For some estimators this may be a precomputed\r\n kernel matrix or a list of generic objects instead with shape\r\n ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``\r\n is the number of samples used in the fitting for the estimator.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\r\n True values for `X`.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n score : float\r\n :math:`R^2` of ``self.predict(X)`` wrt. 
`y`.\r\n\r\n Notes\r\n -----\r\n The :math:`R^2` score used when calling ``score`` on a regressor uses\r\n ``multioutput='uniform_average'`` from version 0.23 to keep consistent\r\n with default value of :func:`~sklearn.metrics.r2_score`.\r\n This influences the ``score`` method of all the multioutput\r\n regressors (except for\r\n :class:`~sklearn.multioutput.MultiOutputRegressor`).\r\n \"\"\"\r\n\r\n from .metrics import r2_score\r\n\r\n y_pred = self.predict(X)\r\n return r2_score(y, y_pred, sample_weight=sample_weight)\r\n\r\n def _more_tags(self):\r\n return {\"requires_y\": True}\r\n\r\n\r\nclass ClusterMixin:\r\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"clusterer\"\r\n\r\n def fit_predict(self, X, y=None):\r\n \"\"\"\r\n Perform clustering on `X` and returns cluster labels.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Input data.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n labels : ndarray of shape (n_samples,), dtype=np.int64\r\n Cluster labels.\r\n \"\"\"\r\n # non-optimized default implementation; override when a better\r\n # method is possible for a given clustering algorithm\r\n self.fit(X)\r\n return self.labels_\r\n\r\n def _more_tags(self):\r\n return {\"preserves_dtype\": []}\r\n\r\n\r\nclass BiclusterMixin:\r\n \"\"\"Mixin class for all bicluster estimators in scikit-learn.\"\"\"\r\n\r\n @property\r\n def biclusters_(self):\r\n \"\"\"Convenient way to get row and column indicators together.\r\n\r\n Returns the ``rows_`` and ``columns_`` members.\r\n \"\"\"\r\n return self.rows_, self.columns_\r\n\r\n def get_indices(self, i):\r\n \"\"\"Row and column indices of the `i`'th bicluster.\r\n\r\n Only works if ``rows_`` and ``columns_`` attributes exist.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n\r\n Returns\r\n -------\r\n row_ind : ndarray, dtype=np.intp\r\n Indices of rows in the dataset that belong to the bicluster.\r\n col_ind : ndarray, dtype=np.intp\r\n Indices of columns in the dataset that belong to the bicluster.\r\n \"\"\"\r\n rows = self.rows_[i]\r\n columns = self.columns_[i]\r\n return np.nonzero(rows)[0], np.nonzero(columns)[0]\r\n\r\n def get_shape(self, i):\r\n \"\"\"Shape of the `i`'th bicluster.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n\r\n Returns\r\n -------\r\n n_rows : int\r\n Number of rows in the bicluster.\r\n\r\n n_cols : int\r\n Number of columns in the bicluster.\r\n \"\"\"\r\n indices = self.get_indices(i)\r\n return tuple(len(i) for i in indices)\r\n\r\n def get_submatrix(self, i, data):\r\n \"\"\"Return the submatrix corresponding to bicluster `i`.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n data : array-like of shape (n_samples, n_features)\r\n The data.\r\n\r\n Returns\r\n -------\r\n submatrix : ndarray of shape (n_rows, n_cols)\r\n The submatrix corresponding to bicluster `i`.\r\n\r\n Notes\r\n -----\r\n Works with sparse matrices. 
Only works if ``rows_`` and\r\n ``columns_`` attributes exist.\r\n \"\"\"\r\n from .utils.validation import check_array\r\n\r\n data = check_array(data, accept_sparse=\"csr\")\r\n row_ind, col_ind = self.get_indices(i)\r\n return data[row_ind[:, np.newaxis], col_ind]\r\n\r\n\r\nclass TransformerMixin:\r\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\r\n\r\n def fit_transform(self, X, y=None, **fit_params):\r\n \"\"\"\r\n Fit to data, then transform it.\r\n\r\n Fits transformer to `X` and `y` with optional parameters `fit_params`\r\n and returns a transformed version of `X`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Input samples.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\r\n default=None\r\n Target values (None for unsupervised transformations).\r\n\r\n **fit_params : dict\r\n Additional fit parameters.\r\n\r\n Returns\r\n -------\r\n X_new : ndarray array of shape (n_samples, n_features_new)\r\n Transformed array.\r\n \"\"\"\r\n # non-optimized default implementation; override when a better\r\n # method is possible for a given clustering algorithm\r\n if y is None:\r\n # fit method of arity 1 (unsupervised transformation)\r\n return self.fit(X, **fit_params).transform(X)\r\n else:\r\n # fit method of arity 2 (supervised transformation)\r\n return self.fit(X, y, **fit_params).transform(X)\r\n\r\n\r\nclass _OneToOneFeatureMixin:\r\n \"\"\"Provides `get_feature_names_out` for simple transformers.\r\n\r\n Assumes there's a 1-to-1 correspondence between input features\r\n and output features.\r\n \"\"\"\r\n\r\n def get_feature_names_out(self, input_features=None):\r\n \"\"\"Get output feature names for transformation.\r\n\r\n Parameters\r\n ----------\r\n input_features : array-like of str or None, default=None\r\n Input features.\r\n\r\n - If `input_features` is `None`, then `feature_names_in_` is\r\n used as feature names in. 
If `feature_names_in_` is not defined,\r\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\r\n - If `input_features` is an array-like, then `input_features` must\r\n match `feature_names_in_` if `feature_names_in_` is defined.\r\n\r\n Returns\r\n -------\r\n feature_names_out : ndarray of str objects\r\n Same as input features.\r\n \"\"\"\r\n return _check_feature_names_in(self, input_features)\r\n\r\n\r\nclass DensityMixin:\r\n \"\"\"Mixin class for all density estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"DensityEstimator\"\r\n\r\n def score(self, X, y=None):\r\n \"\"\"Return the score of the model on the data `X`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n score : float\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass OutlierMixin:\r\n \"\"\"Mixin class for all outlier detection estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"outlier_detector\"\r\n\r\n def fit_predict(self, X, y=None):\r\n \"\"\"Perform fit on X and returns labels for X.\r\n\r\n Returns -1 for outliers and 1 for inliers.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n The input samples.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n y : ndarray of shape (n_samples,)\r\n 1 for inliers, -1 for outliers.\r\n \"\"\"\r\n # override for transductive outlier detectors like LocalOulierFactor\r\n return self.fit(X).predict(X)\r\n\r\n\r\nclass MetaEstimatorMixin:\r\n _required_parameters = [\"estimator\"]\r\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\r\n\r\n\r\nclass MultiOutputMixin:\r\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\r\n\r\n def _more_tags(self):\r\n return {\"multioutput\": True}\r\n\r\n\r\nclass _UnstableArchMixin:\r\n \"\"\"Mark estimators that are non-determinstic on 32bit or PowerPC\"\"\"\r\n\r\n def _more_tags(self):\r\n return {\r\n \"non_deterministic\": (\r\n _IS_32BIT or platform.machine().startswith((\"ppc\", \"powerpc\"))\r\n )\r\n }\r\n\r\n\r\ndef is_classifier(estimator):\r\n \"\"\"Return True if the given estimator is (probably) a classifier.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is a classifier and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"classifier\"\r\n\r\n\r\ndef is_regressor(estimator):\r\n \"\"\"Return True if the given estimator is (probably) a regressor.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is a regressor and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"regressor\"\r\n\r\n\r\ndef is_outlier_detector(estimator):\r\n \"\"\"Return True if the given estimator is (probably) an outlier detector.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is an outlier detector and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"outlier_detector\"\r\n\r\n\r\ndef _is_pairwise(estimator):\r\n \"\"\"Returns True if estimator is pairwise.\r\n\r\n - If the `_pairwise` 
attribute and the tag are present and consistent,\r\n then use the value and not issue a warning.\r\n - If the `_pairwise` attribute and the tag are present and not\r\n consistent, use the `_pairwise` value and issue a deprecation\r\n warning.\r\n - If only the `_pairwise` attribute is present and it is not False,\r\n issue a deprecation warning and use the `_pairwise` value.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if the estimator is pairwise and False otherwise.\r\n \"\"\"\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\r\n has_pairwise_attribute = hasattr(estimator, \"_pairwise\")\r\n pairwise_attribute = getattr(estimator, \"_pairwise\", False)\r\n pairwise_tag = _safe_tags(estimator, key=\"pairwise\")\r\n\r\n if has_pairwise_attribute:\r\n if pairwise_attribute != pairwise_tag:\r\n warnings.warn(\r\n \"_pairwise was deprecated in 0.24 and will be removed in 1.1 \"\r\n \"(renaming of 0.26). Set the estimator tags of your estimator \"\r\n \"instead\",\r\n FutureWarning,\r\n )\r\n return pairwise_attribute\r\n\r\n # use pairwise tag when the attribute is not present\r\n return pairwise_tag\r\n",
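The row above is scikit-learn's sklearn/base.py. A minimal custom estimator following the contract its docstrings describe: every hyper-parameter is an explicit __init__ keyword, so get_params, set_params and clone work with no extra code. The MeanRegressor class below is invented for illustration, not taken from the dataset:

import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone

class MeanRegressor(RegressorMixin, BaseEstimator):
    """Predict a constant: the (optionally shifted) mean of the training targets."""

    def __init__(self, shift=0.0):  # no *args / **kwargs, as base.py requires
        self.shift = shift

    def fit(self, X, y):
        X, y = self._validate_data(X, y)  # sets n_features_in_ / feature names
        self.mean_ = float(np.mean(y)) + self.shift
        return self

    def predict(self, X):
        X = self._validate_data(X, reset=False)  # re-checks the feature count
        return np.full(X.shape[0], self.mean_)

est = MeanRegressor(shift=1.0).fit([[0.0], [1.0]], [2.0, 4.0])
print(est.predict([[5.0]]))      # [4.]
print(clone(est).get_params())   # {'shift': 1.0} -- a fresh, unfitted copy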
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n DataFrame,\r\n DatetimeIndex,\r\n Index,\r\n Series,\r\n Timestamp,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestSeriesAppend:\r\n def test_append_preserve_name(self, datetime_series):\r\n result = datetime_series[:5].append(datetime_series[5:])\r\n assert result.name == datetime_series.name\r\n\r\n def test_append(self, datetime_series, string_series, object_series):\r\n appended_series = string_series.append(object_series)\r\n for idx, value in appended_series.items():\r\n if idx in string_series.index:\r\n assert value == string_series[idx]\r\n elif idx in object_series.index:\r\n assert value == object_series[idx]\r\n else:\r\n raise AssertionError(\"orphaned index!\")\r\n\r\n msg = \"Indexes have overlapping values:\"\r\n with pytest.raises(ValueError, match=msg):\r\n datetime_series.append(datetime_series, verify_integrity=True)\r\n\r\n def test_append_many(self, datetime_series):\r\n pieces = [datetime_series[:5], datetime_series[5:10], datetime_series[10:]]\r\n\r\n result = pieces[0].append(pieces[1:])\r\n tm.assert_series_equal(result, datetime_series)\r\n\r\n def test_append_duplicates(self):\r\n # GH 13677\r\n s1 = Series([1, 2, 3])\r\n s2 = Series([4, 5, 6])\r\n exp = Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])\r\n tm.assert_series_equal(s1.append(s2), exp)\r\n tm.assert_series_equal(pd.concat([s1, s2]), exp)\r\n\r\n # the result must have RangeIndex\r\n exp = Series([1, 2, 3, 4, 5, 6])\r\n tm.assert_series_equal(\r\n s1.append(s2, ignore_index=True), exp, check_index_type=True\r\n )\r\n tm.assert_series_equal(\r\n pd.concat([s1, s2], ignore_index=True), exp, check_index_type=True\r\n )\r\n\r\n msg = \"Indexes have overlapping values:\"\r\n with pytest.raises(ValueError, match=msg):\r\n s1.append(s2, verify_integrity=True)\r\n with pytest.raises(ValueError, match=msg):\r\n pd.concat([s1, s2], verify_integrity=True)\r\n\r\n def test_append_tuples(self):\r\n # GH 28410\r\n s = Series([1, 2, 3])\r\n list_input = [s, s]\r\n tuple_input = (s, s)\r\n\r\n expected = s.append(list_input)\r\n result = s.append(tuple_input)\r\n\r\n tm.assert_series_equal(expected, result)\r\n\r\n def test_append_dataframe_raises(self):\r\n # GH 31413\r\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]})\r\n\r\n msg = \"to_append should be a Series or list/tuple of Series, got DataFrame\"\r\n with pytest.raises(TypeError, match=msg):\r\n df.A.append(df)\r\n with pytest.raises(TypeError, match=msg):\r\n df.A.append([df])\r\n\r\n\r\nclass TestSeriesAppendWithDatetimeIndex:\r\n def test_append(self):\r\n rng = date_range(\"5/8/2012 1:45\", periods=10, freq=\"5T\")\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\r\n\r\n result = ts.append(ts)\r\n result_df = df.append(df)\r\n ex_index = DatetimeIndex(np.tile(rng.values, 2))\r\n tm.assert_index_equal(result.index, ex_index)\r\n tm.assert_index_equal(result_df.index, ex_index)\r\n\r\n appended = rng.append(rng)\r\n tm.assert_index_equal(appended, ex_index)\r\n\r\n appended = rng.append([rng, rng])\r\n ex_index = DatetimeIndex(np.tile(rng.values, 3))\r\n tm.assert_index_equal(appended, ex_index)\r\n\r\n # different index names\r\n rng1 = rng.copy()\r\n rng2 = rng.copy()\r\n rng1.name = \"foo\"\r\n rng2.name = \"bar\"\r\n assert rng1.append(rng1).name == \"foo\"\r\n assert rng1.append(rng2).name is None\r\n\r\n def test_append_tz(self):\r\n # see gh-2938\r\n rng = 
date_range(\"5/8/2012 1:45\", periods=10, freq=\"5T\", tz=\"US/Eastern\")\r\n rng2 = date_range(\"5/8/2012 2:35\", periods=10, freq=\"5T\", tz=\"US/Eastern\")\r\n rng3 = date_range(\"5/8/2012 1:45\", periods=20, freq=\"5T\", tz=\"US/Eastern\")\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\r\n ts2 = Series(np.random.randn(len(rng2)), rng2)\r\n df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)\r\n\r\n result = ts.append(ts2)\r\n result_df = df.append(df2)\r\n tm.assert_index_equal(result.index, rng3)\r\n tm.assert_index_equal(result_df.index, rng3)\r\n\r\n appended = rng.append(rng2)\r\n tm.assert_index_equal(appended, rng3)\r\n\r\n def test_append_tz_explicit_pytz(self):\r\n # see gh-2938\r\n from pytz import timezone as timezone\r\n\r\n rng = date_range(\r\n \"5/8/2012 1:45\", periods=10, freq=\"5T\", tz=timezone(\"US/Eastern\")\r\n )\r\n rng2 = date_range(\r\n \"5/8/2012 2:35\", periods=10, freq=\"5T\", tz=timezone(\"US/Eastern\")\r\n )\r\n rng3 = date_range(\r\n \"5/8/2012 1:45\", periods=20, freq=\"5T\", tz=timezone(\"US/Eastern\")\r\n )\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\r\n ts2 = Series(np.random.randn(len(rng2)), rng2)\r\n df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)\r\n\r\n result = ts.append(ts2)\r\n result_df = df.append(df2)\r\n tm.assert_index_equal(result.index, rng3)\r\n tm.assert_index_equal(result_df.index, rng3)\r\n\r\n appended = rng.append(rng2)\r\n tm.assert_index_equal(appended, rng3)\r\n\r\n def test_append_tz_dateutil(self):\r\n # see gh-2938\r\n rng = date_range(\r\n \"5/8/2012 1:45\", periods=10, freq=\"5T\", tz=\"dateutil/US/Eastern\"\r\n )\r\n rng2 = date_range(\r\n \"5/8/2012 2:35\", periods=10, freq=\"5T\", tz=\"dateutil/US/Eastern\"\r\n )\r\n rng3 = date_range(\r\n \"5/8/2012 1:45\", periods=20, freq=\"5T\", tz=\"dateutil/US/Eastern\"\r\n )\r\n ts = Series(np.random.randn(len(rng)), rng)\r\n df = DataFrame(np.random.randn(len(rng), 4), index=rng)\r\n ts2 = Series(np.random.randn(len(rng2)), rng2)\r\n df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)\r\n\r\n result = ts.append(ts2)\r\n result_df = df.append(df2)\r\n tm.assert_index_equal(result.index, rng3)\r\n tm.assert_index_equal(result_df.index, rng3)\r\n\r\n appended = rng.append(rng2)\r\n tm.assert_index_equal(appended, rng3)\r\n\r\n def test_series_append_aware(self):\r\n rng1 = date_range(\"1/1/2011 01:00\", periods=1, freq=\"H\", tz=\"US/Eastern\")\r\n rng2 = date_range(\"1/1/2011 02:00\", periods=1, freq=\"H\", tz=\"US/Eastern\")\r\n ser1 = Series([1], index=rng1)\r\n ser2 = Series([2], index=rng2)\r\n ts_result = ser1.append(ser2)\r\n\r\n exp_index = DatetimeIndex(\r\n [\"2011-01-01 01:00\", \"2011-01-01 02:00\"], tz=\"US/Eastern\", freq=\"H\"\r\n )\r\n exp = Series([1, 2], index=exp_index)\r\n tm.assert_series_equal(ts_result, exp)\r\n assert ts_result.index.tz == rng1.tz\r\n\r\n rng1 = date_range(\"1/1/2011 01:00\", periods=1, freq=\"H\", tz=\"UTC\")\r\n rng2 = date_range(\"1/1/2011 02:00\", periods=1, freq=\"H\", tz=\"UTC\")\r\n ser1 = Series([1], index=rng1)\r\n ser2 = Series([2], index=rng2)\r\n ts_result = ser1.append(ser2)\r\n\r\n exp_index = DatetimeIndex(\r\n [\"2011-01-01 01:00\", \"2011-01-01 02:00\"], tz=\"UTC\", freq=\"H\"\r\n )\r\n exp = Series([1, 2], index=exp_index)\r\n tm.assert_series_equal(ts_result, exp)\r\n utc = rng1.tz\r\n assert utc == ts_result.index.tz\r\n\r\n # GH#7795\r\n # different tz coerces to object 
dtype, not UTC\r\n rng1 = date_range(\"1/1/2011 01:00\", periods=1, freq=\"H\", tz=\"US/Eastern\")\r\n rng2 = date_range(\"1/1/2011 02:00\", periods=1, freq=\"H\", tz=\"US/Central\")\r\n ser1 = Series([1], index=rng1)\r\n ser2 = Series([2], index=rng2)\r\n ts_result = ser1.append(ser2)\r\n exp_index = Index(\r\n [\r\n Timestamp(\"1/1/2011 01:00\", tz=\"US/Eastern\"),\r\n Timestamp(\"1/1/2011 02:00\", tz=\"US/Central\"),\r\n ]\r\n )\r\n exp = Series([1, 2], index=exp_index)\r\n tm.assert_series_equal(ts_result, exp)\r\n\r\n def test_series_append_aware_naive(self):\r\n rng1 = date_range(\"1/1/2011 01:00\", periods=1, freq=\"H\")\r\n rng2 = date_range(\"1/1/2011 02:00\", periods=1, freq=\"H\", tz=\"US/Eastern\")\r\n ser1 = Series(np.random.randn(len(rng1)), index=rng1)\r\n ser2 = Series(np.random.randn(len(rng2)), index=rng2)\r\n ts_result = ser1.append(ser2)\r\n\r\n expected = ser1.index.astype(object).append(ser2.index.astype(object))\r\n assert ts_result.index.equals(expected)\r\n\r\n # mixed\r\n rng1 = date_range(\"1/1/2011 01:00\", periods=1, freq=\"H\")\r\n rng2 = range(100)\r\n ser1 = Series(np.random.randn(len(rng1)), index=rng1)\r\n ser2 = Series(np.random.randn(len(rng2)), index=rng2)\r\n ts_result = ser1.append(ser2)\r\n\r\n expected = ser1.index.astype(object).append(ser2.index)\r\n assert ts_result.index.equals(expected)\r\n\r\n def test_series_append_dst(self):\r\n rng1 = date_range(\"1/1/2016 01:00\", periods=3, freq=\"H\", tz=\"US/Eastern\")\r\n rng2 = date_range(\"8/1/2016 01:00\", periods=3, freq=\"H\", tz=\"US/Eastern\")\r\n ser1 = Series([1, 2, 3], index=rng1)\r\n ser2 = Series([10, 11, 12], index=rng2)\r\n ts_result = ser1.append(ser2)\r\n\r\n exp_index = DatetimeIndex(\r\n [\r\n \"2016-01-01 01:00\",\r\n \"2016-01-01 02:00\",\r\n \"2016-01-01 03:00\",\r\n \"2016-08-01 01:00\",\r\n \"2016-08-01 02:00\",\r\n \"2016-08-01 03:00\",\r\n ],\r\n tz=\"US/Eastern\",\r\n )\r\n exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)\r\n tm.assert_series_equal(ts_result, exp)\r\n assert ts_result.index.tz == rng1.tz\r\n",
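The row above tests Series.append. A minimal sketch of the duplicate-index behaviour those tests rely on, spelled with pd.concat (the equivalent form; Series.append itself was later deprecated in pandas):

import pandas as pd

s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])

print(pd.concat([s1, s2]).index.tolist())                     # [0, 1, 2, 0, 1, 2]
print(pd.concat([s1, s2], ignore_index=True).index.tolist())  # [0, 1, 2, 3, 4, 5]

try:
    pd.concat([s1, s2], verify_integrity=True)  # overlapping index labels
except ValueError as err:
    print("rejected:", err)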
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas._testing as tm\r\nfrom pandas.core.construction import sanitize_array\r\n\r\n\r\[email protected](\r\n \"values, dtype, expected\",\r\n [\r\n ([1, 2, 3], None, np.array([1, 2, 3], dtype=np.int64)),\r\n (np.array([1, 2, 3]), None, np.array([1, 2, 3])),\r\n ([\"1\", \"2\", None], None, np.array([\"1\", \"2\", None])),\r\n ([\"1\", \"2\", None], np.dtype(\"str\"), np.array([\"1\", \"2\", None])),\r\n ([1, 2, None], np.dtype(\"str\"), np.array([\"1\", \"2\", None])),\r\n ],\r\n)\r\ndef test_construct_1d_ndarray_preserving_na(values, dtype, expected):\r\n result = sanitize_array(values, index=None, dtype=dtype)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\[email protected](\"dtype\", [\"m8[ns]\", \"M8[ns]\"])\r\ndef test_construct_1d_ndarray_preserving_na_datetimelike(dtype):\r\n arr = np.arange(5, dtype=np.int64).view(dtype)\r\n expected = np.array(list(arr), dtype=object)\r\n assert all(isinstance(x, type(arr[0])) for x in expected)\r\n\r\n result = sanitize_array(arr, index=None, dtype=np.dtype(object))\r\n tm.assert_numpy_array_equal(result, expected)\r\n",
"\"\"\"\r\nProvide classes to perform the groupby aggregate operations.\r\n\r\nThese are not exposed to the user and provide implementations of the grouping\r\noperations, primarily in cython. These classes (BaseGrouper and BinGrouper)\r\nare contained *in* the SeriesGroupBy and DataFrameGroupBy objects.\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nimport collections\r\nimport functools\r\nfrom typing import (\r\n Generic,\r\n Hashable,\r\n Iterator,\r\n Sequence,\r\n overload,\r\n)\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import (\r\n NaT,\r\n lib,\r\n)\r\nimport pandas._libs.groupby as libgroupby\r\nimport pandas._libs.reduction as libreduction\r\nfrom pandas._typing import (\r\n ArrayLike,\r\n DtypeObj,\r\n F,\r\n FrameOrSeries,\r\n Shape,\r\n final,\r\n)\r\nfrom pandas.errors import AbstractMethodError\r\nfrom pandas.util._decorators import cache_readonly\r\n\r\nfrom pandas.core.dtypes.cast import (\r\n maybe_cast_pointwise_result,\r\n maybe_downcast_to_dtype,\r\n)\r\nfrom pandas.core.dtypes.common import (\r\n ensure_float64,\r\n ensure_int64,\r\n ensure_platform_int,\r\n is_1d_only_ea_obj,\r\n is_bool_dtype,\r\n is_categorical_dtype,\r\n is_complex_dtype,\r\n is_datetime64_any_dtype,\r\n is_float_dtype,\r\n is_integer_dtype,\r\n is_numeric_dtype,\r\n is_sparse,\r\n is_timedelta64_dtype,\r\n needs_i8_conversion,\r\n)\r\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\r\nfrom pandas.core.dtypes.missing import (\r\n isna,\r\n maybe_fill,\r\n)\r\n\r\nfrom pandas.core.arrays import (\r\n DatetimeArray,\r\n ExtensionArray,\r\n PeriodArray,\r\n TimedeltaArray,\r\n)\r\nfrom pandas.core.arrays.boolean import BooleanDtype\r\nfrom pandas.core.arrays.floating import (\r\n Float64Dtype,\r\n FloatingDtype,\r\n)\r\nfrom pandas.core.arrays.integer import (\r\n Int64Dtype,\r\n _IntegerDtype,\r\n)\r\nfrom pandas.core.arrays.masked import (\r\n BaseMaskedArray,\r\n BaseMaskedDtype,\r\n)\r\nfrom pandas.core.arrays.string_ import StringDtype\r\nimport pandas.core.common as com\r\nfrom pandas.core.frame import DataFrame\r\nfrom pandas.core.generic import NDFrame\r\nfrom pandas.core.groupby import (\r\n base,\r\n grouper,\r\n)\r\nfrom pandas.core.indexes.api import (\r\n CategoricalIndex,\r\n Index,\r\n MultiIndex,\r\n ensure_index,\r\n)\r\nfrom pandas.core.internals import ArrayManager\r\nfrom pandas.core.series import Series\r\nfrom pandas.core.sorting import (\r\n compress_group_index,\r\n decons_obs_group_ids,\r\n get_flattened_list,\r\n get_group_index,\r\n get_group_index_sorter,\r\n get_indexer_dict,\r\n)\r\n\r\n\r\nclass WrappedCythonOp:\r\n \"\"\"\r\n Dispatch logic for functions defined in _libs.groupby\r\n \"\"\"\r\n\r\n # Functions for which we do _not_ attempt to cast the cython result\r\n # back to the original dtype.\r\n cast_blocklist = frozenset([\"rank\", \"count\", \"size\", \"idxmin\", \"idxmax\"])\r\n\r\n def __init__(self, kind: str, how: str):\r\n self.kind = kind\r\n self.how = how\r\n\r\n _CYTHON_FUNCTIONS = {\r\n \"aggregate\": {\r\n \"add\": \"group_add\",\r\n \"prod\": \"group_prod\",\r\n \"min\": \"group_min\",\r\n \"max\": \"group_max\",\r\n \"mean\": \"group_mean\",\r\n \"median\": \"group_median\",\r\n \"var\": \"group_var\",\r\n \"first\": \"group_nth\",\r\n \"last\": \"group_last\",\r\n \"ohlc\": \"group_ohlc\",\r\n },\r\n \"transform\": {\r\n \"cumprod\": \"group_cumprod\",\r\n \"cumsum\": \"group_cumsum\",\r\n \"cummin\": \"group_cummin\",\r\n \"cummax\": \"group_cummax\",\r\n \"rank\": \"group_rank\",\r\n },\r\n }\r\n\r\n 
_MASKED_CYTHON_FUNCTIONS = {\"cummin\", \"cummax\"}\r\n\r\n _cython_arity = {\"ohlc\": 4} # OHLC\r\n\r\n # Note: we make this a classmethod and pass kind+how so that caching\r\n # works at the class level and not the instance level\r\n @classmethod\r\n @functools.lru_cache(maxsize=None)\r\n def _get_cython_function(\r\n cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool\r\n ):\r\n\r\n dtype_str = dtype.name\r\n ftype = cls._CYTHON_FUNCTIONS[kind][how]\r\n\r\n # see if there is a fused-type version of function\r\n # only valid for numeric\r\n f = getattr(libgroupby, ftype)\r\n if is_numeric:\r\n return f\r\n # error: Non-overlapping equality check (left operand type: \"dtype[Any]\", right\r\n # operand type: \"Literal['object']\")\r\n elif dtype == object: # type: ignore[comparison-overlap]\r\n if \"object\" not in f.__signatures__:\r\n # raise NotImplementedError here rather than TypeError later\r\n raise NotImplementedError(\r\n f\"function is not implemented for this dtype: \"\r\n f\"[how->{how},dtype->{dtype_str}]\"\r\n )\r\n return f\r\n\r\n def get_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):\r\n \"\"\"\r\n Find the appropriate cython function, casting if necessary.\r\n\r\n Parameters\r\n ----------\r\n values : np.ndarray\r\n is_numeric : bool\r\n\r\n Returns\r\n -------\r\n func : callable\r\n values : np.ndarray\r\n \"\"\"\r\n how = self.how\r\n kind = self.kind\r\n\r\n if how in [\"median\", \"cumprod\"]:\r\n # these two only have float64 implementations\r\n if is_numeric:\r\n values = ensure_float64(values)\r\n else:\r\n raise NotImplementedError(\r\n f\"function is not implemented for this dtype: \"\r\n f\"[how->{how},dtype->{values.dtype.name}]\"\r\n )\r\n func = getattr(libgroupby, f\"group_{how}_float64\")\r\n return func, values\r\n\r\n func = self._get_cython_function(kind, how, values.dtype, is_numeric)\r\n\r\n if values.dtype.kind in [\"i\", \"u\"]:\r\n if how in [\"add\", \"var\", \"prod\", \"mean\", \"ohlc\"]:\r\n # result may still include NaN, so we have to cast\r\n values = ensure_float64(values)\r\n\r\n return func, values\r\n\r\n def _disallow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):\r\n \"\"\"\r\n Check if we can do this operation with our cython functions.\r\n\r\n Raises\r\n ------\r\n NotImplementedError\r\n This is either not a valid function for this dtype, or\r\n valid but not implemented in cython.\r\n \"\"\"\r\n how = self.how\r\n\r\n if is_numeric:\r\n # never an invalid op for those dtypes, so return early as fastpath\r\n return\r\n\r\n if is_categorical_dtype(dtype):\r\n # NotImplementedError for methods that can fall back to a\r\n # non-cython implementation.\r\n if how in [\"add\", \"prod\", \"cumsum\", \"cumprod\"]:\r\n raise TypeError(f\"{dtype} type does not support {how} operations\")\r\n raise NotImplementedError(f\"{dtype} dtype not supported\")\r\n\r\n elif is_sparse(dtype):\r\n # categoricals are only 1d, so we\r\n # are not setup for dim transforming\r\n raise NotImplementedError(f\"{dtype} dtype not supported\")\r\n elif is_datetime64_any_dtype(dtype):\r\n # we raise NotImplemented if this is an invalid operation\r\n # entirely, e.g. 
adding datetimes\r\n if how in [\"add\", \"prod\", \"cumsum\", \"cumprod\"]:\r\n raise TypeError(f\"datetime64 type does not support {how} operations\")\r\n elif is_timedelta64_dtype(dtype):\r\n if how in [\"prod\", \"cumprod\"]:\r\n raise TypeError(f\"timedelta64 type does not support {how} operations\")\r\n\r\n def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:\r\n how = self.how\r\n kind = self.kind\r\n\r\n arity = self._cython_arity.get(how, 1)\r\n\r\n out_shape: Shape\r\n if how == \"ohlc\":\r\n out_shape = (ngroups, 4)\r\n elif arity > 1:\r\n raise NotImplementedError(\r\n \"arity of more than 1 is not supported for the 'how' argument\"\r\n )\r\n elif kind == \"transform\":\r\n out_shape = values.shape\r\n else:\r\n out_shape = (ngroups,) + values.shape[1:]\r\n return out_shape\r\n\r\n def get_out_dtype(self, dtype: np.dtype) -> np.dtype:\r\n how = self.how\r\n\r\n if how == \"rank\":\r\n out_dtype = \"float64\"\r\n else:\r\n if is_numeric_dtype(dtype):\r\n out_dtype = f\"{dtype.kind}{dtype.itemsize}\"\r\n else:\r\n out_dtype = \"object\"\r\n return np.dtype(out_dtype)\r\n\r\n @overload\r\n def _get_result_dtype(self, dtype: np.dtype) -> np.dtype:\r\n ... # pragma: no cover\r\n\r\n @overload\r\n def _get_result_dtype(self, dtype: ExtensionDtype) -> ExtensionDtype:\r\n ... # pragma: no cover\r\n\r\n def _get_result_dtype(self, dtype: DtypeObj) -> DtypeObj:\r\n \"\"\"\r\n Get the desired dtype of a result based on the\r\n input dtype and how it was computed.\r\n\r\n Parameters\r\n ----------\r\n dtype : np.dtype or ExtensionDtype\r\n Input dtype.\r\n\r\n Returns\r\n -------\r\n np.dtype or ExtensionDtype\r\n The desired dtype of the result.\r\n \"\"\"\r\n how = self.how\r\n\r\n if how in [\"add\", \"cumsum\", \"sum\", \"prod\"]:\r\n if dtype == np.dtype(bool):\r\n return np.dtype(np.int64)\r\n elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):\r\n return Int64Dtype()\r\n elif how in [\"mean\", \"median\", \"var\"]:\r\n if isinstance(dtype, (BooleanDtype, _IntegerDtype)):\r\n return Float64Dtype()\r\n elif is_float_dtype(dtype):\r\n return dtype\r\n elif is_numeric_dtype(dtype):\r\n return np.dtype(np.float64)\r\n return dtype\r\n\r\n def uses_mask(self) -> bool:\r\n return self.how in self._MASKED_CYTHON_FUNCTIONS\r\n\r\n @final\r\n def _ea_wrap_cython_operation(\r\n self,\r\n values: ExtensionArray,\r\n min_count: int,\r\n ngroups: int,\r\n comp_ids: np.ndarray,\r\n **kwargs,\r\n ) -> ArrayLike:\r\n \"\"\"\r\n If we have an ExtensionArray, unwrap, call _cython_operation, and\r\n re-wrap if appropriate.\r\n \"\"\"\r\n # TODO: general case implementation overridable by EAs.\r\n if isinstance(values, BaseMaskedArray) and self.uses_mask():\r\n return self._masked_ea_wrap_cython_operation(\r\n values,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n **kwargs,\r\n )\r\n orig_values = values\r\n\r\n if isinstance(orig_values, (DatetimeArray, PeriodArray)):\r\n # All of the functions implemented here are ordinal, so we can\r\n # operate on the tz-naive equivalents\r\n npvalues = orig_values._ndarray.view(\"M8[ns]\")\r\n res_values = self._cython_op_ndim_compat(\r\n npvalues,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n if self.how in [\"rank\"]:\r\n # i.e. 
how in WrappedCythonOp.cast_blocklist, since\r\n # other cast_blocklist methods dont go through cython_operation\r\n # preserve float64 dtype\r\n return res_values\r\n\r\n res_values = res_values.view(\"i8\")\r\n result = type(orig_values)(res_values, dtype=orig_values.dtype)\r\n return result\r\n\r\n elif isinstance(orig_values, TimedeltaArray):\r\n # We have an ExtensionArray but not ExtensionDtype\r\n res_values = self._cython_op_ndim_compat(\r\n orig_values._ndarray,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n if self.how in [\"rank\"]:\r\n # i.e. how in WrappedCythonOp.cast_blocklist, since\r\n # other cast_blocklist methods dont go through cython_operation\r\n # preserve float64 dtype\r\n return res_values\r\n\r\n # otherwise res_values has the same dtype as original values\r\n return type(orig_values)(res_values)\r\n\r\n elif isinstance(values.dtype, (BooleanDtype, _IntegerDtype)):\r\n # IntegerArray or BooleanArray\r\n npvalues = values.to_numpy(\"float64\", na_value=np.nan)\r\n res_values = self._cython_op_ndim_compat(\r\n npvalues,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n if self.how in [\"rank\"]:\r\n # i.e. how in WrappedCythonOp.cast_blocklist, since\r\n # other cast_blocklist methods dont go through cython_operation\r\n return res_values\r\n\r\n dtype = self._get_result_dtype(orig_values.dtype)\r\n cls = dtype.construct_array_type()\r\n return cls._from_sequence(res_values, dtype=dtype)\r\n\r\n elif isinstance(values.dtype, FloatingDtype):\r\n # FloatingArray\r\n npvalues = values.to_numpy(\r\n values.dtype.numpy_dtype,\r\n na_value=np.nan,\r\n )\r\n res_values = self._cython_op_ndim_compat(\r\n npvalues,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n if self.how in [\"rank\"]:\r\n # i.e. how in WrappedCythonOp.cast_blocklist, since\r\n # other cast_blocklist methods dont go through cython_operation\r\n return res_values\r\n\r\n dtype = self._get_result_dtype(orig_values.dtype)\r\n cls = dtype.construct_array_type()\r\n return cls._from_sequence(res_values, dtype=dtype)\r\n\r\n elif isinstance(values.dtype, StringDtype):\r\n # StringArray\r\n npvalues = values.to_numpy(object, na_value=np.nan)\r\n res_values = self._cython_op_ndim_compat(\r\n npvalues,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n if self.how in [\"rank\"]:\r\n # i.e. 
how in WrappedCythonOp.cast_blocklist, since\r\n # other cast_blocklist methods dont go through cython_operation\r\n return res_values\r\n\r\n dtype = self._get_result_dtype(orig_values.dtype)\r\n cls = dtype.construct_array_type()\r\n return cls._from_sequence(res_values, dtype=dtype)\r\n\r\n raise NotImplementedError(\r\n f\"function is not implemented for this dtype: {values.dtype}\"\r\n )\r\n\r\n @final\r\n def _masked_ea_wrap_cython_operation(\r\n self,\r\n values: BaseMaskedArray,\r\n min_count: int,\r\n ngroups: int,\r\n comp_ids: np.ndarray,\r\n **kwargs,\r\n ) -> BaseMaskedArray:\r\n \"\"\"\r\n Equivalent of `_ea_wrap_cython_operation`, but optimized for masked EA's\r\n and cython algorithms which accept a mask.\r\n \"\"\"\r\n orig_values = values\r\n\r\n # Copy to ensure input and result masks don't end up shared\r\n mask = values._mask.copy()\r\n arr = values._data\r\n\r\n res_values = self._cython_op_ndim_compat(\r\n arr,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=mask,\r\n **kwargs,\r\n )\r\n dtype = self._get_result_dtype(orig_values.dtype)\r\n assert isinstance(dtype, BaseMaskedDtype)\r\n cls = dtype.construct_array_type()\r\n\r\n return cls(res_values.astype(dtype.type, copy=False), mask)\r\n\r\n @final\r\n def _cython_op_ndim_compat(\r\n self,\r\n values: np.ndarray,\r\n *,\r\n min_count: int,\r\n ngroups: int,\r\n comp_ids: np.ndarray,\r\n mask: np.ndarray | None,\r\n **kwargs,\r\n ) -> np.ndarray:\r\n if values.ndim == 1:\r\n # expand to 2d, dispatch, then squeeze if appropriate\r\n values2d = values[None, :]\r\n res = self._call_cython_op(\r\n values2d,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=mask,\r\n **kwargs,\r\n )\r\n if res.shape[0] == 1:\r\n return res[0]\r\n\r\n # otherwise we have OHLC\r\n return res.T\r\n\r\n return self._call_cython_op(\r\n values,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=mask,\r\n **kwargs,\r\n )\r\n\r\n @final\r\n def _call_cython_op(\r\n self,\r\n values: np.ndarray, # np.ndarray[ndim=2]\r\n *,\r\n min_count: int,\r\n ngroups: int,\r\n comp_ids: np.ndarray,\r\n mask: np.ndarray | None,\r\n **kwargs,\r\n ) -> np.ndarray: # np.ndarray[ndim=2]\r\n orig_values = values\r\n\r\n dtype = values.dtype\r\n is_numeric = is_numeric_dtype(dtype)\r\n\r\n is_datetimelike = needs_i8_conversion(dtype)\r\n\r\n if is_datetimelike:\r\n values = values.view(\"int64\")\r\n is_numeric = True\r\n elif is_bool_dtype(dtype):\r\n values = values.astype(\"int64\")\r\n elif is_integer_dtype(dtype):\r\n # e.g. 
uint8 -> uint64, int16 -> int64\r\n dtype_str = dtype.kind + \"8\"\r\n values = values.astype(dtype_str, copy=False)\r\n elif is_numeric:\r\n if not is_complex_dtype(dtype):\r\n values = ensure_float64(values)\r\n\r\n values = values.T\r\n\r\n if mask is not None:\r\n mask = mask.reshape(values.shape, order=\"C\")\r\n\r\n out_shape = self._get_output_shape(ngroups, values)\r\n func, values = self.get_cython_func_and_vals(values, is_numeric)\r\n out_dtype = self.get_out_dtype(values.dtype)\r\n\r\n result = maybe_fill(np.empty(out_shape, dtype=out_dtype))\r\n if self.kind == \"aggregate\":\r\n counts = np.zeros(ngroups, dtype=np.int64)\r\n if self.how in [\"min\", \"max\", \"mean\"]:\r\n func(\r\n result,\r\n counts,\r\n values,\r\n comp_ids,\r\n min_count,\r\n is_datetimelike=is_datetimelike,\r\n )\r\n else:\r\n func(result, counts, values, comp_ids, min_count)\r\n else:\r\n # TODO: min_count\r\n if self.uses_mask():\r\n func(\r\n result,\r\n values,\r\n comp_ids,\r\n ngroups,\r\n is_datetimelike,\r\n mask=mask,\r\n **kwargs,\r\n )\r\n else:\r\n func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)\r\n\r\n if self.kind == \"aggregate\":\r\n # i.e. counts is defined. Locations where count<min_count\r\n # need to have the result set to np.nan, which may require casting,\r\n # see GH#40767\r\n if is_integer_dtype(result.dtype) and not is_datetimelike:\r\n cutoff = max(1, min_count)\r\n empty_groups = counts < cutoff\r\n if empty_groups.any():\r\n # Note: this conversion could be lossy, see GH#40767\r\n result = result.astype(\"float64\")\r\n result[empty_groups] = np.nan\r\n\r\n result = result.T\r\n\r\n if self.how not in self.cast_blocklist:\r\n # e.g. if we are int64 and need to restore to datetime64/timedelta64\r\n # \"rank\" is the only member of cast_blocklist we get here\r\n res_dtype = self._get_result_dtype(orig_values.dtype)\r\n op_result = maybe_downcast_to_dtype(result, res_dtype)\r\n else:\r\n op_result = result\r\n\r\n # error: Incompatible return value type (got \"Union[ExtensionArray, ndarray]\",\r\n # expected \"ndarray\")\r\n return op_result # type: ignore[return-value]\r\n\r\n @final\r\n def cython_operation(\r\n self,\r\n *,\r\n values: ArrayLike,\r\n axis: int,\r\n min_count: int = -1,\r\n comp_ids: np.ndarray,\r\n ngroups: int,\r\n **kwargs,\r\n ) -> ArrayLike:\r\n \"\"\"\r\n Call our cython function, with appropriate pre- and post- processing.\r\n \"\"\"\r\n if values.ndim > 2:\r\n raise NotImplementedError(\"number of dimensions is currently limited to 2\")\r\n elif values.ndim == 2:\r\n assert axis == 1, axis\r\n elif not is_1d_only_ea_obj(values):\r\n # Note: it is *not* the case that axis is always 0 for 1-dim values,\r\n # as we can have 1D ExtensionArrays that we need to treat as 2D\r\n assert axis == 0\r\n\r\n dtype = values.dtype\r\n is_numeric = is_numeric_dtype(dtype)\r\n\r\n # can we do this operation with our cython functions\r\n # if not raise NotImplementedError\r\n self._disallow_invalid_ops(dtype, is_numeric)\r\n\r\n if not isinstance(values, np.ndarray):\r\n # i.e. 
ExtensionArray\r\n return self._ea_wrap_cython_operation(\r\n values,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n **kwargs,\r\n )\r\n\r\n return self._cython_op_ndim_compat(\r\n values,\r\n min_count=min_count,\r\n ngroups=ngroups,\r\n comp_ids=comp_ids,\r\n mask=None,\r\n **kwargs,\r\n )\r\n\r\n\r\nclass BaseGrouper:\r\n \"\"\"\r\n This is an internal Grouper class, which actually holds\r\n the generated groups\r\n\r\n Parameters\r\n ----------\r\n axis : Index\r\n groupings : Sequence[Grouping]\r\n all the grouping instances to handle in this grouper\r\n for example for grouper list to groupby, need to pass the list\r\n sort : bool, default True\r\n whether this grouper will give sorted result or not\r\n group_keys : bool, default True\r\n mutated : bool, default False\r\n indexer : np.ndarray[np.intp], optional\r\n the indexer created by Grouper\r\n some groupers (TimeGrouper) will sort its axis and its\r\n group_info is also sorted, so need the indexer to reorder\r\n\r\n \"\"\"\r\n\r\n axis: Index\r\n\r\n def __init__(\r\n self,\r\n axis: Index,\r\n groupings: Sequence[grouper.Grouping],\r\n sort: bool = True,\r\n group_keys: bool = True,\r\n mutated: bool = False,\r\n indexer: np.ndarray | None = None,\r\n dropna: bool = True,\r\n ):\r\n assert isinstance(axis, Index), axis\r\n\r\n self.axis = axis\r\n self._groupings: list[grouper.Grouping] = list(groupings)\r\n self._sort = sort\r\n self.group_keys = group_keys\r\n self.mutated = mutated\r\n self.indexer = indexer\r\n self.dropna = dropna\r\n\r\n @property\r\n def groupings(self) -> list[grouper.Grouping]:\r\n return self._groupings\r\n\r\n @property\r\n def shape(self) -> Shape:\r\n return tuple(ping.ngroups for ping in self.groupings)\r\n\r\n def __iter__(self):\r\n return iter(self.indices)\r\n\r\n @property\r\n def nkeys(self) -> int:\r\n return len(self.groupings)\r\n\r\n def get_iterator(\r\n self, data: FrameOrSeries, axis: int = 0\r\n ) -> Iterator[tuple[Hashable, FrameOrSeries]]:\r\n \"\"\"\r\n Groupby iterator\r\n\r\n Returns\r\n -------\r\n Generator yielding sequence of (name, subsetted object)\r\n for each group\r\n \"\"\"\r\n splitter = self._get_splitter(data, axis=axis)\r\n keys = self._get_group_keys()\r\n for key, group in zip(keys, splitter):\r\n yield key, group.__finalize__(data, method=\"groupby\")\r\n\r\n @final\r\n def _get_splitter(self, data: FrameOrSeries, axis: int = 0) -> DataSplitter:\r\n \"\"\"\r\n Returns\r\n -------\r\n Generator yielding subsetted objects\r\n\r\n __finalize__ has not been called for the subsetted objects returned.\r\n \"\"\"\r\n ids, _, ngroups = self.group_info\r\n return get_splitter(data, ids, ngroups, axis=axis)\r\n\r\n def _get_grouper(self):\r\n \"\"\"\r\n We are a grouper as part of another's groupings.\r\n\r\n We have a specific method of grouping, so cannot\r\n convert to a Index for our grouper.\r\n \"\"\"\r\n return self.groupings[0].grouping_vector\r\n\r\n @final\r\n def _get_group_keys(self):\r\n if len(self.groupings) == 1:\r\n return self.levels[0]\r\n else:\r\n ids, _, ngroups = self.group_info\r\n\r\n # provide \"flattened\" iterator for multi-group setting\r\n return get_flattened_list(ids, ngroups, self.levels, self.codes)\r\n\r\n @final\r\n def apply(self, f: F, data: FrameOrSeries, axis: int = 0):\r\n mutated = self.mutated\r\n splitter = self._get_splitter(data, axis=axis)\r\n group_keys = self._get_group_keys()\r\n result_values = None\r\n\r\n if data.ndim == 2 and any(\r\n isinstance(x, ExtensionArray) for x in 
data._iter_column_arrays()\r\n ):\r\n # calling splitter.fast_apply will raise TypeError via apply_frame_axis0\r\n # if we pass EA instead of ndarray\r\n # TODO: can we have a workaround for EAs backed by ndarray?\r\n pass\r\n\r\n elif isinstance(data._mgr, ArrayManager):\r\n # TODO(ArrayManager) don't use fast_apply / libreduction.apply_frame_axis0\r\n # for now -> relies on BlockManager internals\r\n pass\r\n elif (\r\n com.get_callable_name(f) not in base.plotting_methods\r\n and isinstance(splitter, FrameSplitter)\r\n and axis == 0\r\n # fast_apply/libreduction doesn't allow non-numpy backed indexes\r\n and not data.index._has_complex_internals\r\n ):\r\n try:\r\n sdata = splitter.sorted_data\r\n result_values, mutated = splitter.fast_apply(f, sdata, group_keys)\r\n\r\n except IndexError:\r\n # This is a rare case in which re-running in python-space may\r\n # make a difference, see test_apply_mutate.test_mutate_groups\r\n pass\r\n\r\n else:\r\n # If the fast apply path could be used we can return here.\r\n # Otherwise we need to fall back to the slow implementation.\r\n if len(result_values) == len(group_keys):\r\n return group_keys, result_values, mutated\r\n\r\n if result_values is None:\r\n # result_values is None if fast apply path wasn't taken\r\n # or fast apply aborted with an unexpected exception.\r\n # In either case, initialize the result list and perform\r\n # the slow iteration.\r\n result_values = []\r\n skip_first = False\r\n else:\r\n # If result_values is not None we're in the case that the\r\n # fast apply loop was broken prematurely but we have\r\n # already the result for the first group which we can reuse.\r\n skip_first = True\r\n\r\n # This calls DataSplitter.__iter__\r\n zipped = zip(group_keys, splitter)\r\n if skip_first:\r\n # pop the first item from the front of the iterator\r\n next(zipped)\r\n\r\n for key, group in zipped:\r\n object.__setattr__(group, \"name\", key)\r\n\r\n # group might be modified\r\n group_axes = group.axes\r\n res = f(group)\r\n if not _is_indexed_like(res, group_axes, axis):\r\n mutated = True\r\n result_values.append(res)\r\n\r\n return group_keys, result_values, mutated\r\n\r\n @cache_readonly\r\n def indices(self):\r\n \"\"\"dict {group name -> group indices}\"\"\"\r\n if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex):\r\n # This shows unused categories in indices GH#38642\r\n return self.groupings[0].indices\r\n codes_list = [ping.codes for ping in self.groupings]\r\n keys = [ping.group_index for ping in self.groupings]\r\n return get_indexer_dict(codes_list, keys)\r\n\r\n @property\r\n def codes(self) -> list[np.ndarray]:\r\n return [ping.codes for ping in self.groupings]\r\n\r\n @property\r\n def levels(self) -> list[Index]:\r\n return [ping.group_index for ping in self.groupings]\r\n\r\n @property\r\n def names(self) -> list[Hashable]:\r\n return [ping.name for ping in self.groupings]\r\n\r\n @final\r\n def size(self) -> Series:\r\n \"\"\"\r\n Compute group sizes.\r\n \"\"\"\r\n ids, _, ngroups = self.group_info\r\n if ngroups:\r\n out = np.bincount(ids[ids != -1], minlength=ngroups)\r\n else:\r\n out = []\r\n return Series(out, index=self.result_index, dtype=\"int64\")\r\n\r\n @cache_readonly\r\n def groups(self) -> dict[Hashable, np.ndarray]:\r\n \"\"\"dict {group name -> group labels}\"\"\"\r\n if len(self.groupings) == 1:\r\n return self.groupings[0].groups\r\n else:\r\n to_groupby = zip(*(ping.grouping_vector for ping in self.groupings))\r\n index = Index(to_groupby)\r\n return 
self.axis.groupby(index)\r\n\r\n @final\r\n @cache_readonly\r\n def is_monotonic(self) -> bool:\r\n # return if my group orderings are monotonic\r\n return Index(self.group_info[0]).is_monotonic\r\n\r\n @cache_readonly\r\n def group_info(self):\r\n comp_ids, obs_group_ids = self._get_compressed_codes()\r\n\r\n ngroups = len(obs_group_ids)\r\n comp_ids = ensure_platform_int(comp_ids)\r\n\r\n return comp_ids, obs_group_ids, ngroups\r\n\r\n @final\r\n @cache_readonly\r\n def codes_info(self) -> np.ndarray:\r\n # return the codes of items in original grouped axis\r\n ids, _, _ = self.group_info\r\n if self.indexer is not None:\r\n sorter = np.lexsort((ids, self.indexer))\r\n ids = ids[sorter]\r\n return ids\r\n\r\n @final\r\n def _get_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]:\r\n if len(self.groupings) > 1:\r\n group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True)\r\n return compress_group_index(group_index, sort=self._sort)\r\n\r\n ping = self.groupings[0]\r\n return ping.codes, np.arange(len(ping.group_index))\r\n\r\n @final\r\n @cache_readonly\r\n def ngroups(self) -> int:\r\n return len(self.result_index)\r\n\r\n @property\r\n def reconstructed_codes(self) -> list[np.ndarray]:\r\n codes = self.codes\r\n ids, obs_ids, _ = self.group_info\r\n return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True)\r\n\r\n @cache_readonly\r\n def result_arraylike(self) -> ArrayLike:\r\n \"\"\"\r\n Analogous to result_index, but returning an ndarray/ExtensionArray\r\n allowing us to retain ExtensionDtypes not supported by Index.\r\n \"\"\"\r\n # TODO: once Index supports arbitrary EAs, this can be removed in favor\r\n # of result_index\r\n if len(self.groupings) == 1:\r\n return self.groupings[0].group_arraylike\r\n\r\n return self.result_index._values\r\n\r\n @cache_readonly\r\n def result_index(self) -> Index:\r\n if len(self.groupings) == 1:\r\n return self.groupings[0].result_index.rename(self.names[0])\r\n\r\n codes = self.reconstructed_codes\r\n levels = [ping.result_index for ping in self.groupings]\r\n return MultiIndex(\r\n levels=levels, codes=codes, verify_integrity=False, names=self.names\r\n )\r\n\r\n @final\r\n def get_group_levels(self) -> list[ArrayLike]:\r\n # Note: only called from _insert_inaxis_grouper_inplace, which\r\n # is only called for BaseGrouper, never for BinGrouper\r\n if len(self.groupings) == 1:\r\n return [self.groupings[0].result_index]\r\n\r\n name_list = []\r\n for ping, codes in zip(self.groupings, self.reconstructed_codes):\r\n codes = ensure_platform_int(codes)\r\n levels = ping.result_index.take(codes)\r\n\r\n name_list.append(levels)\r\n\r\n return name_list\r\n\r\n # ------------------------------------------------------------\r\n # Aggregation functions\r\n\r\n @final\r\n def _cython_operation(\r\n self,\r\n kind: str,\r\n values,\r\n how: str,\r\n axis: int,\r\n min_count: int = -1,\r\n **kwargs,\r\n ) -> ArrayLike:\r\n \"\"\"\r\n Returns the values of a cython operation.\r\n \"\"\"\r\n assert kind in [\"transform\", \"aggregate\"]\r\n\r\n cy_op = WrappedCythonOp(kind=kind, how=how)\r\n\r\n ids, _, _ = self.group_info\r\n ngroups = self.ngroups\r\n return cy_op.cython_operation(\r\n values=values,\r\n axis=axis,\r\n min_count=min_count,\r\n comp_ids=ids,\r\n ngroups=ngroups,\r\n **kwargs,\r\n )\r\n\r\n @final\r\n def agg_series(\r\n self, obj: Series, func: F, preserve_dtype: bool = False\r\n ) -> ArrayLike:\r\n \"\"\"\r\n Parameters\r\n ----------\r\n obj : Series\r\n func : function taking a Series and 
returning a scalar-like\r\n preserve_dtype : bool\r\n Whether the aggregation is known to be dtype-preserving.\r\n\r\n Returns\r\n -------\r\n np.ndarray or ExtensionArray\r\n \"\"\"\r\n # test_groupby_empty_with_category gets here with self.ngroups == 0\r\n # and len(obj) > 0\r\n\r\n if len(obj) == 0:\r\n # SeriesGrouper would raise if we were to call _aggregate_series_fast\r\n result = self._aggregate_series_pure_python(obj, func)\r\n\r\n elif not isinstance(obj._values, np.ndarray):\r\n # _aggregate_series_fast would raise TypeError when\r\n # calling libreduction.Slider\r\n # In the datetime64tz case it would incorrectly cast to tz-naive\r\n # TODO: can we get a performant workaround for EAs backed by ndarray?\r\n result = self._aggregate_series_pure_python(obj, func)\r\n\r\n # we can preserve a little bit more aggressively with EA dtype\r\n # because maybe_cast_pointwise_result will do a try/except\r\n # with _from_sequence. NB we are assuming here that _from_sequence\r\n # is sufficiently strict that it casts appropriately.\r\n preserve_dtype = True\r\n\r\n elif obj.index._has_complex_internals:\r\n # Preempt TypeError in _aggregate_series_fast\r\n result = self._aggregate_series_pure_python(obj, func)\r\n\r\n else:\r\n result = self._aggregate_series_fast(obj, func)\r\n\r\n npvalues = lib.maybe_convert_objects(result, try_float=False)\r\n if preserve_dtype:\r\n out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True)\r\n else:\r\n out = npvalues\r\n return out\r\n\r\n def _aggregate_series_fast(self, obj: Series, func: F) -> np.ndarray:\r\n # -> np.ndarray[object]\r\n\r\n # At this point we have already checked that\r\n # - obj.index is not a MultiIndex\r\n # - obj is backed by an ndarray, not ExtensionArray\r\n # - len(obj) > 0\r\n func = com.is_builtin_func(func)\r\n\r\n ids, _, ngroups = self.group_info\r\n\r\n # avoids object / Series creation overhead\r\n indexer = get_group_index_sorter(ids, ngroups)\r\n obj = obj.take(indexer)\r\n ids = ids.take(indexer)\r\n sgrouper = libreduction.SeriesGrouper(obj, func, ids, ngroups)\r\n result, _ = sgrouper.get_result()\r\n return result\r\n\r\n @final\r\n def _aggregate_series_pure_python(self, obj: Series, func: F) -> np.ndarray:\r\n # -> np.ndarray[object]\r\n ids, _, ngroups = self.group_info\r\n\r\n counts = np.zeros(ngroups, dtype=int)\r\n result = np.empty(ngroups, dtype=\"O\")\r\n initialized = False\r\n\r\n # equiv: splitter = self._get_splitter(obj, axis=0)\r\n splitter = get_splitter(obj, ids, ngroups, axis=0)\r\n\r\n for i, group in enumerate(splitter):\r\n\r\n # Each step of this loop corresponds to\r\n # libreduction._BaseGrouper._apply_to_group\r\n res = func(group)\r\n res = libreduction.extract_result(res)\r\n\r\n if not initialized:\r\n # We only do this validation on the first iteration\r\n libreduction.check_result_array(res, group.dtype)\r\n initialized = True\r\n\r\n counts[i] = group.shape[0]\r\n result[i] = res\r\n\r\n return result\r\n\r\n\r\nclass BinGrouper(BaseGrouper):\r\n \"\"\"\r\n This is an internal Grouper class\r\n\r\n Parameters\r\n ----------\r\n bins : the split index of binlabels to group the item of axis\r\n binlabels : the label list\r\n mutated : bool, default False\r\n indexer : np.ndarray[np.intp]\r\n\r\n Examples\r\n --------\r\n bins: [2, 4, 6, 8, 10]\r\n binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',\r\n '2005-01-05', '2005-01-07', '2005-01-09'],\r\n dtype='datetime64[ns]', freq='2D')\r\n\r\n the group_info, which contains the label of each item in grouped\r\n axis, 
the index of label in label list, group number, is\r\n\r\n (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)\r\n\r\n means that, the grouped axis has 10 items, can be grouped into 5\r\n labels, the first and second items belong to the first label, the\r\n third and forth items belong to the second label, and so on\r\n\r\n \"\"\"\r\n\r\n bins: np.ndarray # np.ndarray[np.int64]\r\n binlabels: Index\r\n mutated: bool\r\n\r\n def __init__(\r\n self,\r\n bins,\r\n binlabels,\r\n mutated: bool = False,\r\n indexer=None,\r\n ):\r\n self.bins = ensure_int64(bins)\r\n self.binlabels = ensure_index(binlabels)\r\n self.mutated = mutated\r\n self.indexer = indexer\r\n\r\n # These lengths must match, otherwise we could call agg_series\r\n # with empty self.bins, which would raise in libreduction.\r\n assert len(self.binlabels) == len(self.bins)\r\n\r\n @cache_readonly\r\n def groups(self):\r\n \"\"\"dict {group name -> group labels}\"\"\"\r\n # this is mainly for compat\r\n # GH 3881\r\n result = {\r\n key: value\r\n for key, value in zip(self.binlabels, self.bins)\r\n if key is not NaT\r\n }\r\n return result\r\n\r\n @property\r\n def nkeys(self) -> int:\r\n # still matches len(self.groupings), but we can hard-code\r\n return 1\r\n\r\n def _get_grouper(self):\r\n \"\"\"\r\n We are a grouper as part of another's groupings.\r\n\r\n We have a specific method of grouping, so cannot\r\n convert to a Index for our grouper.\r\n \"\"\"\r\n return self\r\n\r\n def get_iterator(self, data: FrameOrSeries, axis: int = 0):\r\n \"\"\"\r\n Groupby iterator\r\n\r\n Returns\r\n -------\r\n Generator yielding sequence of (name, subsetted object)\r\n for each group\r\n \"\"\"\r\n if axis == 0:\r\n slicer = lambda start, edge: data.iloc[start:edge]\r\n else:\r\n slicer = lambda start, edge: data.iloc[:, start:edge]\r\n\r\n length = len(data.axes[axis])\r\n\r\n start = 0\r\n for edge, label in zip(self.bins, self.binlabels):\r\n if label is not NaT:\r\n yield label, slicer(start, edge)\r\n start = edge\r\n\r\n if start < length:\r\n yield self.binlabels[-1], slicer(start, None)\r\n\r\n @cache_readonly\r\n def indices(self):\r\n indices = collections.defaultdict(list)\r\n\r\n i = 0\r\n for label, bin in zip(self.binlabels, self.bins):\r\n if i < bin:\r\n if label is not NaT:\r\n indices[label] = list(range(i, bin))\r\n i = bin\r\n return indices\r\n\r\n @cache_readonly\r\n def group_info(self):\r\n ngroups = self.ngroups\r\n obs_group_ids = np.arange(ngroups, dtype=np.int64)\r\n rep = np.diff(np.r_[0, self.bins])\r\n\r\n rep = ensure_platform_int(rep)\r\n if ngroups == len(self.bins):\r\n comp_ids = np.repeat(np.arange(ngroups), rep)\r\n else:\r\n comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)\r\n\r\n return (\r\n ensure_platform_int(comp_ids),\r\n obs_group_ids,\r\n ngroups,\r\n )\r\n\r\n @cache_readonly\r\n def reconstructed_codes(self) -> list[np.ndarray]:\r\n # get unique result indices, and prepend 0 as groupby starts from the first\r\n return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]\r\n\r\n @cache_readonly\r\n def result_index(self):\r\n if len(self.binlabels) != 0 and isna(self.binlabels[0]):\r\n return self.binlabels[1:]\r\n\r\n return self.binlabels\r\n\r\n @property\r\n def levels(self) -> list[Index]:\r\n return [self.binlabels]\r\n\r\n @property\r\n def names(self) -> list[Hashable]:\r\n return [self.binlabels.name]\r\n\r\n @property\r\n def groupings(self) -> list[grouper.Grouping]:\r\n lev = self.binlabels\r\n ping = grouper.Grouping(lev, lev, in_axis=False, 
level=None)\r\n return [ping]\r\n\r\n def _aggregate_series_fast(self, obj: Series, func: F) -> np.ndarray:\r\n # -> np.ndarray[object]\r\n\r\n # At this point we have already checked that\r\n # - obj.index is not a MultiIndex\r\n # - obj is backed by an ndarray, not ExtensionArray\r\n # - ngroups != 0\r\n # - len(self.bins) > 0\r\n sbg = libreduction.SeriesBinGrouper(obj, func, self.bins)\r\n result, _ = sbg.get_result()\r\n return result\r\n\r\n\r\ndef _is_indexed_like(obj, axes, axis: int) -> bool:\r\n if isinstance(obj, Series):\r\n if len(axes) > 1:\r\n return False\r\n return obj.axes[axis].equals(axes[axis])\r\n elif isinstance(obj, DataFrame):\r\n return obj.axes[axis].equals(axes[axis])\r\n\r\n return False\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# Splitting / application\r\n\r\n\r\nclass DataSplitter(Generic[FrameOrSeries]):\r\n def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0):\r\n self.data = data\r\n self.labels = ensure_platform_int(labels) # _should_ already be np.intp\r\n self.ngroups = ngroups\r\n\r\n self.axis = axis\r\n assert isinstance(axis, int), axis\r\n\r\n @cache_readonly\r\n def slabels(self) -> np.ndarray: # np.ndarray[np.intp]\r\n # Sorted labels\r\n return self.labels.take(self._sort_idx)\r\n\r\n @cache_readonly\r\n def _sort_idx(self) -> np.ndarray: # np.ndarray[np.intp]\r\n # Counting sort indexer\r\n return get_group_index_sorter(self.labels, self.ngroups)\r\n\r\n def __iter__(self):\r\n sdata = self.sorted_data\r\n\r\n if self.ngroups == 0:\r\n # we are inside a generator, rather than raise StopIteration\r\n # we merely return signal the end\r\n return\r\n\r\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\r\n\r\n for start, end in zip(starts, ends):\r\n yield self._chop(sdata, slice(start, end))\r\n\r\n @cache_readonly\r\n def sorted_data(self) -> FrameOrSeries:\r\n return self.data.take(self._sort_idx, axis=self.axis)\r\n\r\n def _chop(self, sdata, slice_obj: slice) -> NDFrame:\r\n raise AbstractMethodError(self)\r\n\r\n\r\nclass SeriesSplitter(DataSplitter):\r\n def _chop(self, sdata: Series, slice_obj: slice) -> Series:\r\n # fastpath equivalent to `sdata.iloc[slice_obj]`\r\n mgr = sdata._mgr.get_slice(slice_obj)\r\n # __finalize__ not called here, must be applied by caller if applicable\r\n\r\n # fastpath equivalent to:\r\n # `return sdata._constructor(mgr, name=sdata.name, fastpath=True)`\r\n obj = type(sdata)._from_mgr(mgr)\r\n object.__setattr__(obj, \"_flags\", sdata._flags)\r\n object.__setattr__(obj, \"_name\", sdata._name)\r\n return obj\r\n\r\n\r\nclass FrameSplitter(DataSplitter):\r\n def fast_apply(self, f: F, sdata: FrameOrSeries, names):\r\n # must return keys::list, values::list, mutated::bool\r\n starts, ends = lib.generate_slices(self.slabels, self.ngroups)\r\n return libreduction.apply_frame_axis0(sdata, f, names, starts, ends)\r\n\r\n def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame:\r\n # Fastpath equivalent to:\r\n # if self.axis == 0:\r\n # return sdata.iloc[slice_obj]\r\n # else:\r\n # return sdata.iloc[:, slice_obj]\r\n mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis)\r\n # __finalize__ not called here, must be applied by caller if applicable\r\n\r\n # fastpath equivalent to `return sdata._constructor(mgr)`\r\n obj = type(sdata)._from_mgr(mgr)\r\n object.__setattr__(obj, \"_flags\", sdata._flags)\r\n return obj\r\n\r\n\r\ndef get_splitter(\r\n data: FrameOrSeries, labels: np.ndarray, ngroups: int, axis: int = 0\r\n) 
-> DataSplitter:\r\n if isinstance(data, Series):\r\n klass: type[DataSplitter] = SeriesSplitter\r\n else:\r\n # i.e. DataFrame\r\n klass = FrameSplitter\r\n\r\n return klass(data, labels, ngroups, axis)\r\n",
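The groupby ops module above is internal, so a user-facing sketch may help anchor what the WrappedCythonOp aggregation path computes. The snippet below is an illustration, not part of the archived file: it shows the min_count behaviour discussed in the GH#40767 comment inside _call_cython_op, where an integer sum result is cast to float64 so that under-populated groups can hold NaN.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})

# Group "a" has two values (sum 3); group "b" has only one, which is below
# min_count, so its result becomes NaN and the int64 result is cast to float64.
res = df.groupby("key")["val"].sum(min_count=2)
print(res)        # a -> 3.0, b -> NaN
print(res.dtype)  # float64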
"import pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n Categorical,\r\n DataFrame,\r\n Series,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\ndef _assert_series_equal_both(a, b, **kwargs):\r\n \"\"\"\r\n Check that two Series equal.\r\n\r\n This check is performed commutatively.\r\n\r\n Parameters\r\n ----------\r\n a : Series\r\n The first Series to compare.\r\n b : Series\r\n The second Series to compare.\r\n kwargs : dict\r\n The arguments passed to `tm.assert_series_equal`.\r\n \"\"\"\r\n tm.assert_series_equal(a, b, **kwargs)\r\n tm.assert_series_equal(b, a, **kwargs)\r\n\r\n\r\ndef _assert_not_series_equal(a, b, **kwargs):\r\n \"\"\"\r\n Check that two Series are not equal.\r\n\r\n Parameters\r\n ----------\r\n a : Series\r\n The first Series to compare.\r\n b : Series\r\n The second Series to compare.\r\n kwargs : dict\r\n The arguments passed to `tm.assert_series_equal`.\r\n \"\"\"\r\n try:\r\n tm.assert_series_equal(a, b, **kwargs)\r\n msg = \"The two Series were equal when they shouldn't have been\"\r\n\r\n pytest.fail(msg=msg)\r\n except AssertionError:\r\n pass\r\n\r\n\r\ndef _assert_not_series_equal_both(a, b, **kwargs):\r\n \"\"\"\r\n Check that two Series are not equal.\r\n\r\n This check is performed commutatively.\r\n\r\n Parameters\r\n ----------\r\n a : Series\r\n The first Series to compare.\r\n b : Series\r\n The second Series to compare.\r\n kwargs : dict\r\n The arguments passed to `tm.assert_series_equal`.\r\n \"\"\"\r\n _assert_not_series_equal(a, b, **kwargs)\r\n _assert_not_series_equal(b, a, **kwargs)\r\n\r\n\r\[email protected](\"data\", [range(3), list(\"abc\"), list(\"áàä\")])\r\ndef test_series_equal(data):\r\n _assert_series_equal_both(Series(data), Series(data))\r\n\r\n\r\[email protected](\r\n \"data1,data2\",\r\n [\r\n (range(3), range(1, 4)),\r\n (list(\"abc\"), list(\"xyz\")),\r\n (list(\"áàä\"), list(\"éèë\")),\r\n (list(\"áàä\"), list(b\"aaa\")),\r\n (range(3), range(4)),\r\n ],\r\n)\r\ndef test_series_not_equal_value_mismatch(data1, data2):\r\n _assert_not_series_equal_both(Series(data1), Series(data2))\r\n\r\n\r\[email protected](\r\n \"kwargs\",\r\n [\r\n {\"dtype\": \"float64\"}, # dtype mismatch\r\n {\"index\": [1, 2, 4]}, # index mismatch\r\n {\"name\": \"foo\"}, # name mismatch\r\n ],\r\n)\r\ndef test_series_not_equal_metadata_mismatch(kwargs):\r\n data = range(3)\r\n s1 = Series(data)\r\n\r\n s2 = Series(data, **kwargs)\r\n _assert_not_series_equal_both(s1, s2)\r\n\r\n\r\[email protected](\"data1,data2\", [(0.12345, 0.12346), (0.1235, 0.1236)])\r\[email protected](\"dtype\", [\"float32\", \"float64\"])\r\[email protected](\"decimals\", [0, 1, 2, 3, 5, 10])\r\ndef test_less_precise(data1, data2, dtype, decimals):\r\n rtol = 10 ** -decimals\r\n s1 = Series([data1], dtype=dtype)\r\n s2 = Series([data2], dtype=dtype)\r\n\r\n if (decimals == 5 or decimals == 10) or (\r\n decimals >= 3 and abs(data1 - data2) >= 0.0005\r\n ):\r\n msg = \"Series values are different\"\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, rtol=rtol)\r\n else:\r\n _assert_series_equal_both(s1, s2, rtol=rtol)\r\n\r\n\r\[email protected](\r\n \"s1,s2,msg\",\r\n [\r\n # Index\r\n (\r\n Series([\"l1\", \"l2\"], index=[1, 2]),\r\n Series([\"l1\", \"l2\"], index=[1.0, 2.0]),\r\n \"Series\\\\.index are different\",\r\n ),\r\n # MultiIndex\r\n (\r\n DataFrame.from_records(\r\n {\"a\": [1, 2], \"b\": [2.1, 1.5], \"c\": [\"l1\", \"l2\"]}, index=[\"a\", \"b\"]\r\n ).c,\r\n DataFrame.from_records(\r\n {\"a\": [1.0, 2.0], \"b\": [2.1, 
1.5], \"c\": [\"l1\", \"l2\"]}, index=[\"a\", \"b\"]\r\n ).c,\r\n \"MultiIndex level \\\\[0\\\\] are different\",\r\n ),\r\n ],\r\n)\r\ndef test_series_equal_index_dtype(s1, s2, msg, check_index_type):\r\n kwargs = {\"check_index_type\": check_index_type}\r\n\r\n if check_index_type:\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, **kwargs)\r\n else:\r\n tm.assert_series_equal(s1, s2, **kwargs)\r\n\r\n\r\ndef test_series_equal_length_mismatch(rtol):\r\n msg = \"\"\"Series are different\r\n\r\nSeries length are different\r\n\\\\[left\\\\]: 3, RangeIndex\\\\(start=0, stop=3, step=1\\\\)\r\n\\\\[right\\\\]: 4, RangeIndex\\\\(start=0, stop=4, step=1\\\\)\"\"\"\r\n\r\n s1 = Series([1, 2, 3])\r\n s2 = Series([1, 2, 3, 4])\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, rtol=rtol)\r\n\r\n\r\ndef test_series_equal_numeric_values_mismatch(rtol):\r\n msg = \"\"\"Series are different\r\n\r\nSeries values are different \\\\(33\\\\.33333 %\\\\)\r\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\r\n\\\\[left\\\\]: \\\\[1, 2, 3\\\\]\r\n\\\\[right\\\\]: \\\\[1, 2, 4\\\\]\"\"\"\r\n\r\n s1 = Series([1, 2, 3])\r\n s2 = Series([1, 2, 4])\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, rtol=rtol)\r\n\r\n\r\ndef test_series_equal_categorical_values_mismatch(rtol):\r\n msg = \"\"\"Series are different\r\n\r\nSeries values are different \\\\(66\\\\.66667 %\\\\)\r\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\r\n\\\\[left\\\\]: \\\\['a', 'b', 'c'\\\\]\r\nCategories \\\\(3, object\\\\): \\\\['a', 'b', 'c'\\\\]\r\n\\\\[right\\\\]: \\\\['a', 'c', 'b'\\\\]\r\nCategories \\\\(3, object\\\\): \\\\['a', 'b', 'c'\\\\]\"\"\"\r\n\r\n s1 = Series(Categorical([\"a\", \"b\", \"c\"]))\r\n s2 = Series(Categorical([\"a\", \"c\", \"b\"]))\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, rtol=rtol)\r\n\r\n\r\ndef test_series_equal_datetime_values_mismatch(rtol):\r\n msg = \"\"\"numpy array are different\r\n\r\nnumpy array values are different \\\\(100.0 %\\\\)\r\n\\\\[index\\\\]: \\\\[0, 1, 2\\\\]\r\n\\\\[left\\\\]: \\\\[1514764800000000000, 1514851200000000000, 1514937600000000000\\\\]\r\n\\\\[right\\\\]: \\\\[1549065600000000000, 1549152000000000000, 1549238400000000000\\\\]\"\"\"\r\n\r\n s1 = Series(pd.date_range(\"2018-01-01\", periods=3, freq=\"D\"))\r\n s2 = Series(pd.date_range(\"2019-02-02\", periods=3, freq=\"D\"))\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, rtol=rtol)\r\n\r\n\r\ndef test_series_equal_categorical_mismatch(check_categorical):\r\n msg = \"\"\"Attributes of Series are different\r\n\r\nAttribute \"dtype\" are different\r\n\\\\[left\\\\]: CategoricalDtype\\\\(categories=\\\\['a', 'b'\\\\], ordered=False\\\\)\r\n\\\\[right\\\\]: CategoricalDtype\\\\(categories=\\\\['a', 'b', 'c'\\\\], \\\r\nordered=False\\\\)\"\"\"\r\n\r\n s1 = Series(Categorical([\"a\", \"b\"]))\r\n s2 = Series(Categorical([\"a\", \"b\"], categories=list(\"abc\")))\r\n\r\n if check_categorical:\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s2, check_categorical=check_categorical)\r\n else:\r\n _assert_series_equal_both(s1, s2, check_categorical=check_categorical)\r\n\r\n\r\ndef test_assert_series_equal_extension_dtype_mismatch():\r\n # https://github.com/pandas-dev/pandas/issues/32747\r\n left = Series(pd.array([1, 2, 3], dtype=\"Int64\"))\r\n right = left.astype(int)\r\n\r\n msg = \"\"\"Attributes of Series are 
different\r\n\r\nAttribute \"dtype\" are different\r\n\\\\[left\\\\]: Int64\r\n\\\\[right\\\\]: int[32|64]\"\"\"\r\n\r\n tm.assert_series_equal(left, right, check_dtype=False)\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(left, right, check_dtype=True)\r\n\r\n\r\ndef test_assert_series_equal_interval_dtype_mismatch():\r\n # https://github.com/pandas-dev/pandas/issues/32747\r\n left = Series([pd.Interval(0, 1)], dtype=\"interval\")\r\n right = left.astype(object)\r\n\r\n msg = \"\"\"Attributes of Series are different\r\n\r\nAttribute \"dtype\" are different\r\n\\\\[left\\\\]: interval\\\\[int64, right\\\\]\r\n\\\\[right\\\\]: object\"\"\"\r\n\r\n tm.assert_series_equal(left, right, check_dtype=False)\r\n\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(left, right, check_dtype=True)\r\n\r\n\r\ndef test_series_equal_series_type():\r\n class MySeries(Series):\r\n pass\r\n\r\n s1 = Series([1, 2])\r\n s2 = Series([1, 2])\r\n s3 = MySeries([1, 2])\r\n\r\n tm.assert_series_equal(s1, s2, check_series_type=False)\r\n tm.assert_series_equal(s1, s2, check_series_type=True)\r\n\r\n tm.assert_series_equal(s1, s3, check_series_type=False)\r\n tm.assert_series_equal(s3, s1, check_series_type=False)\r\n\r\n with pytest.raises(AssertionError, match=\"Series classes are different\"):\r\n tm.assert_series_equal(s1, s3, check_series_type=True)\r\n\r\n with pytest.raises(AssertionError, match=\"Series classes are different\"):\r\n tm.assert_series_equal(s3, s1, check_series_type=True)\r\n\r\n\r\ndef test_series_equal_exact_for_nonnumeric():\r\n # https://github.com/pandas-dev/pandas/issues/35446\r\n s1 = Series([\"a\", \"b\"])\r\n s2 = Series([\"a\", \"b\"])\r\n s3 = Series([\"b\", \"a\"])\r\n\r\n tm.assert_series_equal(s1, s2, check_exact=True)\r\n tm.assert_series_equal(s2, s1, check_exact=True)\r\n\r\n msg = \"\"\"Series are different\r\n\r\nSeries values are different \\\\(100\\\\.0 %\\\\)\r\n\\\\[index\\\\]: \\\\[0, 1\\\\]\r\n\\\\[left\\\\]: \\\\[a, b\\\\]\r\n\\\\[right\\\\]: \\\\[b, a\\\\]\"\"\"\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s1, s3, check_exact=True)\r\n\r\n msg = \"\"\"Series are different\r\n\r\nSeries values are different \\\\(100\\\\.0 %\\\\)\r\n\\\\[index\\\\]: \\\\[0, 1\\\\]\r\n\\\\[left\\\\]: \\\\[b, a\\\\]\r\n\\\\[right\\\\]: \\\\[a, b\\\\]\"\"\"\r\n with pytest.raises(AssertionError, match=msg):\r\n tm.assert_series_equal(s3, s1, check_exact=True)\r\n\r\n\r\[email protected](\"right_dtype\", [\"Int32\", \"int64\"])\r\ndef test_assert_series_equal_ignore_extension_dtype_mismatch(right_dtype):\r\n # https://github.com/pandas-dev/pandas/issues/35715\r\n left = Series([1, 2, 3], dtype=\"Int64\")\r\n right = Series([1, 2, 3], dtype=right_dtype)\r\n tm.assert_series_equal(left, right, check_dtype=False)\r\n\r\n\r\ndef test_allows_duplicate_labels():\r\n left = Series([1])\r\n right = Series([1]).set_flags(allows_duplicate_labels=False)\r\n tm.assert_series_equal(left, left)\r\n tm.assert_series_equal(right, right)\r\n tm.assert_series_equal(left, right, check_flags=False)\r\n tm.assert_series_equal(right, left, check_flags=False)\r\n\r\n with pytest.raises(AssertionError, match=\"<Flags\"):\r\n tm.assert_series_equal(left, right)\r\n\r\n with pytest.raises(AssertionError, match=\"<Flags\"):\r\n tm.assert_series_equal(left, right)\r\n\r\n\r\ndef test_assert_series_equal_identical_na(nulls_fixture):\r\n ser = Series([nulls_fixture])\r\n\r\n tm.assert_series_equal(ser, ser.copy())\r\n\r\n # 
while we're here do Index too\r\n idx = pd.Index(ser)\r\n tm.assert_index_equal(idx, idx.copy(deep=True))\r\n",
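As a pointer to what the tests above parametrize, here is a minimal sketch of the rtol behaviour exercised by test_less_precise, using values taken from its parametrization; the tolerance arithmetic itself lives inside pandas._testing.

import pandas as pd
import pandas._testing as tm
import pytest

s1 = pd.Series([0.12345], dtype="float64")
s2 = pd.Series([0.12346], dtype="float64")

tm.assert_series_equal(s1, s2, rtol=1e-2)       # within tolerance: passes
with pytest.raises(AssertionError):
    tm.assert_series_equal(s1, s2, rtol=1e-10)  # too strict: values differ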
"# Author: Alexandre Gramfort <[email protected]>\r\n# Fabian Pedregosa <[email protected]>\r\n# Olivier Grisel <[email protected]>\r\n# Gael Varoquaux <[email protected]>\r\n#\r\n# License: BSD 3 clause\r\n\r\nimport sys\r\nimport warnings\r\nimport numbers\r\nfrom abc import ABC, abstractmethod\r\n\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom joblib import Parallel, effective_n_jobs\r\n\r\nfrom ._base import LinearModel, _pre_fit\r\nfrom ..base import RegressorMixin, MultiOutputMixin\r\nfrom ._base import _preprocess_data, _deprecate_normalize\r\nfrom ..utils import check_array\r\nfrom ..utils.validation import check_random_state\r\nfrom ..model_selection import check_cv\r\nfrom ..utils.extmath import safe_sparse_dot\r\nfrom ..utils.fixes import _astype_copy_false, _joblib_parallel_args\r\nfrom ..utils.validation import (\r\n _check_sample_weight,\r\n check_consistent_length,\r\n check_is_fitted,\r\n column_or_1d,\r\n)\r\nfrom ..utils.fixes import delayed\r\n\r\n# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'\r\nfrom . import _cd_fast as cd_fast # type: ignore\r\n\r\n\r\ndef _set_order(X, y, order=\"C\"):\r\n \"\"\"Change the order of X and y if necessary.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data.\r\n\r\n y : ndarray of shape (n_samples,)\r\n Target values.\r\n\r\n order : {None, 'C', 'F'}\r\n If 'C', dense arrays are returned as C-ordered, sparse matrices in csr\r\n format. If 'F', dense arrays are return as F-ordered, sparse matrices\r\n in csc format.\r\n\r\n Returns\r\n -------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data with guaranteed order.\r\n\r\n y : ndarray of shape (n_samples,)\r\n Target values with guaranteed order.\r\n \"\"\"\r\n if order not in [None, \"C\", \"F\"]:\r\n raise ValueError(\r\n \"Unknown value for order. Got {} instead of None, 'C' or 'F'.\".format(order)\r\n )\r\n sparse_X = sparse.issparse(X)\r\n sparse_y = sparse.issparse(y)\r\n if order is not None:\r\n sparse_format = \"csc\" if order == \"F\" else \"csr\"\r\n if sparse_X:\r\n # As of scipy 1.1.0, new argument copy=False by default.\r\n # This is what we want.\r\n X = X.asformat(sparse_format, **_astype_copy_false(X))\r\n else:\r\n X = np.asarray(X, order=order)\r\n if sparse_y:\r\n y = y.asformat(sparse_format)\r\n else:\r\n y = np.asarray(y, order=order)\r\n return X, y\r\n\r\n\r\n###############################################################################\r\n# Paths functions\r\n\r\n\r\ndef _alpha_grid(\r\n X,\r\n y,\r\n Xy=None,\r\n l1_ratio=1.0,\r\n fit_intercept=True,\r\n eps=1e-3,\r\n n_alphas=100,\r\n normalize=False,\r\n copy_X=True,\r\n):\r\n \"\"\"Compute the grid of alpha values for elastic net parameter search\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data. Pass directly as Fortran-contiguous data to avoid\r\n unnecessary memory duplication\r\n\r\n y : ndarray of shape (n_samples,) or (n_samples, n_outputs)\r\n Target values\r\n\r\n Xy : array-like of shape (n_features,) or (n_features, n_outputs),\\\r\n default=None\r\n Xy = np.dot(X.T, y) that can be precomputed.\r\n\r\n l1_ratio : float, default=1.0\r\n The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.\r\n For ``l1_ratio = 0`` the penalty is an L2 penalty. (currently not\r\n supported) ``For l1_ratio = 1`` it is an L1 penalty. 
For\r\n ``0 < l1_ratio <1``, the penalty is a combination of L1 and L2.\r\n\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path\r\n\r\n fit_intercept : bool, default=True\r\n Whether to fit an intercept or not\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n \"\"\"\r\n if l1_ratio == 0:\r\n raise ValueError(\r\n \"Automatic alpha grid generation is not supported for\"\r\n \" l1_ratio=0. Please supply a grid by providing \"\r\n \"your estimator with the appropriate `alphas=` \"\r\n \"argument.\"\r\n )\r\n n_samples = len(y)\r\n\r\n sparse_center = False\r\n if Xy is None:\r\n X_sparse = sparse.isspmatrix(X)\r\n sparse_center = X_sparse and (fit_intercept or normalize)\r\n X = check_array(\r\n X, accept_sparse=\"csc\", copy=(copy_X and fit_intercept and not X_sparse)\r\n )\r\n if not X_sparse:\r\n # X can be touched inplace thanks to the above line\r\n X, y, _, _, _ = _preprocess_data(X, y, fit_intercept, normalize, copy=False)\r\n Xy = safe_sparse_dot(X.T, y, dense_output=True)\r\n\r\n if sparse_center:\r\n # Workaround to find alpha_max for sparse matrices.\r\n # since we should not destroy the sparsity of such matrices.\r\n _, _, X_offset, _, X_scale = _preprocess_data(\r\n X, y, fit_intercept, normalize, return_mean=True\r\n )\r\n mean_dot = X_offset * np.sum(y)\r\n\r\n if Xy.ndim == 1:\r\n Xy = Xy[:, np.newaxis]\r\n\r\n if sparse_center:\r\n if fit_intercept:\r\n Xy -= mean_dot[:, np.newaxis]\r\n if normalize:\r\n Xy /= X_scale[:, np.newaxis]\r\n\r\n alpha_max = np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)\r\n\r\n if alpha_max <= np.finfo(float).resolution:\r\n alphas = np.empty(n_alphas)\r\n alphas.fill(np.finfo(float).resolution)\r\n return alphas\r\n\r\n return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[\r\n ::-1\r\n ]\r\n\r\n\r\ndef lasso_path(\r\n X,\r\n y,\r\n *,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n precompute=\"auto\",\r\n Xy=None,\r\n copy_X=True,\r\n coef_init=None,\r\n verbose=False,\r\n return_n_iter=False,\r\n positive=False,\r\n **params,\r\n):\r\n \"\"\"Compute Lasso path with coordinate descent.\r\n\r\n The Lasso optimization function varies for mono and multi-outputs.\r\n\r\n For mono-output tasks it is::\r\n\r\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\r\n\r\n For multi-output tasks it is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21\r\n\r\n Where::\r\n\r\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\r\n\r\n i.e. the sum of norm of each row.\r\n\r\n Read more in the :ref:`User Guide <lasso>`.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data. Pass directly as Fortran-contiguous data to avoid\r\n unnecessary memory duplication. 
If ``y`` is mono-output then ``X``\r\n can be sparse.\r\n\r\n y : {array-like, sparse matrix} of shape (n_samples,) or \\\r\n (n_samples, n_targets)\r\n Target values.\r\n\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path.\r\n\r\n alphas : ndarray, default=None\r\n List of alphas where to compute the models.\r\n If ``None`` alphas are set automatically.\r\n\r\n precompute : 'auto', bool or array-like of shape \\\r\n (n_features, n_features), default='auto'\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. If set to ``'auto'`` let us decide. The Gram\r\n matrix can also be passed as argument.\r\n\r\n Xy : array-like of shape (n_features,) or (n_features, n_targets),\\\r\n default=None\r\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\r\n only when the Gram matrix is precomputed.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n coef_init : ndarray of shape (n_features, ), default=None\r\n The initial values of the coefficients.\r\n\r\n verbose : bool or int, default=False\r\n Amount of verbosity.\r\n\r\n return_n_iter : bool, default=False\r\n Whether to return the number of iterations or not.\r\n\r\n positive : bool, default=False\r\n If set to True, forces coefficients to be positive.\r\n (Only allowed when ``y.ndim == 1``).\r\n\r\n **params : kwargs\r\n Keyword arguments passed to the coordinate descent solver.\r\n\r\n Returns\r\n -------\r\n alphas : ndarray of shape (n_alphas,)\r\n The alphas along the path where models are computed.\r\n\r\n coefs : ndarray of shape (n_features, n_alphas) or \\\r\n (n_targets, n_features, n_alphas)\r\n Coefficients along the path.\r\n\r\n dual_gaps : ndarray of shape (n_alphas,)\r\n The dual gaps at the end of the optimization for each alpha.\r\n\r\n n_iters : list of int\r\n The number of iterations taken by the coordinate descent optimizer to\r\n reach the specified tolerance for each alpha.\r\n\r\n See Also\r\n --------\r\n lars_path : Compute Least Angle Regression or Lasso path using LARS\r\n algorithm.\r\n Lasso : The Lasso is a linear model that estimates sparse coefficients.\r\n LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.\r\n LassoCV : Lasso linear model with iterative fitting along a regularization\r\n path.\r\n LassoLarsCV : Cross-validated Lasso using the LARS algorithm.\r\n sklearn.decomposition.sparse_encode : Estimator that can be used to\r\n transform signals into sparse linear combination of atoms from a fixed.\r\n\r\n Notes\r\n -----\r\n For an example, see\r\n :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py\r\n <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.\r\n\r\n To avoid unnecessary memory duplication the X argument of the fit method\r\n should be directly passed as a Fortran-contiguous numpy array.\r\n\r\n Note that in certain cases, the Lars solver may be significantly\r\n faster to implement this functionality. 
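# --- Editor's sketch (hedged) of the return values documented above ---
# With a mono-output y, lasso_path returns alphas of shape (n_alphas,),
# coefs of shape (n_features, n_alphas) and one dual gap per alpha.
# Data here are synthetic.
import numpy as np
from sklearn.linear_model import lasso_path

rng = np.random.default_rng(0)
X = rng.standard_normal((30, 6))
y = X @ rng.standard_normal(6) + 0.1 * rng.standard_normal(30)

alphas, coefs, dual_gaps = lasso_path(X, y, n_alphas=20)
print(alphas.shape, coefs.shape, dual_gaps.shape)   # (20,) (6, 20) (20,)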
In particular, linear\r\n interpolation can be used to retrieve model coefficients between the\r\n values output by lars_path\r\n\r\n Examples\r\n --------\r\n\r\n Comparing lasso_path and lars_path with interpolation:\r\n\r\n >>> import numpy as np\r\n >>> from sklearn.linear_model import lasso_path\r\n >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T\r\n >>> y = np.array([1, 2, 3.1])\r\n >>> # Use lasso_path to compute a coefficient path\r\n >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])\r\n >>> print(coef_path)\r\n [[0. 0. 0.46874778]\r\n [0.2159048 0.4425765 0.23689075]]\r\n\r\n >>> # Now use lars_path and 1D linear interpolation to compute the\r\n >>> # same path\r\n >>> from sklearn.linear_model import lars_path\r\n >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')\r\n >>> from scipy import interpolate\r\n >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],\r\n ... coef_path_lars[:, ::-1])\r\n >>> print(coef_path_continuous([5., 1., .5]))\r\n [[0. 0. 0.46915237]\r\n [0.2159048 0.4425765 0.23668876]]\r\n \"\"\"\r\n return enet_path(\r\n X,\r\n y,\r\n l1_ratio=1.0,\r\n eps=eps,\r\n n_alphas=n_alphas,\r\n alphas=alphas,\r\n precompute=precompute,\r\n Xy=Xy,\r\n copy_X=copy_X,\r\n coef_init=coef_init,\r\n verbose=verbose,\r\n positive=positive,\r\n return_n_iter=return_n_iter,\r\n **params,\r\n )\r\n\r\n\r\ndef enet_path(\r\n X,\r\n y,\r\n *,\r\n l1_ratio=0.5,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n precompute=\"auto\",\r\n Xy=None,\r\n copy_X=True,\r\n coef_init=None,\r\n verbose=False,\r\n return_n_iter=False,\r\n positive=False,\r\n check_input=True,\r\n **params,\r\n):\r\n \"\"\"Compute elastic net path with coordinate descent.\r\n\r\n The elastic net optimization function varies for mono and multi-outputs.\r\n\r\n For mono-output tasks it is::\r\n\r\n 1 / (2 * n_samples) * ||y - Xw||^2_2\r\n + alpha * l1_ratio * ||w||_1\r\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\r\n\r\n For multi-output tasks it is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||_Fro^2\r\n + alpha * l1_ratio * ||W||_21\r\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\r\n\r\n Where::\r\n\r\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\r\n\r\n i.e. the sum of norm of each row.\r\n\r\n Read more in the :ref:`User Guide <elastic_net>`.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data. Pass directly as Fortran-contiguous data to avoid\r\n unnecessary memory duplication. If ``y`` is mono-output then ``X``\r\n can be sparse.\r\n\r\n y : {array-like, sparse matrix} of shape (n_samples,) or \\\r\n (n_samples, n_targets)\r\n Target values.\r\n\r\n l1_ratio : float, default=0.5\r\n Number between 0 and 1 passed to elastic net (scaling between\r\n l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso.\r\n\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path.\r\n\r\n alphas : ndarray, default=None\r\n List of alphas where to compute the models.\r\n If None alphas are set automatically.\r\n\r\n precompute : 'auto', bool or array-like of shape \\\r\n (n_features, n_features), default='auto'\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. If set to ``'auto'`` let us decide. 
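# --- Editor's sketch (hedged) ---
# As the body of lasso_path earlier in this file shows, it simply forwards
# to enet_path with l1_ratio=1.0; a quick check of that equivalence on toy
# synthetic data.
import numpy as np
from sklearn.linear_model import enet_path, lasso_path

rng = np.random.default_rng(0)
X = rng.standard_normal((40, 5))
y = rng.standard_normal(40)

alphas_l, coefs_l, _ = lasso_path(X, y, n_alphas=10)
alphas_e, coefs_e, _ = enet_path(X, y, l1_ratio=1.0, n_alphas=10)
assert np.allclose(alphas_l, alphas_e)
assert np.allclose(coefs_l, coefs_e)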
The Gram\r\n matrix can also be passed as argument.\r\n\r\n Xy : array-like of shape (n_features,) or (n_features, n_targets),\\\r\n default=None\r\n Xy = np.dot(X.T, y) that can be precomputed. It is useful\r\n only when the Gram matrix is precomputed.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n coef_init : ndarray of shape (n_features, ), default=None\r\n The initial values of the coefficients.\r\n\r\n verbose : bool or int, default=False\r\n Amount of verbosity.\r\n\r\n return_n_iter : bool, default=False\r\n Whether to return the number of iterations or not.\r\n\r\n positive : bool, default=False\r\n If set to True, forces coefficients to be positive.\r\n (Only allowed when ``y.ndim == 1``).\r\n\r\n check_input : bool, default=True\r\n If set to False, the input validation checks are skipped (including the\r\n Gram matrix when provided). It is assumed that they are handled\r\n by the caller.\r\n\r\n **params : kwargs\r\n Keyword arguments passed to the coordinate descent solver.\r\n\r\n Returns\r\n -------\r\n alphas : ndarray of shape (n_alphas,)\r\n The alphas along the path where models are computed.\r\n\r\n coefs : ndarray of shape (n_features, n_alphas) or \\\r\n (n_targets, n_features, n_alphas)\r\n Coefficients along the path.\r\n\r\n dual_gaps : ndarray of shape (n_alphas,)\r\n The dual gaps at the end of the optimization for each alpha.\r\n\r\n n_iters : list of int\r\n The number of iterations taken by the coordinate descent optimizer to\r\n reach the specified tolerance for each alpha.\r\n (Is returned when ``return_n_iter`` is set to True).\r\n\r\n See Also\r\n --------\r\n MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2 mixed-norm \\\r\n as regularizer.\r\n MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in cross-validation.\r\n ElasticNet : Linear regression with combined L1 and L2 priors as regularizer.\r\n ElasticNetCV : Elastic Net model with iterative fitting along a regularization path.\r\n\r\n Notes\r\n -----\r\n For an example, see\r\n :ref:`examples/linear_model/plot_lasso_coordinate_descent_path.py\r\n <sphx_glr_auto_examples_linear_model_plot_lasso_coordinate_descent_path.py>`.\r\n \"\"\"\r\n X_offset_param = params.pop(\"X_offset\", None)\r\n X_scale_param = params.pop(\"X_scale\", None)\r\n tol = params.pop(\"tol\", 1e-4)\r\n max_iter = params.pop(\"max_iter\", 1000)\r\n random_state = params.pop(\"random_state\", None)\r\n selection = params.pop(\"selection\", \"cyclic\")\r\n\r\n if len(params) > 0:\r\n raise ValueError(\"Unexpected parameters in params\", params.keys())\r\n\r\n # We expect X and y to be already Fortran ordered when bypassing\r\n # checks\r\n if check_input:\r\n X = check_array(\r\n X,\r\n accept_sparse=\"csc\",\r\n dtype=[np.float64, np.float32],\r\n order=\"F\",\r\n copy=copy_X,\r\n )\r\n y = check_array(\r\n y,\r\n accept_sparse=\"csc\",\r\n dtype=X.dtype.type,\r\n order=\"F\",\r\n copy=False,\r\n ensure_2d=False,\r\n )\r\n if Xy is not None:\r\n # Xy should be a 1d contiguous array or a 2D C ordered array\r\n Xy = check_array(\r\n Xy, dtype=X.dtype.type, order=\"C\", copy=False, ensure_2d=False\r\n )\r\n\r\n n_samples, n_features = X.shape\r\n\r\n multi_output = False\r\n if y.ndim != 1:\r\n multi_output = True\r\n n_targets = y.shape[1]\r\n\r\n if multi_output and positive:\r\n raise ValueError(\"positive=True is not allowed for multi-output (y.ndim != 1)\")\r\n\r\n # MultiTaskElasticNet does not support sparse matrices\r\n if not 
multi_output and sparse.isspmatrix(X):\r\n if X_offset_param is not None:\r\n # As sparse matrices are not actually centered we need this\r\n # to be passed to the CD solver.\r\n X_sparse_scaling = X_offset_param / X_scale_param\r\n X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)\r\n else:\r\n X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)\r\n\r\n # X should be normalized and fit already if function is called\r\n # from ElasticNet.fit\r\n if check_input:\r\n X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(\r\n X,\r\n y,\r\n Xy,\r\n precompute,\r\n normalize=False,\r\n fit_intercept=False,\r\n copy=False,\r\n check_input=check_input,\r\n )\r\n if alphas is None:\r\n # No need to normalize of fit_intercept: it has been done\r\n # above\r\n alphas = _alpha_grid(\r\n X,\r\n y,\r\n Xy=Xy,\r\n l1_ratio=l1_ratio,\r\n fit_intercept=False,\r\n eps=eps,\r\n n_alphas=n_alphas,\r\n normalize=False,\r\n copy_X=False,\r\n )\r\n else:\r\n alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered\r\n\r\n n_alphas = len(alphas)\r\n dual_gaps = np.empty(n_alphas)\r\n n_iters = []\r\n\r\n rng = check_random_state(random_state)\r\n if selection not in [\"random\", \"cyclic\"]:\r\n raise ValueError(\"selection should be either random or cyclic.\")\r\n random = selection == \"random\"\r\n\r\n if not multi_output:\r\n coefs = np.empty((n_features, n_alphas), dtype=X.dtype)\r\n else:\r\n coefs = np.empty((n_targets, n_features, n_alphas), dtype=X.dtype)\r\n\r\n if coef_init is None:\r\n coef_ = np.zeros(coefs.shape[:-1], dtype=X.dtype, order=\"F\")\r\n else:\r\n coef_ = np.asfortranarray(coef_init, dtype=X.dtype)\r\n\r\n for i, alpha in enumerate(alphas):\r\n # account for n_samples scaling in objectives between here and cd_fast\r\n l1_reg = alpha * l1_ratio * n_samples\r\n l2_reg = alpha * (1.0 - l1_ratio) * n_samples\r\n if not multi_output and sparse.isspmatrix(X):\r\n model = cd_fast.sparse_enet_coordinate_descent(\r\n coef_,\r\n l1_reg,\r\n l2_reg,\r\n X.data,\r\n X.indices,\r\n X.indptr,\r\n y,\r\n X_sparse_scaling,\r\n max_iter,\r\n tol,\r\n rng,\r\n random,\r\n positive,\r\n )\r\n elif multi_output:\r\n model = cd_fast.enet_coordinate_descent_multi_task(\r\n coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random\r\n )\r\n elif isinstance(precompute, np.ndarray):\r\n # We expect precompute to be already Fortran ordered when bypassing\r\n # checks\r\n if check_input:\r\n precompute = check_array(precompute, dtype=X.dtype.type, order=\"C\")\r\n model = cd_fast.enet_coordinate_descent_gram(\r\n coef_,\r\n l1_reg,\r\n l2_reg,\r\n precompute,\r\n Xy,\r\n y,\r\n max_iter,\r\n tol,\r\n rng,\r\n random,\r\n positive,\r\n )\r\n elif precompute is False:\r\n model = cd_fast.enet_coordinate_descent(\r\n coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive\r\n )\r\n else:\r\n raise ValueError(\r\n \"Precompute should be one of True, False, 'auto' or array-like. 
Got %r\"\r\n % precompute\r\n )\r\n coef_, dual_gap_, eps_, n_iter_ = model\r\n coefs[..., i] = coef_\r\n # we correct the scale of the returned dual gap, as the objective\r\n # in cd_fast is n_samples * the objective in this docstring.\r\n dual_gaps[i] = dual_gap_ / n_samples\r\n n_iters.append(n_iter_)\r\n\r\n if verbose:\r\n if verbose > 2:\r\n print(model)\r\n elif verbose > 1:\r\n print(\"Path: %03i out of %03i\" % (i, n_alphas))\r\n else:\r\n sys.stderr.write(\".\")\r\n\r\n if return_n_iter:\r\n return alphas, coefs, dual_gaps, n_iters\r\n return alphas, coefs, dual_gaps\r\n\r\n\r\n###############################################################################\r\n# ElasticNet model\r\n\r\n\r\nclass ElasticNet(MultiOutputMixin, RegressorMixin, LinearModel):\r\n \"\"\"Linear regression with combined L1 and L2 priors as regularizer.\r\n\r\n Minimizes the objective function::\r\n\r\n 1 / (2 * n_samples) * ||y - Xw||^2_2\r\n + alpha * l1_ratio * ||w||_1\r\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\r\n\r\n If you are interested in controlling the L1 and L2 penalty\r\n separately, keep in mind that this is equivalent to::\r\n\r\n a * ||w||_1 + 0.5 * b * ||w||_2^2\r\n\r\n where::\r\n\r\n alpha = a + b and l1_ratio = a / (a + b)\r\n\r\n The parameter l1_ratio corresponds to alpha in the glmnet R package while\r\n alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio\r\n = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,\r\n unless you supply your own sequence of alpha.\r\n\r\n Read more in the :ref:`User Guide <elastic_net>`.\r\n\r\n Parameters\r\n ----------\r\n alpha : float, default=1.0\r\n Constant that multiplies the penalty terms. Defaults to 1.0.\r\n See the notes for the exact mathematical meaning of this\r\n parameter. ``alpha = 0`` is equivalent to an ordinary least square,\r\n solved by the :class:`LinearRegression` object. For numerical\r\n reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.\r\n Given this, you should use the :class:`LinearRegression` object.\r\n\r\n l1_ratio : float, default=0.5\r\n The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For\r\n ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it\r\n is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a\r\n combination of L1 and L2.\r\n\r\n fit_intercept : bool, default=True\r\n Whether the intercept should be estimated or not. If ``False``, the\r\n data is assumed to be already centered.\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n precompute : bool or array-like of shape (n_features, n_features),\\\r\n default=False\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. 
The Gram matrix can also be passed as argument.\r\n For sparse input this option is always ``False`` to preserve sparsity.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n warm_start : bool, default=False\r\n When set to ``True``, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary <warm_start>`.\r\n\r\n positive : bool, default=False\r\n When set to ``True``, forces the coefficients to be positive.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\r\n Parameter vector (w in the cost function formula).\r\n\r\n sparse_coef_ : sparse matrix of shape (n_features,) or \\\r\n (n_targets, n_features)\r\n Sparse representation of the `coef_`.\r\n\r\n intercept_ : float or ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n n_iter_ : list of int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance.\r\n\r\n dual_gap_ : float or ndarray of shape (n_targets,)\r\n Given param alpha, the dual gaps at the end of the optimization,\r\n same shape as each observation of y.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. 
versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n ElasticNetCV : Elastic net model with best model selection by\r\n cross-validation.\r\n SGDRegressor : Implements elastic net regression with incremental training.\r\n SGDClassifier : Implements logistic regression with elastic net penalty\r\n (``SGDClassifier(loss=\"log\", penalty=\"elasticnet\")``).\r\n\r\n Notes\r\n -----\r\n To avoid unnecessary memory duplication the X argument of the fit method\r\n should be directly passed as a Fortran-contiguous numpy array.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.linear_model import ElasticNet\r\n >>> from sklearn.datasets import make_regression\r\n\r\n >>> X, y = make_regression(n_features=2, random_state=0)\r\n >>> regr = ElasticNet(random_state=0)\r\n >>> regr.fit(X, y)\r\n ElasticNet(random_state=0)\r\n >>> print(regr.coef_)\r\n [18.83816048 64.55968825]\r\n >>> print(regr.intercept_)\r\n 1.451...\r\n >>> print(regr.predict([[0, 0]]))\r\n [1.451...]\r\n \"\"\"\r\n\r\n path = staticmethod(enet_path)\r\n\r\n def __init__(\r\n self,\r\n alpha=1.0,\r\n *,\r\n l1_ratio=0.5,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n precompute=False,\r\n max_iter=1000,\r\n copy_X=True,\r\n tol=1e-4,\r\n warm_start=False,\r\n positive=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.alpha = alpha\r\n self.l1_ratio = l1_ratio\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.precompute = precompute\r\n self.max_iter = max_iter\r\n self.copy_X = copy_X\r\n self.tol = tol\r\n self.warm_start = warm_start\r\n self.positive = positive\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n def fit(self, X, y, sample_weight=None, check_input=True):\r\n \"\"\"Fit model with coordinate descent.\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, sparse matrix} of (n_samples, n_features)\r\n Data.\r\n\r\n y : {ndarray, sparse matrix} of shape (n_samples,) or \\\r\n (n_samples, n_targets)\r\n Target. Will be cast to X's dtype if necessary.\r\n\r\n sample_weight : float or array-like of shape (n_samples,), default=None\r\n Sample weights. Internally, the `sample_weight` vector will be\r\n rescaled to sum to `n_samples`.\r\n\r\n .. versionadded:: 0.23\r\n\r\n check_input : bool, default=True\r\n Allow to bypass several input checking.\r\n Don't use this parameter unless you know what you do.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Fitted estimator.\r\n\r\n Notes\r\n -----\r\n Coordinate descent is an algorithm that considers each column of\r\n data at a time hence it will automatically convert the X input\r\n as a Fortran-contiguous numpy array if necessary.\r\n\r\n To avoid memory re-allocation it is advised to allocate the\r\n initial data in memory directly using that format.\r\n \"\"\"\r\n _normalize = _deprecate_normalize(\r\n self.normalize, default=False, estimator_name=self.__class__.__name__\r\n )\r\n\r\n if self.alpha == 0:\r\n warnings.warn(\r\n \"With alpha=0, this algorithm does not converge \"\r\n \"well. You are advised to use the LinearRegression \"\r\n \"estimator\",\r\n stacklevel=2,\r\n )\r\n\r\n if isinstance(self.precompute, str):\r\n raise ValueError(\r\n \"precompute should be one of True, False or array-like. 
Got %r\"\r\n % self.precompute\r\n )\r\n\r\n if (\r\n not isinstance(self.l1_ratio, numbers.Number)\r\n or self.l1_ratio < 0\r\n or self.l1_ratio > 1\r\n ):\r\n raise ValueError(\r\n f\"l1_ratio must be between 0 and 1; got l1_ratio={self.l1_ratio}\"\r\n )\r\n\r\n # Remember if X is copied\r\n X_copied = False\r\n # We expect X and y to be float64 or float32 Fortran ordered arrays\r\n # when bypassing checks\r\n if check_input:\r\n X_copied = self.copy_X and self.fit_intercept\r\n X, y = self._validate_data(\r\n X,\r\n y,\r\n accept_sparse=\"csc\",\r\n order=\"F\",\r\n dtype=[np.float64, np.float32],\r\n copy=X_copied,\r\n multi_output=True,\r\n y_numeric=True,\r\n )\r\n y = check_array(\r\n y, order=\"F\", copy=False, dtype=X.dtype.type, ensure_2d=False\r\n )\r\n\r\n n_samples, n_features = X.shape\r\n alpha = self.alpha\r\n\r\n if isinstance(sample_weight, numbers.Number):\r\n sample_weight = None\r\n if sample_weight is not None:\r\n if check_input:\r\n if sparse.issparse(X):\r\n raise ValueError(\r\n \"Sample weights do not (yet) support sparse matrices.\"\r\n )\r\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\r\n # TLDR: Rescale sw to sum up to n_samples.\r\n # Long: The objective function of Enet\r\n #\r\n # 1/2 * np.average(squared error, weights=sw)\r\n # + alpha * penalty (1)\r\n #\r\n # is invariant under rescaling of sw.\r\n # But enet_path coordinate descent minimizes\r\n #\r\n # 1/2 * sum(squared error) + alpha * penalty\r\n #\r\n # and therefore sets\r\n #\r\n # alpha = n_samples * alpha\r\n #\r\n # inside its function body, which results in an objective\r\n # equivalent to (1) without sw.\r\n # With sw, however, enet_path should set\r\n #\r\n # alpha = sum(sw) * alpha (2)\r\n #\r\n # Therefore, using the freedom of Eq. (1) to rescale alpha before\r\n # calling enet_path, we do\r\n #\r\n # alpha = sum(sw) / n_samples * alpha\r\n #\r\n # such that the rescaling inside enet_path is exactly Eq. 
(2)\r\n # because now sum(sw) = n_samples.\r\n sample_weight = sample_weight * (n_samples / np.sum(sample_weight))\r\n # Note: Alternatively, we could also have rescaled alpha instead\r\n # of sample_weight:\r\n #\r\n # alpha *= np.sum(sample_weight) / n_samples\r\n\r\n # Ensure copying happens only once, don't do it again if done above.\r\n # X and y will be rescaled if sample_weight is not None, order='F'\r\n # ensures that the returned X and y are still F-contiguous.\r\n should_copy = self.copy_X and not X_copied\r\n X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(\r\n X,\r\n y,\r\n None,\r\n self.precompute,\r\n _normalize,\r\n self.fit_intercept,\r\n copy=should_copy,\r\n check_input=check_input,\r\n sample_weight=sample_weight,\r\n )\r\n # coordinate descent needs F-ordered arrays and _pre_fit might have\r\n # called _rescale_data\r\n if check_input or sample_weight is not None:\r\n X, y = _set_order(X, y, order=\"F\")\r\n if y.ndim == 1:\r\n y = y[:, np.newaxis]\r\n if Xy is not None and Xy.ndim == 1:\r\n Xy = Xy[:, np.newaxis]\r\n\r\n n_targets = y.shape[1]\r\n\r\n if self.selection not in [\"cyclic\", \"random\"]:\r\n raise ValueError(\"selection should be either random or cyclic.\")\r\n\r\n if not self.warm_start or not hasattr(self, \"coef_\"):\r\n coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order=\"F\")\r\n else:\r\n coef_ = self.coef_\r\n if coef_.ndim == 1:\r\n coef_ = coef_[np.newaxis, :]\r\n\r\n dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)\r\n self.n_iter_ = []\r\n\r\n for k in range(n_targets):\r\n if Xy is not None:\r\n this_Xy = Xy[:, k]\r\n else:\r\n this_Xy = None\r\n _, this_coef, this_dual_gap, this_iter = self.path(\r\n X,\r\n y[:, k],\r\n l1_ratio=self.l1_ratio,\r\n eps=None,\r\n n_alphas=None,\r\n alphas=[alpha],\r\n precompute=precompute,\r\n Xy=this_Xy,\r\n copy_X=True,\r\n verbose=False,\r\n tol=self.tol,\r\n positive=self.positive,\r\n X_offset=X_offset,\r\n X_scale=X_scale,\r\n return_n_iter=True,\r\n coef_init=coef_[k],\r\n max_iter=self.max_iter,\r\n random_state=self.random_state,\r\n selection=self.selection,\r\n check_input=False,\r\n )\r\n coef_[k] = this_coef[:, 0]\r\n dual_gaps_[k] = this_dual_gap[0]\r\n self.n_iter_.append(this_iter[0])\r\n\r\n if n_targets == 1:\r\n self.n_iter_ = self.n_iter_[0]\r\n self.coef_ = coef_[0]\r\n self.dual_gap_ = dual_gaps_[0]\r\n else:\r\n self.coef_ = coef_\r\n self.dual_gap_ = dual_gaps_\r\n\r\n self._set_intercept(X_offset, y_offset, X_scale)\r\n\r\n # workaround since _set_intercept will cast self.coef_ into X.dtype\r\n self.coef_ = np.asarray(self.coef_, dtype=X.dtype)\r\n\r\n # return self for chaining fit and predict calls\r\n return self\r\n\r\n @property\r\n def sparse_coef_(self):\r\n \"\"\"Sparse representation of the fitted `coef_`.\"\"\"\r\n return sparse.csr_matrix(self.coef_)\r\n\r\n def _decision_function(self, X):\r\n \"\"\"Decision function of the linear model.\r\n\r\n Parameters\r\n ----------\r\n X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)\r\n\r\n Returns\r\n -------\r\n T : ndarray of shape (n_samples,)\r\n The predicted decision function.\r\n \"\"\"\r\n check_is_fitted(self)\r\n if sparse.isspmatrix(X):\r\n return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\r\n else:\r\n return super()._decision_function(X)\r\n\r\n\r\n###############################################################################\r\n# Lasso model\r\n\r\n\r\nclass Lasso(ElasticNet):\r\n \"\"\"Linear Model trained with L1 prior as regularizer (aka the 
Lasso).\r\n\r\n The optimization objective for Lasso is::\r\n\r\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\r\n\r\n Technically the Lasso model is optimizing the same objective function as\r\n the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).\r\n\r\n Read more in the :ref:`User Guide <lasso>`.\r\n\r\n Parameters\r\n ----------\r\n alpha : float, default=1.0\r\n Constant that multiplies the L1 term. Defaults to 1.0.\r\n ``alpha = 0`` is equivalent to an ordinary least square, solved\r\n by the :class:`LinearRegression` object. For numerical\r\n reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.\r\n Given this, you should use the :class:`LinearRegression` object.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to False, no intercept will be used in calculations\r\n (i.e. data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n precompute : 'auto', bool or array-like of shape (n_features, n_features),\\\r\n precompute : bool or array-like of shape (n_features, n_features),\\\r\n default=False\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. The Gram matrix can also be passed as argument.\r\n For sparse input this option is always ``False`` to preserve sparsity.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n warm_start : bool, default=False\r\n When set to True, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary <warm_start>`.\r\n\r\n positive : bool, default=False\r\n When set to ``True``, forces the coefficients to be positive.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. 
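# --- Editor's sketch (hedged) ---
# A quick check of the statement in the Lasso docstring that it optimises
# the same objective as ElasticNet with l1_ratio=1.0; synthetic data, alpha
# chosen arbitrarily.
import numpy as np
from sklearn.linear_model import ElasticNet, Lasso

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 8))
y = X @ rng.standard_normal(8) + 0.1 * rng.standard_normal(50)

lasso = Lasso(alpha=0.1).fit(X, y)
enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)
assert np.allclose(lasso.coef_, enet.coef_)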
This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\r\n Parameter vector (w in the cost function formula).\r\n\r\n dual_gap_ : float or ndarray of shape (n_targets,)\r\n Given param alpha, the dual gaps at the end of the optimization,\r\n same shape as each observation of y.\r\n\r\n sparse_coef_ : sparse matrix of shape (n_features, 1) or \\\r\n (n_targets, n_features)\r\n Readonly property derived from ``coef_``.\r\n\r\n intercept_ : float or ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n n_iter_ : int or list of int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n lars_path : Regularization path using LARS.\r\n lasso_path : Regularization path using Lasso.\r\n LassoLars : Lasso Path along the regularization parameter usingLARS algorithm.\r\n LassoCV : Lasso alpha parameter by cross-validation.\r\n LassoLarsCV : Lasso least angle parameter algorithm by cross-validation.\r\n sklearn.decomposition.sparse_encode : Sparse coding array estimator.\r\n\r\n Notes\r\n -----\r\n The algorithm used to fit the model is coordinate descent.\r\n\r\n To avoid unnecessary memory duplication the X argument of the fit method\r\n should be directly passed as a Fortran-contiguous numpy array.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import linear_model\r\n >>> clf = linear_model.Lasso(alpha=0.1)\r\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\r\n Lasso(alpha=0.1)\r\n >>> print(clf.coef_)\r\n [0.85 0. 
]\r\n >>> print(clf.intercept_)\r\n 0.15...\r\n \"\"\"\r\n\r\n path = staticmethod(enet_path)\r\n\r\n def __init__(\r\n self,\r\n alpha=1.0,\r\n *,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n precompute=False,\r\n copy_X=True,\r\n max_iter=1000,\r\n tol=1e-4,\r\n warm_start=False,\r\n positive=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n super().__init__(\r\n alpha=alpha,\r\n l1_ratio=1.0,\r\n fit_intercept=fit_intercept,\r\n normalize=normalize,\r\n precompute=precompute,\r\n copy_X=copy_X,\r\n max_iter=max_iter,\r\n tol=tol,\r\n warm_start=warm_start,\r\n positive=positive,\r\n random_state=random_state,\r\n selection=selection,\r\n )\r\n\r\n\r\n###############################################################################\r\n# Functions for CV with paths functions\r\n\r\n\r\ndef _path_residuals(\r\n X,\r\n y,\r\n sample_weight,\r\n train,\r\n test,\r\n normalize,\r\n fit_intercept,\r\n path,\r\n path_params,\r\n alphas=None,\r\n l1_ratio=1,\r\n X_order=None,\r\n dtype=None,\r\n):\r\n \"\"\"Returns the MSE for the models computed by 'path'.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\r\n Target values.\r\n\r\n sample_weight : None or array-like of shape (n_samples,)\r\n Sample weights.\r\n\r\n train : list of indices\r\n The indices of the train set.\r\n\r\n test : list of indices\r\n The indices of the test set.\r\n\r\n path : callable\r\n Function returning a list of models on the path. See\r\n enet_path for an example of signature.\r\n\r\n path_params : dictionary\r\n Parameters passed to the path function.\r\n\r\n alphas : array-like, default=None\r\n Array of float that is used for cross-validation. If not\r\n provided, computed using 'path'.\r\n\r\n l1_ratio : float, default=1\r\n float between 0 and 1 passed to ElasticNet (scaling between\r\n l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an\r\n L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0\r\n < l1_ratio < 1``, the penalty is a combination of L1 and L2.\r\n\r\n X_order : {'F', 'C'}, default=None\r\n The order of the arrays expected by the path function to\r\n avoid memory copies.\r\n\r\n dtype : a numpy dtype, default=None\r\n The dtype of the arrays expected by the path function to\r\n avoid memory copies.\r\n \"\"\"\r\n X_train = X[train]\r\n y_train = y[train]\r\n X_test = X[test]\r\n y_test = y[test]\r\n if sample_weight is None:\r\n sw_train, sw_test = None, None\r\n else:\r\n sw_train = sample_weight[train]\r\n sw_test = sample_weight[test]\r\n n_samples = X_train.shape[0]\r\n # TLDR: Rescale sw_train to sum up to n_samples on the training set.\r\n # See TLDR and long comment inside ElasticNet.fit.\r\n sw_train *= n_samples / np.sum(sw_train)\r\n # Note: Alternatively, we could also have rescaled alpha instead\r\n # of sample_weight:\r\n #\r\n # alpha *= np.sum(sample_weight) / n_samples\r\n\r\n if not sparse.issparse(X):\r\n for array, array_input in (\r\n (X_train, X),\r\n (y_train, y),\r\n (X_test, X),\r\n (y_test, y),\r\n ):\r\n if array.base is not array_input and not array.flags[\"WRITEABLE\"]:\r\n # fancy indexing should create a writable copy but it doesn't\r\n # for read-only memmaps (cf. 
numpy#14132).\r\n array.setflags(write=True)\r\n\r\n if y.ndim == 1:\r\n precompute = path_params[\"precompute\"]\r\n else:\r\n # No Gram variant of multi-task exists right now.\r\n # Fall back to default enet_multitask\r\n precompute = False\r\n\r\n X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(\r\n X_train,\r\n y_train,\r\n None,\r\n precompute,\r\n normalize,\r\n fit_intercept,\r\n copy=False,\r\n sample_weight=sw_train,\r\n )\r\n\r\n path_params = path_params.copy()\r\n path_params[\"Xy\"] = Xy\r\n path_params[\"X_offset\"] = X_offset\r\n path_params[\"X_scale\"] = X_scale\r\n path_params[\"precompute\"] = precompute\r\n path_params[\"copy_X\"] = False\r\n path_params[\"alphas\"] = alphas\r\n\r\n if \"l1_ratio\" in path_params:\r\n path_params[\"l1_ratio\"] = l1_ratio\r\n\r\n # Do the ordering and type casting here, as if it is done in the path,\r\n # X is copied and a reference is kept here\r\n X_train = check_array(X_train, accept_sparse=\"csc\", dtype=dtype, order=X_order)\r\n alphas, coefs, _ = path(X_train, y_train, **path_params)\r\n del X_train, y_train\r\n\r\n if y.ndim == 1:\r\n # Doing this so that it becomes coherent with multioutput.\r\n coefs = coefs[np.newaxis, :, :]\r\n y_offset = np.atleast_1d(y_offset)\r\n y_test = y_test[:, np.newaxis]\r\n\r\n if normalize:\r\n nonzeros = np.flatnonzero(X_scale)\r\n coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]\r\n\r\n intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)\r\n X_test_coefs = safe_sparse_dot(X_test, coefs)\r\n residues = X_test_coefs - y_test[:, :, np.newaxis]\r\n residues += intercepts\r\n if sample_weight is None:\r\n this_mse = (residues ** 2).mean(axis=0)\r\n else:\r\n this_mse = np.average(residues ** 2, weights=sw_test, axis=0)\r\n\r\n return this_mse.mean(axis=0)\r\n\r\n\r\nclass LinearModelCV(MultiOutputMixin, LinearModel, ABC):\r\n \"\"\"Base class for iterative model fitting along a regularization path.\"\"\"\r\n\r\n @abstractmethod\r\n def __init__(\r\n self,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n precompute=\"auto\",\r\n max_iter=1000,\r\n tol=1e-4,\r\n copy_X=True,\r\n cv=None,\r\n verbose=False,\r\n n_jobs=None,\r\n positive=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.eps = eps\r\n self.n_alphas = n_alphas\r\n self.alphas = alphas\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.precompute = precompute\r\n self.max_iter = max_iter\r\n self.tol = tol\r\n self.copy_X = copy_X\r\n self.cv = cv\r\n self.verbose = verbose\r\n self.n_jobs = n_jobs\r\n self.positive = positive\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n @abstractmethod\r\n def _get_estimator(self):\r\n \"\"\"Model to be fitted after the best alpha has been determined.\"\"\"\r\n\r\n @abstractmethod\r\n def _is_multitask(self):\r\n \"\"\"Bool indicating if class is meant for multidimensional target.\"\"\"\r\n\r\n @staticmethod\r\n @abstractmethod\r\n def path(X, y, **kwargs):\r\n \"\"\"Compute path with coordinate descent.\"\"\"\r\n\r\n def fit(self, X, y, sample_weight=None):\r\n \"\"\"Fit linear model with coordinate descent.\r\n\r\n Fit is on grid of alphas and best alpha estimated by cross-validation.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training data. Pass directly as Fortran-contiguous data\r\n to avoid unnecessary memory duplication. 
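# --- Editor's sketch (hedged) of what _path_residuals computes ---
# For one (train, test) split in the simplest case (dense X, no
# sample_weight, intercept handled by centering on the training fold):
# fit the path on the training fold, then score every alpha on the
# held-out fold with the mean squared error.  Data are synthetic.
import numpy as np
from sklearn.linear_model import lasso_path
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
X = rng.standard_normal((60, 5))
y = X @ rng.standard_normal(5) + 0.1 * rng.standard_normal(60)

train, test = next(KFold(n_splits=5).split(X))
X_tr, y_tr = X[train], y[train]
X_te, y_te = X[test], y[test]

X_mean, y_mean = X_tr.mean(axis=0), y_tr.mean()
alphas, coefs, _ = lasso_path(X_tr - X_mean, y_tr - y_mean, n_alphas=20)

pred = (X_te - X_mean) @ coefs + y_mean            # shape (n_test, n_alphas)
mse_per_alpha = ((pred - y_te[:, None]) ** 2).mean(axis=0)
print(mse_per_alpha.shape)                         # (20,)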
If y is mono-output,\r\n X can be sparse.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\r\n Target values.\r\n\r\n sample_weight : float or array-like of shape (n_samples,), \\\r\n default=None\r\n Sample weights used for fitting and evaluation of the weighted\r\n mean squared error of each cv-fold. Note that the cross validated\r\n MSE that is finally used to find the best model is the unweighted\r\n mean over the (weighted) MSEs of each test fold.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Returns an instance of fitted model.\r\n \"\"\"\r\n\r\n # Do as _deprecate_normalize but without warning as it's raised\r\n # below during the refitting on the best alpha.\r\n _normalize = self.normalize\r\n if _normalize == \"deprecated\":\r\n _normalize = False\r\n\r\n # This makes sure that there is no duplication in memory.\r\n # Dealing right with copy_X is important in the following:\r\n # Multiple functions touch X and subsamples of X and can induce a\r\n # lot of duplication of memory\r\n copy_X = self.copy_X and self.fit_intercept\r\n\r\n check_y_params = dict(\r\n copy=False, dtype=[np.float64, np.float32], ensure_2d=False\r\n )\r\n if isinstance(X, np.ndarray) or sparse.isspmatrix(X):\r\n # Keep a reference to X\r\n reference_to_old_X = X\r\n # Let us not impose fortran ordering so far: it is\r\n # not useful for the cross-validation loop and will be done\r\n # by the model fitting itself\r\n\r\n # Need to validate separately here.\r\n # We can't pass multi_ouput=True because that would allow y to be\r\n # csr. We also want to allow y to be 64 or 32 but check_X_y only\r\n # allows to convert for 64.\r\n check_X_params = dict(\r\n accept_sparse=\"csc\", dtype=[np.float64, np.float32], copy=False\r\n )\r\n X, y = self._validate_data(\r\n X, y, validate_separately=(check_X_params, check_y_params)\r\n )\r\n if sparse.isspmatrix(X):\r\n if hasattr(reference_to_old_X, \"data\") and not np.may_share_memory(\r\n reference_to_old_X.data, X.data\r\n ):\r\n # X is a sparse matrix and has been copied\r\n copy_X = False\r\n elif not np.may_share_memory(reference_to_old_X, X):\r\n # X has been copied\r\n copy_X = False\r\n del reference_to_old_X\r\n else:\r\n # Need to validate separately here.\r\n # We can't pass multi_ouput=True because that would allow y to be\r\n # csr. 
We also want to allow y to be 64 or 32 but check_X_y only\r\n # allows to convert for 64.\r\n check_X_params = dict(\r\n accept_sparse=\"csc\",\r\n dtype=[np.float64, np.float32],\r\n order=\"F\",\r\n copy=copy_X,\r\n )\r\n X, y = self._validate_data(\r\n X, y, validate_separately=(check_X_params, check_y_params)\r\n )\r\n copy_X = False\r\n\r\n check_consistent_length(X, y)\r\n\r\n if not self._is_multitask():\r\n if y.ndim > 1 and y.shape[1] > 1:\r\n raise ValueError(\r\n \"For multi-task outputs, use MultiTask%s\" % self.__class__.__name__\r\n )\r\n y = column_or_1d(y, warn=True)\r\n else:\r\n if sparse.isspmatrix(X):\r\n raise TypeError(\"X should be dense but a sparse matrix waspassed\")\r\n elif y.ndim == 1:\r\n raise ValueError(\r\n \"For mono-task outputs, use %sCV\" % self.__class__.__name__[9:]\r\n )\r\n\r\n if isinstance(sample_weight, numbers.Number):\r\n sample_weight = None\r\n if sample_weight is not None:\r\n if sparse.issparse(X):\r\n raise ValueError(\"Sample weights do not (yet) support sparse matrices.\")\r\n sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\r\n\r\n model = self._get_estimator()\r\n\r\n if self.selection not in [\"random\", \"cyclic\"]:\r\n raise ValueError(\"selection should be either random or cyclic.\")\r\n\r\n # All LinearModelCV parameters except 'cv' are acceptable\r\n path_params = self.get_params()\r\n\r\n # FIXME: 'normalize' to be removed in 1.2\r\n # path_params[\"normalize\"] = _normalize\r\n # Pop `intercept` and `normalize` that are not parameter of the path\r\n # function\r\n path_params.pop(\"normalize\", None)\r\n path_params.pop(\"fit_intercept\", None)\r\n\r\n if \"l1_ratio\" in path_params:\r\n l1_ratios = np.atleast_1d(path_params[\"l1_ratio\"])\r\n # For the first path, we need to set l1_ratio\r\n path_params[\"l1_ratio\"] = l1_ratios[0]\r\n else:\r\n l1_ratios = [\r\n 1,\r\n ]\r\n path_params.pop(\"cv\", None)\r\n path_params.pop(\"n_jobs\", None)\r\n\r\n alphas = self.alphas\r\n n_l1_ratio = len(l1_ratios)\r\n if alphas is None:\r\n alphas = [\r\n _alpha_grid(\r\n X,\r\n y,\r\n l1_ratio=l1_ratio,\r\n fit_intercept=self.fit_intercept,\r\n eps=self.eps,\r\n n_alphas=self.n_alphas,\r\n normalize=_normalize,\r\n copy_X=self.copy_X,\r\n )\r\n for l1_ratio in l1_ratios\r\n ]\r\n else:\r\n # Making sure alphas is properly ordered.\r\n alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))\r\n # We want n_alphas to be the number of alphas used for each l1_ratio.\r\n n_alphas = len(alphas[0])\r\n path_params.update({\"n_alphas\": n_alphas})\r\n\r\n path_params[\"copy_X\"] = copy_X\r\n # We are not computing in parallel, we can modify X\r\n # inplace in the folds\r\n if effective_n_jobs(self.n_jobs) > 1:\r\n path_params[\"copy_X\"] = False\r\n\r\n # init cross-validation generator\r\n cv = check_cv(self.cv)\r\n\r\n # Compute path for all folds and compute MSE to get the best alpha\r\n folds = list(cv.split(X, y))\r\n best_mse = np.inf\r\n\r\n # We do a double for loop folded in one, in order to be able to\r\n # iterate in parallel on l1_ratio and folds\r\n jobs = (\r\n delayed(_path_residuals)(\r\n X,\r\n y,\r\n sample_weight,\r\n train,\r\n test,\r\n _normalize,\r\n self.fit_intercept,\r\n self.path,\r\n path_params,\r\n alphas=this_alphas,\r\n l1_ratio=this_l1_ratio,\r\n X_order=\"F\",\r\n dtype=X.dtype.type,\r\n )\r\n for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)\r\n for train, test in folds\r\n )\r\n mse_paths = Parallel(\r\n n_jobs=self.n_jobs,\r\n verbose=self.verbose,\r\n 
**_joblib_parallel_args(prefer=\"threads\"),\r\n )(jobs)\r\n mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))\r\n # The mean is computed over folds.\r\n mean_mse = np.mean(mse_paths, axis=1)\r\n self.mse_path_ = np.squeeze(np.moveaxis(mse_paths, 2, 1))\r\n for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse):\r\n i_best_alpha = np.argmin(mse_alphas)\r\n this_best_mse = mse_alphas[i_best_alpha]\r\n if this_best_mse < best_mse:\r\n best_alpha = l1_alphas[i_best_alpha]\r\n best_l1_ratio = l1_ratio\r\n best_mse = this_best_mse\r\n\r\n self.l1_ratio_ = best_l1_ratio\r\n self.alpha_ = best_alpha\r\n if self.alphas is None:\r\n self.alphas_ = np.asarray(alphas)\r\n if n_l1_ratio == 1:\r\n self.alphas_ = self.alphas_[0]\r\n # Remove duplicate alphas in case alphas is provided.\r\n else:\r\n self.alphas_ = np.asarray(alphas[0])\r\n\r\n # Refit the model with the parameters selected\r\n common_params = {\r\n name: value\r\n for name, value in self.get_params().items()\r\n if name in model.get_params()\r\n }\r\n model.set_params(**common_params)\r\n model.alpha = best_alpha\r\n model.l1_ratio = best_l1_ratio\r\n model.copy_X = copy_X\r\n precompute = getattr(self, \"precompute\", None)\r\n if isinstance(precompute, str) and precompute == \"auto\":\r\n model.precompute = False\r\n\r\n if sample_weight is None:\r\n # MultiTaskElasticNetCV does not (yet) support sample_weight, even\r\n # not sample_weight=None.\r\n model.fit(X, y)\r\n else:\r\n model.fit(X, y, sample_weight=sample_weight)\r\n if not hasattr(self, \"l1_ratio\"):\r\n del self.l1_ratio_\r\n self.coef_ = model.coef_\r\n self.intercept_ = model.intercept_\r\n self.dual_gap_ = model.dual_gap_\r\n self.n_iter_ = model.n_iter_\r\n return self\r\n\r\n def _more_tags(self):\r\n # Note: check_sample_weights_invariance(kind='ones') should work, but\r\n # currently we can only mark a whole test as xfail.\r\n return {\r\n \"_xfail_checks\": {\r\n \"check_sample_weights_invariance\": (\r\n \"zero sample_weight is not equivalent to removing samples\"\r\n ),\r\n }\r\n }\r\n\r\n\r\nclass LassoCV(RegressorMixin, LinearModelCV):\r\n \"\"\"Lasso linear model with iterative fitting along a regularization path.\r\n\r\n See glossary entry for :term:`cross-validation estimator`.\r\n\r\n The best model is selected by cross-validation.\r\n\r\n The optimization objective for Lasso is::\r\n\r\n (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\r\n\r\n Read more in the :ref:`User Guide <lasso>`.\r\n\r\n Parameters\r\n ----------\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path.\r\n\r\n alphas : ndarray, default=None\r\n List of alphas where to compute the models.\r\n If ``None`` alphas are set automatically.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. 
deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n precompute : 'auto', bool or array-like of shape \\\r\n (n_features, n_features), default='auto'\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. If set to ``'auto'`` let us decide. The Gram\r\n matrix can also be passed as argument.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n cv : int, cross-validation generator or iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross-validation,\r\n - int, to specify the number of folds.\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n verbose : bool or int, default=False\r\n Amount of verbosity.\r\n\r\n n_jobs : int, default=None\r\n Number of CPUs to use during the cross validation.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n positive : bool, default=False\r\n If positive, restrict regression coefficients to be positive.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n alpha_ : float\r\n The amount of penalization chosen by cross validation.\r\n\r\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\r\n Parameter vector (w in the cost function formula).\r\n\r\n intercept_ : float or ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n mse_path_ : ndarray of shape (n_alphas, n_folds)\r\n Mean square error for the test set on each fold, varying alpha.\r\n\r\n alphas_ : ndarray of shape (n_alphas,)\r\n The grid of alphas used for fitting.\r\n\r\n dual_gap_ : float or ndarray of shape (n_targets,)\r\n The dual gap at the end of the optimization for the optimal alpha\r\n (``alpha_``).\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance for the optimal alpha.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. 
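# --- Editor's sketch (hedged) reading the LassoCV attributes ---
# After fitting, mse_path_ has one row per alpha and one column per CV fold,
# and alpha_ is the grid value with the lowest mean MSE across folds.
# Data are synthetic.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LassoCV

X, y = make_regression(n_samples=100, n_features=10, noise=4, random_state=0)
reg = LassoCV(cv=5, n_alphas=30, random_state=0).fit(X, y)

print(reg.mse_path_.shape)                 # (30, 5): (n_alphas, n_folds)
best = reg.alphas_[np.argmin(reg.mse_path_.mean(axis=1))]
assert np.isclose(best, reg.alpha_)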
Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n lars_path : Compute Least Angle Regression or Lasso path using LARS\r\n algorithm.\r\n lasso_path : Compute Lasso path with coordinate descent.\r\n Lasso : The Lasso is a linear model that estimates sparse coefficients.\r\n LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.\r\n LassoCV : Lasso linear model with iterative fitting along a regularization\r\n path.\r\n LassoLarsCV : Cross-validated Lasso using the LARS algorithm.\r\n\r\n Notes\r\n -----\r\n For an example, see\r\n :ref:`examples/linear_model/plot_lasso_model_selection.py\r\n <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.\r\n\r\n To avoid unnecessary memory duplication the X argument of the fit method\r\n should be directly passed as a Fortran-contiguous numpy array.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.linear_model import LassoCV\r\n >>> from sklearn.datasets import make_regression\r\n >>> X, y = make_regression(noise=4, random_state=0)\r\n >>> reg = LassoCV(cv=5, random_state=0).fit(X, y)\r\n >>> reg.score(X, y)\r\n 0.9993...\r\n >>> reg.predict(X[:1,])\r\n array([-78.4951...])\r\n \"\"\"\r\n\r\n path = staticmethod(lasso_path)\r\n\r\n def __init__(\r\n self,\r\n *,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n precompute=\"auto\",\r\n max_iter=1000,\r\n tol=1e-4,\r\n copy_X=True,\r\n cv=None,\r\n verbose=False,\r\n n_jobs=None,\r\n positive=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n super().__init__(\r\n eps=eps,\r\n n_alphas=n_alphas,\r\n alphas=alphas,\r\n fit_intercept=fit_intercept,\r\n normalize=normalize,\r\n precompute=precompute,\r\n max_iter=max_iter,\r\n tol=tol,\r\n copy_X=copy_X,\r\n cv=cv,\r\n verbose=verbose,\r\n n_jobs=n_jobs,\r\n positive=positive,\r\n random_state=random_state,\r\n selection=selection,\r\n )\r\n\r\n def _get_estimator(self):\r\n return Lasso()\r\n\r\n def _is_multitask(self):\r\n return False\r\n\r\n def _more_tags(self):\r\n return {\"multioutput\": False}\r\n\r\n\r\nclass ElasticNetCV(RegressorMixin, LinearModelCV):\r\n \"\"\"Elastic Net model with iterative fitting along a regularization path.\r\n\r\n See glossary entry for :term:`cross-validation estimator`.\r\n\r\n Read more in the :ref:`User Guide <elastic_net>`.\r\n\r\n Parameters\r\n ----------\r\n l1_ratio : float or list of float, default=0.5\r\n Float between 0 and 1 passed to ElasticNet (scaling between\r\n l1 and l2 penalties). For ``l1_ratio = 0``\r\n the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.\r\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2\r\n This parameter can be a list, in which case the different\r\n values are tested by cross-validation and the one giving the best\r\n prediction score is used. Note that a good choice of list of\r\n values for l1_ratio is often to put more values close to 1\r\n (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\r\n .9, .95, .99, 1]``.\r\n\r\n eps : float, default=1e-3\r\n Length of the path. 
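# --- Editor's sketch (hedged) of the l1_ratio list recommendation above ---
# Values biased towards the Lasso end, with the best (l1_ratio, alpha) pair
# picked by cross-validation.  Data are synthetic.
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNetCV

X, y = make_regression(n_samples=100, n_features=10, noise=4, random_state=0)
regr = ElasticNetCV(
    l1_ratio=[0.1, 0.5, 0.7, 0.9, 0.95, 0.99, 1.0],
    cv=5,
    random_state=0,
).fit(X, y)
print(regr.l1_ratio_, regr.alpha_)   # chosen mixing parameter and penalty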
``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path, used for each l1_ratio.\r\n\r\n alphas : ndarray, default=None\r\n List of alphas where to compute the models.\r\n If None alphas are set automatically.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n precompute : 'auto', bool or array-like of shape \\\r\n (n_features, n_features), default='auto'\r\n Whether to use a precomputed Gram matrix to speed up\r\n calculations. If set to ``'auto'`` let us decide. The Gram\r\n matrix can also be passed as argument.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n cv : int, cross-validation generator or iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross-validation,\r\n - int, to specify the number of folds.\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n verbose : bool or int, default=0\r\n Amount of verbosity.\r\n\r\n n_jobs : int, default=None\r\n Number of CPUs to use during the cross validation.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n positive : bool, default=False\r\n When set to ``True``, forces the coefficients to be positive.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. 
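# --- Editor's follow-up sketch (hedged) ---
# With a list of l1_ratio values the CV attributes documented further below
# gain a leading l1_ratio axis: mse_path_ becomes
# (n_l1_ratio, n_alphas, n_folds) and alphas_ becomes (n_l1_ratio, n_alphas).
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNetCV

X, y = make_regression(n_samples=80, n_features=8, noise=2, random_state=0)
regr = ElasticNetCV(l1_ratio=[0.2, 0.5, 1.0], n_alphas=25, cv=4).fit(X, y)
print(regr.mse_path_.shape)   # (3, 25, 4)
print(regr.alphas_.shape)     # (3, 25)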
This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n alpha_ : float\r\n The amount of penalization chosen by cross validation.\r\n\r\n l1_ratio_ : float\r\n The compromise between l1 and l2 penalization chosen by\r\n cross validation.\r\n\r\n coef_ : ndarray of shape (n_features,) or (n_targets, n_features)\r\n Parameter vector (w in the cost function formula).\r\n\r\n intercept_ : float or ndarray of shape (n_targets, n_features)\r\n Independent term in the decision function.\r\n\r\n mse_path_ : ndarray of shape (n_l1_ratio, n_alpha, n_folds)\r\n Mean square error for the test set on each fold, varying l1_ratio and\r\n alpha.\r\n\r\n alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)\r\n The grid of alphas used for fitting, for each l1_ratio.\r\n\r\n dual_gap_ : float\r\n The dual gaps at the end of the optimization for the optimal alpha.\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance for the optimal alpha.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n enet_path : Compute elastic net path with coordinate descent.\r\n ElasticNet : Linear regression with combined L1 and L2 priors as regularizer.\r\n\r\n Notes\r\n -----\r\n For an example, see\r\n :ref:`examples/linear_model/plot_lasso_model_selection.py\r\n <sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.\r\n\r\n To avoid unnecessary memory duplication the X argument of the fit method\r\n should be directly passed as a Fortran-contiguous numpy array.\r\n\r\n The parameter l1_ratio corresponds to alpha in the glmnet R package\r\n while alpha corresponds to the lambda parameter in glmnet.\r\n More specifically, the optimization objective is::\r\n\r\n 1 / (2 * n_samples) * ||y - Xw||^2_2\r\n + alpha * l1_ratio * ||w||_1\r\n + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2\r\n\r\n If you are interested in controlling the L1 and L2 penalty\r\n separately, keep in mind that this is equivalent to::\r\n\r\n a * L1 + b * L2\r\n\r\n for::\r\n\r\n alpha = a + b and l1_ratio = a / (a + b).\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.linear_model import ElasticNetCV\r\n >>> from sklearn.datasets import make_regression\r\n\r\n >>> X, y = make_regression(n_features=2, random_state=0)\r\n >>> regr = ElasticNetCV(cv=5, random_state=0)\r\n >>> regr.fit(X, y)\r\n ElasticNetCV(cv=5, random_state=0)\r\n >>> print(regr.alpha_)\r\n 0.199...\r\n >>> print(regr.intercept_)\r\n 0.398...\r\n >>> print(regr.predict([[0, 0]]))\r\n [0.398...]\r\n \"\"\"\r\n\r\n path = staticmethod(enet_path)\r\n\r\n def __init__(\r\n self,\r\n *,\r\n l1_ratio=0.5,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n precompute=\"auto\",\r\n max_iter=1000,\r\n tol=1e-4,\r\n cv=None,\r\n copy_X=True,\r\n verbose=0,\r\n n_jobs=None,\r\n positive=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.l1_ratio = l1_ratio\r\n self.eps = eps\r\n self.n_alphas = n_alphas\r\n self.alphas = alphas\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.precompute = precompute\r\n 
self.max_iter = max_iter\r\n self.tol = tol\r\n self.cv = cv\r\n self.copy_X = copy_X\r\n self.verbose = verbose\r\n self.n_jobs = n_jobs\r\n self.positive = positive\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n def _get_estimator(self):\r\n return ElasticNet()\r\n\r\n def _is_multitask(self):\r\n return False\r\n\r\n def _more_tags(self):\r\n return {\"multioutput\": False}\r\n\r\n\r\n###############################################################################\r\n# Multi Task ElasticNet and Lasso models (with joint feature selection)\r\n\r\n\r\nclass MultiTaskElasticNet(Lasso):\r\n \"\"\"Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer.\r\n\r\n The optimization objective for MultiTaskElasticNet is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||_Fro^2\r\n + alpha * l1_ratio * ||W||_21\r\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\r\n\r\n Where::\r\n\r\n ||W||_21 = sum_i sqrt(sum_j W_ij ^ 2)\r\n\r\n i.e. the sum of norms of each row.\r\n\r\n Read more in the :ref:`User Guide <multi_task_elastic_net>`.\r\n\r\n Parameters\r\n ----------\r\n alpha : float, default=1.0\r\n Constant that multiplies the L1/L2 term. Defaults to 1.0.\r\n\r\n l1_ratio : float, default=0.5\r\n The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\r\n For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\r\n is an L2 penalty.\r\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n warm_start : bool, default=False\r\n When set to ``True``, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary <warm_start>`.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. 
This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n intercept_ : ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n coef_ : ndarray of shape (n_targets, n_features)\r\n Parameter vector (W in the cost function formula). If a 1D y is\r\n passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.\r\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance.\r\n\r\n dual_gap_ : float\r\n The dual gaps at the end of the optimization.\r\n\r\n eps_ : float\r\n The tolerance scaled scaled by the variance of the target `y`.\r\n\r\n sparse_coef_ : sparse matrix of shape (n_features,) or \\\r\n (n_targets, n_features)\r\n Sparse representation of the `coef_`.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in\r\n cross-validation.\r\n ElasticNet : Linear regression with combined L1 and L2 priors as regularizer.\r\n MultiTaskLasso : Multi-task L1/L2 Lasso with built-in cross-validation.\r\n\r\n Notes\r\n -----\r\n The algorithm used to fit the model is coordinate descent.\r\n\r\n To avoid unnecessary memory duplication the X and y arguments of the fit\r\n method should be directly passed as Fortran-contiguous numpy arrays.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import linear_model\r\n >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)\r\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])\r\n MultiTaskElasticNet(alpha=0.1)\r\n >>> print(clf.coef_)\r\n [[0.45663524 0.45612256]\r\n [0.45663524 0.45612256]]\r\n >>> print(clf.intercept_)\r\n [0.0872422 0.0872422]\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n alpha=1.0,\r\n *,\r\n l1_ratio=0.5,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n copy_X=True,\r\n max_iter=1000,\r\n tol=1e-4,\r\n warm_start=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.l1_ratio = l1_ratio\r\n self.alpha = alpha\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.max_iter = max_iter\r\n self.copy_X = copy_X\r\n self.tol = tol\r\n self.warm_start = warm_start\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n def fit(self, X, y):\r\n \"\"\"Fit MultiTaskElasticNet model with coordinate descent.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples, n_features)\r\n Data.\r\n y : ndarray of shape (n_samples, n_targets)\r\n Target. 
Will be cast to X's dtype if necessary.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Fitted estimator.\r\n\r\n Notes\r\n -----\r\n Coordinate descent is an algorithm that considers each column of\r\n data at a time hence it will automatically convert the X input\r\n as a Fortran-contiguous numpy array if necessary.\r\n\r\n To avoid memory re-allocation it is advised to allocate the\r\n initial data in memory directly using that format.\r\n \"\"\"\r\n _normalize = _deprecate_normalize(\r\n self.normalize, default=False, estimator_name=self.__class__.__name__\r\n )\r\n\r\n # Need to validate separately here.\r\n # We can't pass multi_ouput=True because that would allow y to be csr.\r\n check_X_params = dict(\r\n dtype=[np.float64, np.float32],\r\n order=\"F\",\r\n copy=self.copy_X and self.fit_intercept,\r\n )\r\n check_y_params = dict(ensure_2d=False, order=\"F\")\r\n X, y = self._validate_data(\r\n X, y, validate_separately=(check_X_params, check_y_params)\r\n )\r\n check_consistent_length(X, y)\r\n y = y.astype(X.dtype)\r\n\r\n if hasattr(self, \"l1_ratio\"):\r\n model_str = \"ElasticNet\"\r\n else:\r\n model_str = \"Lasso\"\r\n if y.ndim == 1:\r\n raise ValueError(\"For mono-task outputs, use %s\" % model_str)\r\n\r\n n_samples, n_features = X.shape\r\n n_targets = y.shape[1]\r\n\r\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\r\n X, y, self.fit_intercept, _normalize, copy=False\r\n )\r\n\r\n if not self.warm_start or not hasattr(self, \"coef_\"):\r\n self.coef_ = np.zeros(\r\n (n_targets, n_features), dtype=X.dtype.type, order=\"F\"\r\n )\r\n\r\n l1_reg = self.alpha * self.l1_ratio * n_samples\r\n l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples\r\n\r\n self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory\r\n\r\n if self.selection not in [\"random\", \"cyclic\"]:\r\n raise ValueError(\"selection should be either random or cyclic.\")\r\n random = self.selection == \"random\"\r\n\r\n (\r\n self.coef_,\r\n self.dual_gap_,\r\n self.eps_,\r\n self.n_iter_,\r\n ) = cd_fast.enet_coordinate_descent_multi_task(\r\n self.coef_,\r\n l1_reg,\r\n l2_reg,\r\n X,\r\n y,\r\n self.max_iter,\r\n self.tol,\r\n check_random_state(self.random_state),\r\n random,\r\n )\r\n\r\n # account for different objective scaling here and in cd_fast\r\n self.dual_gap_ /= n_samples\r\n\r\n self._set_intercept(X_offset, y_offset, X_scale)\r\n\r\n # return self for chaining fit and predict calls\r\n return self\r\n\r\n def _more_tags(self):\r\n return {\"multioutput_only\": True}\r\n\r\n\r\nclass MultiTaskLasso(MultiTaskElasticNet):\r\n \"\"\"Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.\r\n\r\n The optimization objective for Lasso is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21\r\n\r\n Where::\r\n\r\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\r\n\r\n i.e. the sum of norm of each row.\r\n\r\n Read more in the :ref:`User Guide <multi_task_lasso>`.\r\n\r\n Parameters\r\n ----------\r\n alpha : float, default=1.0\r\n Constant that multiplies the L1/L2 term. Defaults to 1.0.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. 
data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n warm_start : bool, default=False\r\n When set to ``True``, reuse the solution of the previous call to fit as\r\n initialization, otherwise, just erase the previous solution.\r\n See :term:`the Glossary <warm_start>`.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n coef_ : ndarray of shape (n_targets, n_features)\r\n Parameter vector (W in the cost function formula).\r\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\r\n\r\n intercept_ : ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance.\r\n\r\n dual_gap_ : ndarray of shape (n_alphas,)\r\n The dual gaps at the end of the optimization for each alpha.\r\n\r\n eps_ : float\r\n The tolerance scaled scaled by the variance of the target `y`.\r\n\r\n sparse_coef_ : sparse matrix of shape (n_features,) or \\\r\n (n_targets, n_features)\r\n Sparse representation of the `coef_`.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. 
versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n Lasso: Linear Model trained with L1 prior as regularizer (aka the Lasso).\r\n MultiTaskLasso: Multi-task L1/L2 Lasso with built-in cross-validation.\r\n MultiTaskElasticNet: Multi-task L1/L2 ElasticNet with built-in cross-validation.\r\n\r\n Notes\r\n -----\r\n The algorithm used to fit the model is coordinate descent.\r\n\r\n To avoid unnecessary memory duplication the X and y arguments of the fit\r\n method should be directly passed as Fortran-contiguous numpy arrays.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import linear_model\r\n >>> clf = linear_model.MultiTaskLasso(alpha=0.1)\r\n >>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]])\r\n MultiTaskLasso(alpha=0.1)\r\n >>> print(clf.coef_)\r\n [[0. 0.60809415]\r\n [0. 0.94592424]]\r\n >>> print(clf.intercept_)\r\n [-0.41888636 -0.87382323]\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n alpha=1.0,\r\n *,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n copy_X=True,\r\n max_iter=1000,\r\n tol=1e-4,\r\n warm_start=False,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.alpha = alpha\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.max_iter = max_iter\r\n self.copy_X = copy_X\r\n self.tol = tol\r\n self.warm_start = warm_start\r\n self.l1_ratio = 1.0\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n\r\nclass MultiTaskElasticNetCV(RegressorMixin, LinearModelCV):\r\n \"\"\"Multi-task L1/L2 ElasticNet with built-in cross-validation.\r\n\r\n See glossary entry for :term:`cross-validation estimator`.\r\n\r\n The optimization objective for MultiTaskElasticNet is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||^Fro_2\r\n + alpha * l1_ratio * ||W||_21\r\n + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2\r\n\r\n Where::\r\n\r\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\r\n\r\n i.e. the sum of norm of each row.\r\n\r\n Read more in the :ref:`User Guide <multi_task_elastic_net>`.\r\n\r\n .. versionadded:: 0.15\r\n\r\n Parameters\r\n ----------\r\n l1_ratio : float or list of float, default=0.5\r\n The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.\r\n For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it\r\n is an L2 penalty.\r\n For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.\r\n This parameter can be a list, in which case the different\r\n values are tested by cross-validation and the one giving the best\r\n prediction score is used. Note that a good choice of list of\r\n values for l1_ratio is often to put more values close to 1\r\n (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,\r\n .9, .95, .99, 1]``.\r\n\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path.\r\n\r\n alphas : array-like, default=None\r\n List of alphas where to compute the models.\r\n If not provided, set automatically.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. 
data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n cv : int, cross-validation generator or iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross-validation,\r\n - int, to specify the number of folds.\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n verbose : bool or int, default=0\r\n Amount of verbosity.\r\n\r\n n_jobs : int, default=None\r\n Number of CPUs to use during the cross validation. Note that this is\r\n used only if multiple values for l1_ratio are given.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. 
This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n intercept_ : ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n coef_ : ndarray of shape (n_targets, n_features)\r\n Parameter vector (W in the cost function formula).\r\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\r\n\r\n alpha_ : float\r\n The amount of penalization chosen by cross validation.\r\n\r\n mse_path_ : ndarray of shape (n_alphas, n_folds) or \\\r\n (n_l1_ratio, n_alphas, n_folds)\r\n Mean square error for the test set on each fold, varying alpha.\r\n\r\n alphas_ : ndarray of shape (n_alphas,) or (n_l1_ratio, n_alphas)\r\n The grid of alphas used for fitting, for each l1_ratio.\r\n\r\n l1_ratio_ : float\r\n Best l1_ratio obtained by cross-validation.\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance for the optimal alpha.\r\n\r\n dual_gap_ : float\r\n The dual gap at the end of the optimization for the optimal alpha.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n MultiTaskElasticNet : Multi-task L1/L2 ElasticNet with built-in cross-validation.\r\n ElasticNetCV : Elastic net model with best model selection by\r\n cross-validation.\r\n MultiTaskLassoCV : Multi-task Lasso model trained with L1/L2\r\n mixed-norm as regularizer.\r\n\r\n Notes\r\n -----\r\n The algorithm used to fit the model is coordinate descent.\r\n\r\n To avoid unnecessary memory duplication the X and y arguments of the fit\r\n method should be directly passed as Fortran-contiguous numpy arrays.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import linear_model\r\n >>> clf = linear_model.MultiTaskElasticNetCV(cv=3)\r\n >>> clf.fit([[0,0], [1, 1], [2, 2]],\r\n ... 
[[0, 0], [1, 1], [2, 2]])\r\n MultiTaskElasticNetCV(cv=3)\r\n >>> print(clf.coef_)\r\n [[0.52875032 0.46958558]\r\n [0.52875032 0.46958558]]\r\n >>> print(clf.intercept_)\r\n [0.00166409 0.00166409]\r\n \"\"\"\r\n\r\n path = staticmethod(enet_path)\r\n\r\n def __init__(\r\n self,\r\n *,\r\n l1_ratio=0.5,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n max_iter=1000,\r\n tol=1e-4,\r\n cv=None,\r\n copy_X=True,\r\n verbose=0,\r\n n_jobs=None,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n self.l1_ratio = l1_ratio\r\n self.eps = eps\r\n self.n_alphas = n_alphas\r\n self.alphas = alphas\r\n self.fit_intercept = fit_intercept\r\n self.normalize = normalize\r\n self.max_iter = max_iter\r\n self.tol = tol\r\n self.cv = cv\r\n self.copy_X = copy_X\r\n self.verbose = verbose\r\n self.n_jobs = n_jobs\r\n self.random_state = random_state\r\n self.selection = selection\r\n\r\n def _get_estimator(self):\r\n return MultiTaskElasticNet()\r\n\r\n def _is_multitask(self):\r\n return True\r\n\r\n def _more_tags(self):\r\n return {\"multioutput_only\": True}\r\n\r\n # This is necessary as LinearModelCV now supports sample_weight while\r\n # MultiTaskElasticNet does not (yet).\r\n def fit(self, X, y):\r\n \"\"\"Fit MultiTaskElasticNet model with coordinate descent.\r\n\r\n Fit is on grid of alphas and best alpha estimated by cross-validation.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples, n_features)\r\n Training data.\r\n y : ndarray of shape (n_samples, n_targets)\r\n Training target variable. Will be cast to X's dtype if necessary.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Returns MultiTaskElasticNet instance.\r\n \"\"\"\r\n return super().fit(X, y)\r\n\r\n\r\nclass MultiTaskLassoCV(RegressorMixin, LinearModelCV):\r\n \"\"\"Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.\r\n\r\n See glossary entry for :term:`cross-validation estimator`.\r\n\r\n The optimization objective for MultiTaskLasso is::\r\n\r\n (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21\r\n\r\n Where::\r\n\r\n ||W||_21 = \\\\sum_i \\\\sqrt{\\\\sum_j w_{ij}^2}\r\n\r\n i.e. the sum of norm of each row.\r\n\r\n Read more in the :ref:`User Guide <multi_task_lasso>`.\r\n\r\n .. versionadded:: 0.15\r\n\r\n Parameters\r\n ----------\r\n eps : float, default=1e-3\r\n Length of the path. ``eps=1e-3`` means that\r\n ``alpha_min / alpha_max = 1e-3``.\r\n\r\n n_alphas : int, default=100\r\n Number of alphas along the regularization path.\r\n\r\n alphas : array-like, default=None\r\n List of alphas where to compute the models.\r\n If not provided, set automatically.\r\n\r\n fit_intercept : bool, default=True\r\n Whether to calculate the intercept for this model. If set\r\n to false, no intercept will be used in calculations\r\n (i.e. data is expected to be centered).\r\n\r\n normalize : bool, default=False\r\n This parameter is ignored when ``fit_intercept`` is set to False.\r\n If True, the regressors X will be normalized before regression by\r\n subtracting the mean and dividing by the l2-norm.\r\n If you wish to standardize, please use\r\n :class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``\r\n on an estimator with ``normalize=False``.\r\n\r\n .. 
deprecated:: 1.0\r\n ``normalize`` was deprecated in version 1.0 and will be removed in\r\n 1.2.\r\n\r\n max_iter : int, default=1000\r\n The maximum number of iterations.\r\n\r\n tol : float, default=1e-4\r\n The tolerance for the optimization: if the updates are\r\n smaller than ``tol``, the optimization code checks the\r\n dual gap for optimality and continues until it is smaller\r\n than ``tol``.\r\n\r\n copy_X : bool, default=True\r\n If ``True``, X will be copied; else, it may be overwritten.\r\n\r\n cv : int, cross-validation generator or iterable, default=None\r\n Determines the cross-validation splitting strategy.\r\n Possible inputs for cv are:\r\n\r\n - None, to use the default 5-fold cross-validation,\r\n - int, to specify the number of folds.\r\n - :term:`CV splitter`,\r\n - An iterable yielding (train, test) splits as arrays of indices.\r\n\r\n For int/None inputs, :class:`KFold` is used.\r\n\r\n Refer :ref:`User Guide <cross_validation>` for the various\r\n cross-validation strategies that can be used here.\r\n\r\n .. versionchanged:: 0.22\r\n ``cv`` default value if None changed from 3-fold to 5-fold.\r\n\r\n verbose : bool or int, default=False\r\n Amount of verbosity.\r\n\r\n n_jobs : int, default=None\r\n Number of CPUs to use during the cross validation. Note that this is\r\n used only if multiple values for l1_ratio are given.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n random_state : int, RandomState instance, default=None\r\n The seed of the pseudo random number generator that selects a random\r\n feature to update. Used when ``selection`` == 'random'.\r\n Pass an int for reproducible output across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n selection : {'cyclic', 'random'}, default='cyclic'\r\n If set to 'random', a random coefficient is updated every iteration\r\n rather than looping over features sequentially by default. This\r\n (setting to 'random') often leads to significantly faster convergence\r\n especially when tol is higher than 1e-4.\r\n\r\n Attributes\r\n ----------\r\n intercept_ : ndarray of shape (n_targets,)\r\n Independent term in decision function.\r\n\r\n coef_ : ndarray of shape (n_targets, n_features)\r\n Parameter vector (W in the cost function formula).\r\n Note that ``coef_`` stores the transpose of ``W``, ``W.T``.\r\n\r\n alpha_ : float\r\n The amount of penalization chosen by cross validation.\r\n\r\n mse_path_ : ndarray of shape (n_alphas, n_folds)\r\n Mean square error for the test set on each fold, varying alpha.\r\n\r\n alphas_ : ndarray of shape (n_alphas,)\r\n The grid of alphas used for fitting.\r\n\r\n n_iter_ : int\r\n Number of iterations run by the coordinate descent solver to reach\r\n the specified tolerance for the optimal alpha.\r\n\r\n dual_gap_ : float\r\n The dual gap at the end of the optimization for the optimal alpha.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. 
versionadded:: 1.0\r\n\r\n See Also\r\n --------\r\n MultiTaskElasticNet : Multi-task ElasticNet model trained with L1/L2\r\n mixed-norm as regularizer.\r\n ElasticNetCV : Elastic net model with best model selection by\r\n cross-validation.\r\n MultiTaskElasticNetCV : Multi-task L1/L2 ElasticNet with built-in\r\n cross-validation.\r\n\r\n Notes\r\n -----\r\n The algorithm used to fit the model is coordinate descent.\r\n\r\n To avoid unnecessary memory duplication the X and y arguments of the fit\r\n method should be directly passed as Fortran-contiguous numpy arrays.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.linear_model import MultiTaskLassoCV\r\n >>> from sklearn.datasets import make_regression\r\n >>> from sklearn.metrics import r2_score\r\n >>> X, y = make_regression(n_targets=2, noise=4, random_state=0)\r\n >>> reg = MultiTaskLassoCV(cv=5, random_state=0).fit(X, y)\r\n >>> r2_score(y, reg.predict(X))\r\n 0.9994...\r\n >>> reg.alpha_\r\n 0.5713...\r\n >>> reg.predict(X[:1,])\r\n array([[153.7971..., 94.9015...]])\r\n \"\"\"\r\n\r\n path = staticmethod(lasso_path)\r\n\r\n def __init__(\r\n self,\r\n *,\r\n eps=1e-3,\r\n n_alphas=100,\r\n alphas=None,\r\n fit_intercept=True,\r\n normalize=\"deprecated\",\r\n max_iter=1000,\r\n tol=1e-4,\r\n copy_X=True,\r\n cv=None,\r\n verbose=False,\r\n n_jobs=None,\r\n random_state=None,\r\n selection=\"cyclic\",\r\n ):\r\n super().__init__(\r\n eps=eps,\r\n n_alphas=n_alphas,\r\n alphas=alphas,\r\n fit_intercept=fit_intercept,\r\n normalize=normalize,\r\n max_iter=max_iter,\r\n tol=tol,\r\n copy_X=copy_X,\r\n cv=cv,\r\n verbose=verbose,\r\n n_jobs=n_jobs,\r\n random_state=random_state,\r\n selection=selection,\r\n )\r\n\r\n def _get_estimator(self):\r\n return MultiTaskLasso()\r\n\r\n def _is_multitask(self):\r\n return True\r\n\r\n def _more_tags(self):\r\n return {\"multioutput_only\": True}\r\n\r\n # This is necessary as LinearModelCV now supports sample_weight while\r\n # MultiTaskElasticNet does not (yet).\r\n def fit(self, X, y):\r\n \"\"\"Fit MultiTaskLasso model with coordinate descent.\r\n\r\n Fit is on grid of alphas and best alpha estimated by cross-validation.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray of shape (n_samples, n_features)\r\n Data.\r\n y : ndarray of shape (n_samples, n_targets)\r\n Target. Will be cast to X's dtype if necessary.\r\n\r\n Returns\r\n -------\r\n self : object\r\n Returns an instance of fitted model.\r\n \"\"\"\r\n return super().fit(X, y)\r\n",
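The ElasticNetCV notes in the source above relate the combined penalty to separate L1 and L2 weights via ``alpha = a + b`` and ``l1_ratio = a / (a + b)``. The following is a minimal sketch of that conversion using only the public scikit-learn API documented in the dumped file; the weights ``a`` and ``b`` and the generated data are illustrative, not taken from the source.

from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet

# Illustrative penalty weights: a for the L1 term, b for the L2 term.
a, b = 0.3, 0.1
alpha = a + b                 # overall penalty strength passed to ElasticNet
l1_ratio = a / (a + b)        # fraction of the penalty assigned to L1

X, y = make_regression(n_features=5, noise=2.0, random_state=0)
model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=0).fit(X, y)
print(model.coef_)            # coefficients fitted under the mixed L1/L2 penalty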
"from __future__ import annotations\r\n\r\nfrom datetime import timedelta\r\nimport operator\r\nfrom sys import getsizeof\r\nfrom typing import (\r\n TYPE_CHECKING,\r\n Any,\r\n Callable,\r\n Hashable,\r\n List,\r\n cast,\r\n)\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import index as libindex\r\nfrom pandas._libs.lib import no_default\r\nfrom pandas._typing import Dtype\r\nfrom pandas.compat.numpy import function as nv\r\nfrom pandas.util._decorators import (\r\n cache_readonly,\r\n doc,\r\n)\r\nfrom pandas.util._exceptions import rewrite_exception\r\n\r\nfrom pandas.core.dtypes.common import (\r\n ensure_platform_int,\r\n ensure_python_int,\r\n is_float,\r\n is_integer,\r\n is_scalar,\r\n is_signed_integer_dtype,\r\n is_timedelta64_dtype,\r\n)\r\nfrom pandas.core.dtypes.generic import ABCTimedeltaIndex\r\n\r\nfrom pandas.core import ops\r\nimport pandas.core.common as com\r\nfrom pandas.core.construction import extract_array\r\nimport pandas.core.indexes.base as ibase\r\nfrom pandas.core.indexes.base import maybe_extract_name\r\nfrom pandas.core.indexes.numeric import (\r\n Float64Index,\r\n Int64Index,\r\n NumericIndex,\r\n)\r\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\r\n\r\nif TYPE_CHECKING:\r\n from pandas import Index\r\n\r\n_empty_range = range(0)\r\n\r\n\r\nclass RangeIndex(NumericIndex):\r\n \"\"\"\r\n Immutable Index implementing a monotonic integer range.\r\n\r\n RangeIndex is a memory-saving special case of Int64Index limited to\r\n representing monotonic ranges. Using RangeIndex may in some instances\r\n improve computing speed.\r\n\r\n This is the default index type used\r\n by DataFrame and Series when no explicit index is provided by the user.\r\n\r\n Parameters\r\n ----------\r\n start : int (default: 0), range, or other RangeIndex instance\r\n If int and \"stop\" is not given, interpreted as \"stop\" instead.\r\n stop : int (default: 0)\r\n step : int (default: 1)\r\n dtype : np.int64\r\n Unused, accepted for homogeneity with other index types.\r\n copy : bool, default False\r\n Unused, accepted for homogeneity with other index types.\r\n name : object, optional\r\n Name to be stored in the index.\r\n\r\n Attributes\r\n ----------\r\n start\r\n stop\r\n step\r\n\r\n Methods\r\n -------\r\n from_range\r\n\r\n See Also\r\n --------\r\n Index : The base pandas Index type.\r\n Int64Index : Index of int64 data.\r\n \"\"\"\r\n\r\n _typ = \"rangeindex\"\r\n _engine_type = libindex.Int64Engine\r\n _dtype_validation_metadata = (is_signed_integer_dtype, \"signed integer\")\r\n _range: range\r\n\r\n # --------------------------------------------------------------------\r\n # Constructors\r\n\r\n def __new__(\r\n cls,\r\n start=None,\r\n stop=None,\r\n step=None,\r\n dtype: Dtype | None = None,\r\n copy: bool = False,\r\n name: Hashable = None,\r\n ) -> RangeIndex:\r\n cls._validate_dtype(dtype)\r\n name = maybe_extract_name(name, start, cls)\r\n\r\n # RangeIndex\r\n if isinstance(start, RangeIndex):\r\n return start.copy(name=name)\r\n elif isinstance(start, range):\r\n return cls._simple_new(start, name=name)\r\n\r\n # validate the arguments\r\n if com.all_none(start, stop, step):\r\n raise TypeError(\"RangeIndex(...) 
must be called with integers\")\r\n\r\n start = ensure_python_int(start) if start is not None else 0\r\n\r\n if stop is None:\r\n start, stop = 0, start\r\n else:\r\n stop = ensure_python_int(stop)\r\n\r\n step = ensure_python_int(step) if step is not None else 1\r\n if step == 0:\r\n raise ValueError(\"Step must not be zero\")\r\n\r\n rng = range(start, stop, step)\r\n return cls._simple_new(rng, name=name)\r\n\r\n @classmethod\r\n def from_range(\r\n cls, data: range, name=None, dtype: Dtype | None = None\r\n ) -> RangeIndex:\r\n \"\"\"\r\n Create RangeIndex from a range object.\r\n\r\n Returns\r\n -------\r\n RangeIndex\r\n \"\"\"\r\n if not isinstance(data, range):\r\n raise TypeError(\r\n f\"{cls.__name__}(...) must be called with object coercible to a \"\r\n f\"range, {repr(data)} was passed\"\r\n )\r\n cls._validate_dtype(dtype)\r\n return cls._simple_new(data, name=name)\r\n\r\n @classmethod\r\n def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:\r\n result = object.__new__(cls)\r\n\r\n assert isinstance(values, range)\r\n\r\n result._range = values\r\n result._name = name\r\n result._cache = {}\r\n result._reset_identity()\r\n return result\r\n\r\n # --------------------------------------------------------------------\r\n\r\n @cache_readonly\r\n def _constructor(self) -> type[Int64Index]:\r\n \"\"\"return the class to use for construction\"\"\"\r\n return Int64Index\r\n\r\n @cache_readonly\r\n def _data(self) -> np.ndarray:\r\n \"\"\"\r\n An int array that for performance reasons is created only when needed.\r\n\r\n The constructed array is saved in ``_cache``.\r\n \"\"\"\r\n return np.arange(self.start, self.stop, self.step, dtype=np.int64)\r\n\r\n @cache_readonly\r\n def _cached_int64index(self) -> Int64Index:\r\n return Int64Index._simple_new(self._data, name=self.name)\r\n\r\n @property\r\n def _int64index(self) -> Int64Index:\r\n # wrap _cached_int64index so we can be sure its name matches self.name\r\n res = self._cached_int64index\r\n res._name = self._name\r\n return res\r\n\r\n def _get_data_as_items(self):\r\n \"\"\"return a list of tuples of start, stop, step\"\"\"\r\n rng = self._range\r\n return [(\"start\", rng.start), (\"stop\", rng.stop), (\"step\", rng.step)]\r\n\r\n def __reduce__(self):\r\n d = self._get_attributes_dict()\r\n d.update(dict(self._get_data_as_items()))\r\n return ibase._new_Index, (type(self), d), None\r\n\r\n # --------------------------------------------------------------------\r\n # Rendering Methods\r\n\r\n def _format_attrs(self):\r\n \"\"\"\r\n Return a list of tuples of the (attr, formatted_value)\r\n \"\"\"\r\n attrs = self._get_data_as_items()\r\n if self.name is not None:\r\n attrs.append((\"name\", ibase.default_pprint(self.name)))\r\n return attrs\r\n\r\n def _format_data(self, name=None):\r\n # we are formatting thru the attributes\r\n return None\r\n\r\n def _format_with_header(self, header: list[str], na_rep: str = \"NaN\") -> list[str]:\r\n if not len(self._range):\r\n return header\r\n first_val_str = str(self._range[0])\r\n last_val_str = str(self._range[-1])\r\n max_length = max(len(first_val_str), len(last_val_str))\r\n\r\n return header + [f\"{x:<{max_length}}\" for x in self._range]\r\n\r\n # --------------------------------------------------------------------\r\n _deprecation_message = (\r\n \"RangeIndex.{} is deprecated and will be \"\r\n \"removed in a future version. 
Use RangeIndex.{} \"\r\n \"instead\"\r\n )\r\n\r\n @property\r\n def start(self) -> int:\r\n \"\"\"\r\n The value of the `start` parameter (``0`` if this was not supplied).\r\n \"\"\"\r\n # GH 25710\r\n return self._range.start\r\n\r\n @property\r\n def _start(self) -> int:\r\n \"\"\"\r\n The value of the `start` parameter (``0`` if this was not supplied).\r\n\r\n .. deprecated:: 0.25.0\r\n Use ``start`` instead.\r\n \"\"\"\r\n warnings.warn(\r\n self._deprecation_message.format(\"_start\", \"start\"),\r\n FutureWarning,\r\n stacklevel=2,\r\n )\r\n return self.start\r\n\r\n @property\r\n def stop(self) -> int:\r\n \"\"\"\r\n The value of the `stop` parameter.\r\n \"\"\"\r\n return self._range.stop\r\n\r\n @property\r\n def _stop(self) -> int:\r\n \"\"\"\r\n The value of the `stop` parameter.\r\n\r\n .. deprecated:: 0.25.0\r\n Use ``stop`` instead.\r\n \"\"\"\r\n # GH 25710\r\n warnings.warn(\r\n self._deprecation_message.format(\"_stop\", \"stop\"),\r\n FutureWarning,\r\n stacklevel=2,\r\n )\r\n return self.stop\r\n\r\n @property\r\n def step(self) -> int:\r\n \"\"\"\r\n The value of the `step` parameter (``1`` if this was not supplied).\r\n \"\"\"\r\n # GH 25710\r\n return self._range.step\r\n\r\n @property\r\n def _step(self) -> int:\r\n \"\"\"\r\n The value of the `step` parameter (``1`` if this was not supplied).\r\n\r\n .. deprecated:: 0.25.0\r\n Use ``step`` instead.\r\n \"\"\"\r\n # GH 25710\r\n warnings.warn(\r\n self._deprecation_message.format(\"_step\", \"step\"),\r\n FutureWarning,\r\n stacklevel=2,\r\n )\r\n return self.step\r\n\r\n @cache_readonly\r\n def nbytes(self) -> int:\r\n \"\"\"\r\n Return the number of bytes in the underlying data.\r\n \"\"\"\r\n rng = self._range\r\n return getsizeof(rng) + sum(\r\n getsizeof(getattr(rng, attr_name))\r\n for attr_name in [\"start\", \"stop\", \"step\"]\r\n )\r\n\r\n def memory_usage(self, deep: bool = False) -> int:\r\n \"\"\"\r\n Memory usage of my values\r\n\r\n Parameters\r\n ----------\r\n deep : bool\r\n Introspect the data deeply, interrogate\r\n `object` dtypes for system-level memory consumption\r\n\r\n Returns\r\n -------\r\n bytes used\r\n\r\n Notes\r\n -----\r\n Memory usage does not include memory consumed by elements that\r\n are not components of the array if deep=False\r\n\r\n See Also\r\n --------\r\n numpy.ndarray.nbytes\r\n \"\"\"\r\n return self.nbytes\r\n\r\n @property\r\n def dtype(self) -> np.dtype:\r\n return np.dtype(np.int64)\r\n\r\n @property\r\n def is_unique(self) -> bool:\r\n \"\"\"return if the index has unique values\"\"\"\r\n return True\r\n\r\n @cache_readonly\r\n def is_monotonic_increasing(self) -> bool:\r\n return self._range.step > 0 or len(self) <= 1\r\n\r\n @cache_readonly\r\n def is_monotonic_decreasing(self) -> bool:\r\n return self._range.step < 0 or len(self) <= 1\r\n\r\n def __contains__(self, key: Any) -> bool:\r\n hash(key)\r\n try:\r\n key = ensure_python_int(key)\r\n except TypeError:\r\n return False\r\n return key in self._range\r\n\r\n @property\r\n def inferred_type(self) -> str:\r\n return \"integer\"\r\n\r\n # --------------------------------------------------------------------\r\n # Indexing Methods\r\n\r\n @doc(Int64Index.get_loc)\r\n def get_loc(self, key, method=None, tolerance=None):\r\n if method is None and tolerance is None:\r\n if is_integer(key) or (is_float(key) and key.is_integer()):\r\n new_key = int(key)\r\n try:\r\n return self._range.index(new_key)\r\n except ValueError as err:\r\n raise KeyError(key) from err\r\n raise KeyError(key)\r\n return 
super().get_loc(key, method=method, tolerance=tolerance)\r\n\r\n def _get_indexer(\r\n self,\r\n target: Index,\r\n method: str | None = None,\r\n limit: int | None = None,\r\n tolerance=None,\r\n ) -> np.ndarray:\r\n # -> np.ndarray[np.intp]\r\n if com.any_not_none(method, tolerance, limit):\r\n return super()._get_indexer(\r\n target, method=method, tolerance=tolerance, limit=limit\r\n )\r\n\r\n if self.step > 0:\r\n start, stop, step = self.start, self.stop, self.step\r\n else:\r\n # GH 28678: work on reversed range for simplicity\r\n reverse = self._range[::-1]\r\n start, stop, step = reverse.start, reverse.stop, reverse.step\r\n\r\n if not is_signed_integer_dtype(target):\r\n # checks/conversions/roundings are delegated to general method\r\n return super()._get_indexer(target, method=method, tolerance=tolerance)\r\n\r\n target_array = np.asarray(target)\r\n locs = target_array - start\r\n valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)\r\n locs[~valid] = -1\r\n locs[valid] = locs[valid] / step\r\n\r\n if step != self.step:\r\n # We reversed this range: transform to original locs\r\n locs[valid] = len(self) - 1 - locs[valid]\r\n return ensure_platform_int(locs)\r\n\r\n # --------------------------------------------------------------------\r\n\r\n def repeat(self, repeats, axis=None) -> Int64Index:\r\n return self._int64index.repeat(repeats, axis=axis)\r\n\r\n def delete(self, loc) -> Int64Index: # type: ignore[override]\r\n return self._int64index.delete(loc)\r\n\r\n def take(\r\n self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs\r\n ) -> Int64Index:\r\n with rewrite_exception(\"Int64Index\", type(self).__name__):\r\n return self._int64index.take(\r\n indices,\r\n axis=axis,\r\n allow_fill=allow_fill,\r\n fill_value=fill_value,\r\n **kwargs,\r\n )\r\n\r\n def tolist(self) -> list[int]:\r\n return list(self._range)\r\n\r\n @doc(Int64Index.__iter__)\r\n def __iter__(self):\r\n yield from self._range\r\n\r\n @doc(Int64Index._shallow_copy)\r\n def _shallow_copy(self, values, name: Hashable = no_default):\r\n name = self.name if name is no_default else name\r\n\r\n if values.dtype.kind == \"f\":\r\n return Float64Index(values, name=name)\r\n return Int64Index._simple_new(values, name=name)\r\n\r\n def _view(self: RangeIndex) -> RangeIndex:\r\n result = type(self)._simple_new(self._range, name=self._name)\r\n result._cache = self._cache\r\n return result\r\n\r\n @doc(Int64Index.copy)\r\n def copy(\r\n self,\r\n name: Hashable = None,\r\n deep: bool = False,\r\n dtype: Dtype | None = None,\r\n names=None,\r\n ):\r\n name = self._validate_names(name=name, names=names, deep=deep)[0]\r\n new_index = self._rename(name=name)\r\n\r\n if dtype:\r\n warnings.warn(\r\n \"parameter dtype is deprecated and will be removed in a future \"\r\n \"version. 
Use the astype method instead.\",\r\n FutureWarning,\r\n stacklevel=2,\r\n )\r\n new_index = new_index.astype(dtype)\r\n return new_index\r\n\r\n def _minmax(self, meth: str):\r\n no_steps = len(self) - 1\r\n if no_steps == -1:\r\n return np.nan\r\n elif (meth == \"min\" and self.step > 0) or (meth == \"max\" and self.step < 0):\r\n return self.start\r\n\r\n return self.start + self.step * no_steps\r\n\r\n def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:\r\n \"\"\"The minimum value of the RangeIndex\"\"\"\r\n nv.validate_minmax_axis(axis)\r\n nv.validate_min(args, kwargs)\r\n return self._minmax(\"min\")\r\n\r\n def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:\r\n \"\"\"The maximum value of the RangeIndex\"\"\"\r\n nv.validate_minmax_axis(axis)\r\n nv.validate_max(args, kwargs)\r\n return self._minmax(\"max\")\r\n\r\n def argsort(self, *args, **kwargs) -> np.ndarray:\r\n \"\"\"\r\n Returns the indices that would sort the index and its\r\n underlying data.\r\n\r\n Returns\r\n -------\r\n np.ndarray[np.intp]\r\n\r\n See Also\r\n --------\r\n numpy.ndarray.argsort\r\n \"\"\"\r\n ascending = kwargs.pop(\"ascending\", True) # EA compat\r\n nv.validate_argsort(args, kwargs)\r\n\r\n if self._range.step > 0:\r\n result = np.arange(len(self), dtype=np.intp)\r\n else:\r\n result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)\r\n\r\n if not ascending:\r\n result = result[::-1]\r\n return result\r\n\r\n def factorize(\r\n self, sort: bool = False, na_sentinel: int | None = -1\r\n ) -> tuple[np.ndarray, RangeIndex]:\r\n codes = np.arange(len(self), dtype=np.intp)\r\n uniques = self\r\n if sort and self.step < 0:\r\n codes = codes[::-1]\r\n uniques = uniques[::-1]\r\n return codes, uniques\r\n\r\n def equals(self, other: object) -> bool:\r\n \"\"\"\r\n Determines if two Index objects contain the same elements.\r\n \"\"\"\r\n if isinstance(other, RangeIndex):\r\n return self._range == other._range\r\n return super().equals(other)\r\n\r\n # --------------------------------------------------------------------\r\n # Set Operations\r\n\r\n def _intersection(self, other: Index, sort=False):\r\n\r\n if not isinstance(other, RangeIndex):\r\n # Int64Index\r\n return super()._intersection(other, sort=sort)\r\n\r\n if not len(self) or not len(other):\r\n return self._simple_new(_empty_range)\r\n\r\n first = self._range[::-1] if self.step < 0 else self._range\r\n second = other._range[::-1] if other.step < 0 else other._range\r\n\r\n # check whether intervals intersect\r\n # deals with in- and decreasing ranges\r\n int_low = max(first.start, second.start)\r\n int_high = min(first.stop, second.stop)\r\n if int_high <= int_low:\r\n return self._simple_new(_empty_range)\r\n\r\n # Method hint: linear Diophantine equation\r\n # solve intersection problem\r\n # performance hint: for identical step sizes, could use\r\n # cheaper alternative\r\n gcd, s, _ = self._extended_gcd(first.step, second.step)\r\n\r\n # check whether element sets intersect\r\n if (first.start - second.start) % gcd:\r\n return self._simple_new(_empty_range)\r\n\r\n # calculate parameters for the RangeIndex describing the\r\n # intersection disregarding the lower bounds\r\n tmp_start = first.start + (second.start - first.start) * first.step // gcd * s\r\n new_step = first.step * second.step // gcd\r\n new_range = range(tmp_start, int_high, new_step)\r\n new_index = self._simple_new(new_range)\r\n\r\n # adjust index to limiting interval\r\n new_start = new_index._min_fitting_element(int_low)\r\n new_range 
= range(new_start, new_index.stop, new_index.step)\r\n new_index = self._simple_new(new_range)\r\n\r\n if (self.step < 0 and other.step < 0) is not (new_index.step < 0):\r\n new_index = new_index[::-1]\r\n if sort is None:\r\n new_index = new_index.sort_values()\r\n\r\n return new_index\r\n\r\n def _min_fitting_element(self, lower_limit: int) -> int:\r\n \"\"\"Returns the smallest element greater than or equal to the limit\"\"\"\r\n no_steps = -(-(lower_limit - self.start) // abs(self.step))\r\n return self.start + abs(self.step) * no_steps\r\n\r\n def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:\r\n \"\"\"\r\n Extended Euclidean algorithms to solve Bezout's identity:\r\n a*x + b*y = gcd(x, y)\r\n Finds one particular solution for x, y: s, t\r\n Returns: gcd, s, t\r\n \"\"\"\r\n s, old_s = 0, 1\r\n t, old_t = 1, 0\r\n r, old_r = b, a\r\n while r:\r\n quotient = old_r // r\r\n old_r, r = r, old_r - quotient * r\r\n old_s, s = s, old_s - quotient * s\r\n old_t, t = t, old_t - quotient * t\r\n return old_r, old_s, old_t\r\n\r\n def _union(self, other: Index, sort):\r\n \"\"\"\r\n Form the union of two Index objects and sorts if possible\r\n\r\n Parameters\r\n ----------\r\n other : Index or array-like\r\n\r\n sort : False or None, default None\r\n Whether to sort resulting index. ``sort=None`` returns a\r\n monotonically increasing ``RangeIndex`` if possible or a sorted\r\n ``Int64Index`` if not. ``sort=False`` always returns an\r\n unsorted ``Int64Index``\r\n\r\n .. versionadded:: 0.25.0\r\n\r\n Returns\r\n -------\r\n union : Index\r\n \"\"\"\r\n if isinstance(other, RangeIndex) and sort is None:\r\n start_s, step_s = self.start, self.step\r\n end_s = self.start + self.step * (len(self) - 1)\r\n start_o, step_o = other.start, other.step\r\n end_o = other.start + other.step * (len(other) - 1)\r\n if self.step < 0:\r\n start_s, step_s, end_s = end_s, -step_s, start_s\r\n if other.step < 0:\r\n start_o, step_o, end_o = end_o, -step_o, start_o\r\n if len(self) == 1 and len(other) == 1:\r\n step_s = step_o = abs(self.start - other.start)\r\n elif len(self) == 1:\r\n step_s = step_o\r\n elif len(other) == 1:\r\n step_o = step_s\r\n start_r = min(start_s, start_o)\r\n end_r = max(end_s, end_o)\r\n if step_o == step_s:\r\n if (\r\n (start_s - start_o) % step_s == 0\r\n and (start_s - end_o) <= step_s\r\n and (start_o - end_s) <= step_s\r\n ):\r\n return type(self)(start_r, end_r + step_s, step_s)\r\n if (\r\n (step_s % 2 == 0)\r\n and (abs(start_s - start_o) <= step_s / 2)\r\n and (abs(end_s - end_o) <= step_s / 2)\r\n ):\r\n return type(self)(start_r, end_r + step_s / 2, step_s / 2)\r\n elif step_o % step_s == 0:\r\n if (\r\n (start_o - start_s) % step_s == 0\r\n and (start_o + step_s >= start_s)\r\n and (end_o - step_s <= end_s)\r\n ):\r\n return type(self)(start_r, end_r + step_s, step_s)\r\n elif step_s % step_o == 0:\r\n if (\r\n (start_s - start_o) % step_o == 0\r\n and (start_s + step_o >= start_o)\r\n and (end_s - step_o <= end_o)\r\n ):\r\n return type(self)(start_r, end_r + step_o, step_o)\r\n return self._int64index._union(other, sort=sort)\r\n\r\n def _difference(self, other, sort=None):\r\n # optimized set operation if we have another RangeIndex\r\n self._validate_sort_keyword(sort)\r\n self._assert_can_do_setop(other)\r\n other, result_name = self._convert_can_do_setop(other)\r\n\r\n if not isinstance(other, RangeIndex):\r\n return super()._difference(other, sort=sort)\r\n\r\n res_name = ops.get_op_result_name(self, other)\r\n\r\n first = self._range[::-1] if self.step 
< 0 else self._range\r\n overlap = self.intersection(other)\r\n if overlap.step < 0:\r\n overlap = overlap[::-1]\r\n\r\n if len(overlap) == 0:\r\n return self.rename(name=res_name)\r\n if len(overlap) == len(self):\r\n return self[:0].rename(res_name)\r\n if not isinstance(overlap, RangeIndex):\r\n # We won't end up with RangeIndex, so fall back\r\n return super()._difference(other, sort=sort)\r\n if overlap.step != first.step:\r\n # In some cases we might be able to get a RangeIndex back,\r\n # but not worth the effort.\r\n return super()._difference(other, sort=sort)\r\n\r\n if overlap[0] == first.start:\r\n # The difference is everything after the intersection\r\n new_rng = range(overlap[-1] + first.step, first.stop, first.step)\r\n elif overlap[-1] == first[-1]:\r\n # The difference is everything before the intersection\r\n new_rng = range(first.start, overlap[0], first.step)\r\n else:\r\n # The difference is not range-like\r\n return super()._difference(other, sort=sort)\r\n\r\n new_index = type(self)._simple_new(new_rng, name=res_name)\r\n if first is not self._range:\r\n new_index = new_index[::-1]\r\n return new_index\r\n\r\n def symmetric_difference(self, other, result_name: Hashable = None, sort=None):\r\n if not isinstance(other, RangeIndex) or sort is not None:\r\n return super().symmetric_difference(other, result_name, sort)\r\n\r\n left = self.difference(other)\r\n right = other.difference(self)\r\n result = left.union(right)\r\n\r\n if result_name is not None:\r\n result = result.rename(result_name)\r\n return result\r\n\r\n # --------------------------------------------------------------------\r\n\r\n def _concat(self, indexes: list[Index], name: Hashable) -> Index:\r\n \"\"\"\r\n Overriding parent method for the case of all RangeIndex instances.\r\n\r\n When all members of \"indexes\" are of type RangeIndex: result will be\r\n RangeIndex if possible, Int64Index otherwise. E.g.:\r\n indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)\r\n indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])\r\n \"\"\"\r\n if not all(isinstance(x, RangeIndex) for x in indexes):\r\n return super()._concat(indexes, name)\r\n\r\n elif len(indexes) == 1:\r\n return indexes[0]\r\n\r\n rng_indexes = cast(List[RangeIndex], indexes)\r\n\r\n start = step = next_ = None\r\n\r\n # Filter the empty indexes\r\n non_empty_indexes = [obj for obj in rng_indexes if len(obj)]\r\n\r\n for obj in non_empty_indexes:\r\n rng = obj._range\r\n\r\n if start is None:\r\n # This is set by the first non-empty index\r\n start = rng.start\r\n if step is None and len(rng) > 1:\r\n step = rng.step\r\n elif step is None:\r\n # First non-empty index had only one element\r\n if rng.start == start:\r\n values = np.concatenate([x._values for x in rng_indexes])\r\n result = Int64Index(values)\r\n return result.rename(name)\r\n\r\n step = rng.start - start\r\n\r\n non_consecutive = (step != rng.step and len(rng) > 1) or (\r\n next_ is not None and rng.start != next_\r\n )\r\n if non_consecutive:\r\n result = Int64Index(np.concatenate([x._values for x in rng_indexes]))\r\n return result.rename(name)\r\n\r\n if step is not None:\r\n next_ = rng[-1] + step\r\n\r\n if non_empty_indexes:\r\n # Get the stop value from \"next\" or alternatively\r\n # from the last non-empty index\r\n stop = non_empty_indexes[-1].stop if next_ is None else next_\r\n return RangeIndex(start, stop, step).rename(name)\r\n\r\n # Here all \"indexes\" had 0 length, i.e. 
were empty.\r\n # In this case return an empty range index.\r\n return RangeIndex(0, 0).rename(name)\r\n\r\n def __len__(self) -> int:\r\n \"\"\"\r\n return the length of the RangeIndex\r\n \"\"\"\r\n return len(self._range)\r\n\r\n @property\r\n def size(self) -> int:\r\n return len(self)\r\n\r\n def __getitem__(self, key):\r\n \"\"\"\r\n Conserve RangeIndex type for scalar and slice keys.\r\n \"\"\"\r\n if isinstance(key, slice):\r\n new_range = self._range[key]\r\n return self._simple_new(new_range, name=self._name)\r\n elif is_integer(key):\r\n new_key = int(key)\r\n try:\r\n return self._range[new_key]\r\n except IndexError as err:\r\n raise IndexError(\r\n f\"index {key} is out of bounds for axis 0 with size {len(self)}\"\r\n ) from err\r\n elif is_scalar(key):\r\n raise IndexError(\r\n \"only integers, slices (`:`), \"\r\n \"ellipsis (`...`), numpy.newaxis (`None`) \"\r\n \"and integer or boolean \"\r\n \"arrays are valid indices\"\r\n )\r\n # fall back to Int64Index\r\n return super().__getitem__(key)\r\n\r\n def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:\r\n \"\"\"\r\n Fastpath for __getitem__ when we know we have a slice.\r\n \"\"\"\r\n res = self._range[slobj]\r\n return type(self)._simple_new(res, name=self._name)\r\n\r\n @unpack_zerodim_and_defer(\"__floordiv__\")\r\n def __floordiv__(self, other):\r\n\r\n if is_integer(other) and other != 0:\r\n if len(self) == 0 or self.start % other == 0 and self.step % other == 0:\r\n start = self.start // other\r\n step = self.step // other\r\n stop = start + len(self) * step\r\n new_range = range(start, stop, step or 1)\r\n return self._simple_new(new_range, name=self.name)\r\n if len(self) == 1:\r\n start = self.start // other\r\n new_range = range(start, start + 1, 1)\r\n return self._simple_new(new_range, name=self.name)\r\n return self._int64index // other\r\n\r\n # --------------------------------------------------------------------\r\n # Reductions\r\n\r\n def all(self, *args, **kwargs) -> bool:\r\n return 0 not in self._range\r\n\r\n def any(self, *args, **kwargs) -> bool:\r\n return any(self._range)\r\n\r\n # --------------------------------------------------------------------\r\n\r\n def _cmp_method(self, other, op):\r\n if isinstance(other, RangeIndex) and self._range == other._range:\r\n # Both are immutable so if ._range attr. 
are equal, shortcut is possible\r\n return super()._cmp_method(self, op)\r\n return super()._cmp_method(other, op)\r\n\r\n def _arith_method(self, other, op):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n other : Any\r\n op : callable that accepts 2 params\r\n perform the binary op\r\n \"\"\"\r\n\r\n if isinstance(other, ABCTimedeltaIndex):\r\n # Defer to TimedeltaIndex implementation\r\n return NotImplemented\r\n elif isinstance(other, (timedelta, np.timedelta64)):\r\n # GH#19333 is_integer evaluated True on timedelta64,\r\n # so we need to catch these explicitly\r\n return op(self._int64index, other)\r\n elif is_timedelta64_dtype(other):\r\n # Must be an np.ndarray; GH#22390\r\n return op(self._int64index, other)\r\n\r\n if op in [\r\n operator.pow,\r\n ops.rpow,\r\n operator.mod,\r\n ops.rmod,\r\n ops.rfloordiv,\r\n divmod,\r\n ops.rdivmod,\r\n ]:\r\n return op(self._int64index, other)\r\n\r\n step: Callable | None = None\r\n if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:\r\n step = op\r\n\r\n # TODO: if other is a RangeIndex we may have more efficient options\r\n other = extract_array(other, extract_numpy=True, extract_range=True)\r\n attrs = self._get_attributes_dict()\r\n\r\n left, right = self, other\r\n\r\n try:\r\n # apply if we have an override\r\n if step:\r\n with np.errstate(all=\"ignore\"):\r\n rstep = step(left.step, right)\r\n\r\n # we don't have a representable op\r\n # so return a base index\r\n if not is_integer(rstep) or not rstep:\r\n raise ValueError\r\n\r\n else:\r\n rstep = left.step\r\n\r\n with np.errstate(all=\"ignore\"):\r\n rstart = op(left.start, right)\r\n rstop = op(left.stop, right)\r\n\r\n result = type(self)(rstart, rstop, rstep, **attrs)\r\n\r\n # for compat with numpy / Int64Index\r\n # even if we can represent as a RangeIndex, return\r\n # as a Float64Index if we have float-like descriptors\r\n if not all(is_integer(x) for x in [rstart, rstop, rstep]):\r\n result = result.astype(\"float64\")\r\n\r\n return result\r\n\r\n except (ValueError, TypeError, ZeroDivisionError):\r\n # Defer to Int64Index implementation\r\n return op(self._int64index, other)\r\n # TODO: Do attrs get handled reliably?\r\n",
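A minimal sketch (not part of the original file) of the behavior the RangeIndex methods above implement: results stay a lightweight range only while they are still expressible as one, and are materialized otherwise. Only public pandas calls are used, and the checks compare values because the concrete fallback class (Int64Index here, a plain integer Index in newer pandas) varies by version:

import pandas as pd

idx = pd.RangeIndex(0, 12, 3)          # values 0, 3, 6, 9

# __floordiv__ can stay a range because start and step divide evenly by 3
assert list(idx // 3) == [0, 1, 2, 3]

# _concat (reached via Index.append / pd.concat) keeps a range for
# consecutive pieces ...
joined = pd.RangeIndex(3).append(pd.RangeIndex(3, 6))
assert list(joined) == [0, 1, 2, 3, 4, 5]

# ... and materializes the values when there is a gap
gapped = pd.RangeIndex(3).append(pd.RangeIndex(4, 6))
assert list(gapped) == [0, 1, 2, 4, 5]

# _difference keeps a range when the overlap sits at one end of the index
assert list(pd.RangeIndex(10).difference(pd.RangeIndex(5))) == [5, 6, 7, 8, 9]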
"# Author: Nikolay Mayorov <[email protected]>\r\n# License: 3-clause BSD\r\n\r\nimport numpy as np\r\nfrom scipy.sparse import issparse\r\nfrom scipy.special import digamma\r\n\r\nfrom ..metrics.cluster import mutual_info_score\r\nfrom ..neighbors import NearestNeighbors, KDTree\r\nfrom ..preprocessing import scale\r\nfrom ..utils import check_random_state\r\nfrom ..utils.fixes import _astype_copy_false\r\nfrom ..utils.validation import check_array, check_X_y\r\nfrom ..utils.multiclass import check_classification_targets\r\n\r\n\r\ndef _compute_mi_cc(x, y, n_neighbors):\r\n \"\"\"Compute mutual information between two continuous variables.\r\n\r\n Parameters\r\n ----------\r\n x, y : ndarray, shape (n_samples,)\r\n Samples of two continuous random variables, must have an identical\r\n shape.\r\n\r\n n_neighbors : int\r\n Number of nearest neighbors to search for each point, see [1]_.\r\n\r\n Returns\r\n -------\r\n mi : float\r\n Estimated mutual information. If it turned out to be negative it is\r\n replace by 0.\r\n\r\n Notes\r\n -----\r\n True mutual information can't be negative. If its estimate by a numerical\r\n method is negative, it means (providing the method is adequate) that the\r\n mutual information is close to 0 and replacing it by 0 is a reasonable\r\n strategy.\r\n\r\n References\r\n ----------\r\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\r\n information\". Phys. Rev. E 69, 2004.\r\n \"\"\"\r\n n_samples = x.size\r\n\r\n x = x.reshape((-1, 1))\r\n y = y.reshape((-1, 1))\r\n xy = np.hstack((x, y))\r\n\r\n # Here we rely on NearestNeighbors to select the fastest algorithm.\r\n nn = NearestNeighbors(metric=\"chebyshev\", n_neighbors=n_neighbors)\r\n\r\n nn.fit(xy)\r\n radius = nn.kneighbors()[0]\r\n radius = np.nextafter(radius[:, -1], 0)\r\n\r\n # KDTree is explicitly fit to allow for the querying of number of\r\n # neighbors within a specified radius\r\n kd = KDTree(x, metric=\"chebyshev\")\r\n nx = kd.query_radius(x, radius, count_only=True, return_distance=False)\r\n nx = np.array(nx) - 1.0\r\n\r\n kd = KDTree(y, metric=\"chebyshev\")\r\n ny = kd.query_radius(y, radius, count_only=True, return_distance=False)\r\n ny = np.array(ny) - 1.0\r\n\r\n mi = (\r\n digamma(n_samples)\r\n + digamma(n_neighbors)\r\n - np.mean(digamma(nx + 1))\r\n - np.mean(digamma(ny + 1))\r\n )\r\n\r\n return max(0, mi)\r\n\r\n\r\ndef _compute_mi_cd(c, d, n_neighbors):\r\n \"\"\"Compute mutual information between continuous and discrete variables.\r\n\r\n Parameters\r\n ----------\r\n c : ndarray, shape (n_samples,)\r\n Samples of a continuous random variable.\r\n\r\n d : ndarray, shape (n_samples,)\r\n Samples of a discrete random variable.\r\n\r\n n_neighbors : int\r\n Number of nearest neighbors to search for each point, see [1]_.\r\n\r\n Returns\r\n -------\r\n mi : float\r\n Estimated mutual information. If it turned out to be negative it is\r\n replace by 0.\r\n\r\n Notes\r\n -----\r\n True mutual information can't be negative. If its estimate by a numerical\r\n method is negative, it means (providing the method is adequate) that the\r\n mutual information is close to 0 and replacing it by 0 is a reasonable\r\n strategy.\r\n\r\n References\r\n ----------\r\n .. [1] B. C. Ross \"Mutual Information between Discrete and Continuous\r\n Data Sets\". 
PLoS ONE 9(2), 2014.\r\n \"\"\"\r\n n_samples = c.shape[0]\r\n c = c.reshape((-1, 1))\r\n\r\n radius = np.empty(n_samples)\r\n label_counts = np.empty(n_samples)\r\n k_all = np.empty(n_samples)\r\n nn = NearestNeighbors()\r\n for label in np.unique(d):\r\n mask = d == label\r\n count = np.sum(mask)\r\n if count > 1:\r\n k = min(n_neighbors, count - 1)\r\n nn.set_params(n_neighbors=k)\r\n nn.fit(c[mask])\r\n r = nn.kneighbors()[0]\r\n radius[mask] = np.nextafter(r[:, -1], 0)\r\n k_all[mask] = k\r\n label_counts[mask] = count\r\n\r\n # Ignore points with unique labels.\r\n mask = label_counts > 1\r\n n_samples = np.sum(mask)\r\n label_counts = label_counts[mask]\r\n k_all = k_all[mask]\r\n c = c[mask]\r\n radius = radius[mask]\r\n\r\n kd = KDTree(c)\r\n m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)\r\n m_all = np.array(m_all) - 1.0\r\n\r\n mi = (\r\n digamma(n_samples)\r\n + np.mean(digamma(k_all))\r\n - np.mean(digamma(label_counts))\r\n - np.mean(digamma(m_all + 1))\r\n )\r\n\r\n return max(0, mi)\r\n\r\n\r\ndef _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):\r\n \"\"\"Compute mutual information between two variables.\r\n\r\n This is a simple wrapper which selects a proper function to call based on\r\n whether `x` and `y` are discrete or not.\r\n \"\"\"\r\n if x_discrete and y_discrete:\r\n return mutual_info_score(x, y)\r\n elif x_discrete and not y_discrete:\r\n return _compute_mi_cd(y, x, n_neighbors)\r\n elif not x_discrete and y_discrete:\r\n return _compute_mi_cd(x, y, n_neighbors)\r\n else:\r\n return _compute_mi_cc(x, y, n_neighbors)\r\n\r\n\r\ndef _iterate_columns(X, columns=None):\r\n \"\"\"Iterate over columns of a matrix.\r\n\r\n Parameters\r\n ----------\r\n X : ndarray or csc_matrix, shape (n_samples, n_features)\r\n Matrix over which to iterate.\r\n\r\n columns : iterable or None, default=None\r\n Indices of columns to iterate over. If None, iterate over all columns.\r\n\r\n Yields\r\n ------\r\n x : ndarray, shape (n_samples,)\r\n Columns of `X` in dense format.\r\n \"\"\"\r\n if columns is None:\r\n columns = range(X.shape[1])\r\n\r\n if issparse(X):\r\n for i in columns:\r\n x = np.zeros(X.shape[0])\r\n start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]\r\n x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]\r\n yield x\r\n else:\r\n for i in columns:\r\n yield X[:, i]\r\n\r\n\r\ndef _estimate_mi(\r\n X,\r\n y,\r\n discrete_features=\"auto\",\r\n discrete_target=False,\r\n n_neighbors=3,\r\n copy=True,\r\n random_state=None,\r\n):\r\n \"\"\"Estimate mutual information between the features and the target.\r\n\r\n Parameters\r\n ----------\r\n X : array-like or sparse matrix, shape (n_samples, n_features)\r\n Feature matrix.\r\n\r\n y : array-like of shape (n_samples,)\r\n Target vector.\r\n\r\n discrete_features : {'auto', bool, array-like}, default='auto'\r\n If bool, then determines whether to consider all features discrete\r\n or continuous. If array, then it should be either a boolean mask\r\n with shape (n_features,) or array with indices of discrete features.\r\n If 'auto', it is assigned to False for dense `X` and to True for\r\n sparse `X`.\r\n\r\n discrete_target : bool, default=False\r\n Whether to consider `y` as a discrete variable.\r\n\r\n n_neighbors : int, default=3\r\n Number of neighbors to use for MI estimation for continuous variables,\r\n see [1]_ and [2]_. 
Higher values reduce variance of the estimation, but\r\n could introduce a bias.\r\n\r\n copy : bool, default=True\r\n Whether to make a copy of the given data. If set to False, the initial\r\n data will be overwritten.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Determines random number generation for adding small noise to\r\n continuous variables in order to remove repeated values.\r\n Pass an int for reproducible results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n Returns\r\n -------\r\n mi : ndarray, shape (n_features,)\r\n Estimated mutual information between each feature and the target.\r\n A negative value will be replaced by 0.\r\n\r\n References\r\n ----------\r\n .. [1] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\r\n information\". Phys. Rev. E 69, 2004.\r\n .. [2] B. C. Ross \"Mutual Information between Discrete and Continuous\r\n Data Sets\". PLoS ONE 9(2), 2014.\r\n \"\"\"\r\n X, y = check_X_y(X, y, accept_sparse=\"csc\", y_numeric=not discrete_target)\r\n n_samples, n_features = X.shape\r\n\r\n if isinstance(discrete_features, (str, bool)):\r\n if isinstance(discrete_features, str):\r\n if discrete_features == \"auto\":\r\n discrete_features = issparse(X)\r\n else:\r\n raise ValueError(\"Invalid string value for discrete_features.\")\r\n discrete_mask = np.empty(n_features, dtype=bool)\r\n discrete_mask.fill(discrete_features)\r\n else:\r\n discrete_features = check_array(discrete_features, ensure_2d=False)\r\n if discrete_features.dtype != \"bool\":\r\n discrete_mask = np.zeros(n_features, dtype=bool)\r\n discrete_mask[discrete_features] = True\r\n else:\r\n discrete_mask = discrete_features\r\n\r\n continuous_mask = ~discrete_mask\r\n if np.any(continuous_mask) and issparse(X):\r\n raise ValueError(\"Sparse matrix `X` can't have continuous features.\")\r\n\r\n rng = check_random_state(random_state)\r\n if np.any(continuous_mask):\r\n if copy:\r\n X = X.copy()\r\n\r\n if not discrete_target:\r\n X[:, continuous_mask] = scale(\r\n X[:, continuous_mask], with_mean=False, copy=False\r\n )\r\n\r\n # Add small noise to continuous features as advised in Kraskov et. al.\r\n X = X.astype(float, **_astype_copy_false(X))\r\n means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))\r\n X[:, continuous_mask] += (\r\n 1e-10 * means * rng.randn(n_samples, np.sum(continuous_mask))\r\n )\r\n\r\n if not discrete_target:\r\n y = scale(y, with_mean=False)\r\n y += 1e-10 * np.maximum(1, np.mean(np.abs(y))) * rng.randn(n_samples)\r\n\r\n mi = [\r\n _compute_mi(x, y, discrete_feature, discrete_target, n_neighbors)\r\n for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)\r\n ]\r\n\r\n return np.array(mi)\r\n\r\n\r\ndef mutual_info_regression(\r\n X, y, *, discrete_features=\"auto\", n_neighbors=3, copy=True, random_state=None\r\n):\r\n \"\"\"Estimate mutual information for a continuous target variable.\r\n\r\n Mutual information (MI) [1]_ between two random variables is a non-negative\r\n value, which measures the dependency between the variables. It is equal\r\n to zero if and only if two random variables are independent, and higher\r\n values mean higher dependency.\r\n\r\n The function relies on nonparametric methods based on entropy estimation\r\n from k-nearest neighbors distances as described in [2]_ and [3]_. 
Both\r\n methods are based on the idea originally proposed in [4]_.\r\n\r\n It can be used for univariate features selection, read more in the\r\n :ref:`User Guide <univariate_feature_selection>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like or sparse matrix, shape (n_samples, n_features)\r\n Feature matrix.\r\n\r\n y : array-like of shape (n_samples,)\r\n Target vector.\r\n\r\n discrete_features : {'auto', bool, array-like}, default='auto'\r\n If bool, then determines whether to consider all features discrete\r\n or continuous. If array, then it should be either a boolean mask\r\n with shape (n_features,) or array with indices of discrete features.\r\n If 'auto', it is assigned to False for dense `X` and to True for\r\n sparse `X`.\r\n\r\n n_neighbors : int, default=3\r\n Number of neighbors to use for MI estimation for continuous variables,\r\n see [2]_ and [3]_. Higher values reduce variance of the estimation, but\r\n could introduce a bias.\r\n\r\n copy : bool, default=True\r\n Whether to make a copy of the given data. If set to False, the initial\r\n data will be overwritten.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Determines random number generation for adding small noise to\r\n continuous variables in order to remove repeated values.\r\n Pass an int for reproducible results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n Returns\r\n -------\r\n mi : ndarray, shape (n_features,)\r\n Estimated mutual information between each feature and the target.\r\n\r\n Notes\r\n -----\r\n 1. The term \"discrete features\" is used instead of naming them\r\n \"categorical\", because it describes the essence more accurately.\r\n For example, pixel intensities of an image are discrete features\r\n (but hardly categorical) and you will get better results if mark them\r\n as such. Also note, that treating a continuous variable as discrete and\r\n vice versa will usually give incorrect results, so be attentive about\r\n that.\r\n 2. True mutual information can't be negative. If its estimate turns out\r\n to be negative, it is replaced by zero.\r\n\r\n References\r\n ----------\r\n .. [1] `Mutual Information\r\n <https://en.wikipedia.org/wiki/Mutual_information>`_\r\n on Wikipedia.\r\n .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\r\n information\". Phys. Rev. E 69, 2004.\r\n .. [3] B. C. Ross \"Mutual Information between Discrete and Continuous\r\n Data Sets\". PLoS ONE 9(2), 2014.\r\n .. [4] L. F. Kozachenko, N. N. Leonenko, \"Sample Estimate of the Entropy\r\n of a Random Vector\", Probl. Peredachi Inf., 23:2 (1987), 9-16\r\n \"\"\"\r\n return _estimate_mi(X, y, discrete_features, False, n_neighbors, copy, random_state)\r\n\r\n\r\ndef mutual_info_classif(\r\n X, y, *, discrete_features=\"auto\", n_neighbors=3, copy=True, random_state=None\r\n):\r\n \"\"\"Estimate mutual information for a discrete target variable.\r\n\r\n Mutual information (MI) [1]_ between two random variables is a non-negative\r\n value, which measures the dependency between the variables. It is equal\r\n to zero if and only if two random variables are independent, and higher\r\n values mean higher dependency.\r\n\r\n The function relies on nonparametric methods based on entropy estimation\r\n from k-nearest neighbors distances as described in [2]_ and [3]_. 
Both\r\n methods are based on the idea originally proposed in [4]_.\r\n\r\n It can be used for univariate features selection, read more in the\r\n :ref:`User Guide <univariate_feature_selection>`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like or sparse matrix, shape (n_samples, n_features)\r\n Feature matrix.\r\n\r\n y : array-like of shape (n_samples,)\r\n Target vector.\r\n\r\n discrete_features : {'auto', bool, array-like}, default='auto'\r\n If bool, then determines whether to consider all features discrete\r\n or continuous. If array, then it should be either a boolean mask\r\n with shape (n_features,) or array with indices of discrete features.\r\n If 'auto', it is assigned to False for dense `X` and to True for\r\n sparse `X`.\r\n\r\n n_neighbors : int, default=3\r\n Number of neighbors to use for MI estimation for continuous variables,\r\n see [2]_ and [3]_. Higher values reduce variance of the estimation, but\r\n could introduce a bias.\r\n\r\n copy : bool, default=True\r\n Whether to make a copy of the given data. If set to False, the initial\r\n data will be overwritten.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n Determines random number generation for adding small noise to\r\n continuous variables in order to remove repeated values.\r\n Pass an int for reproducible results across multiple function calls.\r\n See :term:`Glossary <random_state>`.\r\n\r\n Returns\r\n -------\r\n mi : ndarray, shape (n_features,)\r\n Estimated mutual information between each feature and the target.\r\n\r\n Notes\r\n -----\r\n 1. The term \"discrete features\" is used instead of naming them\r\n \"categorical\", because it describes the essence more accurately.\r\n For example, pixel intensities of an image are discrete features\r\n (but hardly categorical) and you will get better results if mark them\r\n as such. Also note, that treating a continuous variable as discrete and\r\n vice versa will usually give incorrect results, so be attentive about\r\n that.\r\n 2. True mutual information can't be negative. If its estimate turns out\r\n to be negative, it is replaced by zero.\r\n\r\n References\r\n ----------\r\n .. [1] `Mutual Information\r\n <https://en.wikipedia.org/wiki/Mutual_information>`_\r\n on Wikipedia.\r\n .. [2] A. Kraskov, H. Stogbauer and P. Grassberger, \"Estimating mutual\r\n information\". Phys. Rev. E 69, 2004.\r\n .. [3] B. C. Ross \"Mutual Information between Discrete and Continuous\r\n Data Sets\". PLoS ONE 9(2), 2014.\r\n .. [4] L. F. Kozachenko, N. N. Leonenko, \"Sample Estimate of the Entropy\r\n of a Random Vector:, Probl. Peredachi Inf., 23:2 (1987), 9-16\r\n \"\"\"\r\n check_classification_targets(y)\r\n return _estimate_mi(X, y, discrete_features, True, n_neighbors, copy, random_state)\r\n",
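A short usage sketch for the two public estimators defined above; the synthetic arrays and variable names below are illustrative, not taken from the file:

import numpy as np
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression

rng = np.random.RandomState(0)
X = rng.rand(500, 3)
y_reg = X[:, 0] + 0.1 * rng.randn(500)   # only column 0 carries signal
y_clf = (X[:, 1] > 0.5).astype(int)      # only column 1 carries signal

mi_reg = mutual_info_regression(X, y_reg, random_state=0)
mi_clf = mutual_info_classif(X, y_clf, random_state=0)

# Estimates are clipped at zero, and the informative column should dominate.
assert (mi_reg >= 0).all() and (mi_clf >= 0).all()
assert mi_reg.argmax() == 0 and mi_clf.argmax() == 1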
"import numpy as np\r\nfrom scipy.stats.mstats import mquantiles\r\n\r\nimport pytest\r\nfrom numpy.testing import assert_allclose\r\n\r\nfrom sklearn.datasets import load_diabetes\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.datasets import make_classification, make_regression\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.utils._testing import _convert_container\r\n\r\nfrom sklearn.inspection import plot_partial_dependence as plot_partial_dependence_func\r\nfrom sklearn.inspection import PartialDependenceDisplay\r\n\r\n\r\n# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved\r\npytestmark = pytest.mark.filterwarnings(\r\n \"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:\"\r\n \"matplotlib.*\",\r\n # TODO: Remove in 1.2 and convert test to only use\r\n # PartialDependenceDisplay.from_estimator\r\n \"ignore:Function plot_partial_dependence is deprecated\",\r\n)\r\n\r\n\r\n# TODO: Remove in 1.2 and convert test to only use\r\n# PartialDependenceDisplay.from_estimator\r\[email protected](\r\n params=[PartialDependenceDisplay.from_estimator, plot_partial_dependence_func],\r\n ids=[\"from_estimator\", \"function\"],\r\n)\r\ndef plot_partial_dependence(request):\r\n return request.param\r\n\r\n\r\[email protected](scope=\"module\")\r\ndef diabetes():\r\n return load_diabetes()\r\n\r\n\r\[email protected](scope=\"module\")\r\ndef clf_diabetes(diabetes):\r\n clf = GradientBoostingRegressor(n_estimators=10, random_state=1)\r\n clf.fit(diabetes.data, diabetes.target)\r\n return clf\r\n\r\n\r\ndef test_plot_partial_dependence_deprecation(pyplot, clf_diabetes, diabetes):\r\n \"\"\"Check that plot_partial_dependence is deprecated\"\"\"\r\n with pytest.warns(FutureWarning):\r\n plot_partial_dependence_func(clf_diabetes, diabetes.data, [0])\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\"grid_resolution\", [10, 20])\r\ndef test_plot_partial_dependence(\r\n plot_partial_dependence, grid_resolution, pyplot, clf_diabetes, diabetes\r\n):\r\n # Test partial dependence plot function.\r\n # Use columns 0 & 2 as 1 is not quantitative (sex)\r\n feature_names = diabetes.feature_names\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [0, 2, (0, 2)],\r\n grid_resolution=grid_resolution,\r\n feature_names=feature_names,\r\n contour_kw={\"cmap\": \"jet\"},\r\n )\r\n fig = pyplot.gcf()\r\n axs = fig.get_axes()\r\n assert disp.figure_ is fig\r\n assert len(axs) == 4\r\n\r\n assert disp.bounding_ax_ is not None\r\n assert disp.axes_.shape == (1, 3)\r\n assert disp.lines_.shape == (1, 3)\r\n assert disp.contours_.shape == (1, 3)\r\n assert disp.deciles_vlines_.shape == (1, 3)\r\n assert disp.deciles_hlines_.shape == (1, 3)\r\n\r\n assert disp.lines_[0, 2] is None\r\n assert disp.contours_[0, 0] is None\r\n assert disp.contours_[0, 1] is None\r\n\r\n # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP\r\n for i in range(3):\r\n assert disp.deciles_vlines_[0, i] is not None\r\n assert disp.deciles_hlines_[0, 0] is None\r\n assert disp.deciles_hlines_[0, 1] is None\r\n assert disp.deciles_hlines_[0, 2] is not None\r\n\r\n assert disp.features == [(0,), (2,), (0, 2)]\r\n assert np.all(disp.feature_names == feature_names)\r\n assert len(disp.deciles) == 2\r\n for i in [0, 2]:\r\n assert_allclose(\r\n disp.deciles[i],\r\n 
mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)),\r\n )\r\n\r\n single_feature_positions = [(0, (0, 0)), (2, (0, 1))]\r\n expected_ylabels = [\"Partial dependence\", \"\"]\r\n\r\n for i, (feat_col, pos) in enumerate(single_feature_positions):\r\n ax = disp.axes_[pos]\r\n assert ax.get_ylabel() == expected_ylabels[i]\r\n assert ax.get_xlabel() == diabetes.feature_names[feat_col]\r\n assert_allclose(ax.get_ylim(), disp.pdp_lim[1])\r\n\r\n line = disp.lines_[pos]\r\n\r\n avg_preds = disp.pd_results[i]\r\n assert avg_preds.average.shape == (1, grid_resolution)\r\n target_idx = disp.target_idx\r\n\r\n line_data = line.get_data()\r\n assert_allclose(line_data[0], avg_preds[\"values\"][0])\r\n assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())\r\n\r\n # two feature position\r\n ax = disp.axes_[0, 2]\r\n coutour = disp.contours_[0, 2]\r\n expected_levels = np.linspace(*disp.pdp_lim[2], num=8)\r\n assert_allclose(coutour.levels, expected_levels)\r\n assert coutour.get_cmap().name == \"jet\"\r\n assert ax.get_xlabel() == diabetes.feature_names[0]\r\n assert ax.get_ylabel() == diabetes.feature_names[2]\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"kind, subsample, shape\",\r\n [\r\n (\"average\", None, (1, 3)),\r\n (\"individual\", None, (1, 3, 442)),\r\n (\"both\", None, (1, 3, 443)),\r\n (\"individual\", 50, (1, 3, 50)),\r\n (\"both\", 50, (1, 3, 51)),\r\n (\"individual\", 0.5, (1, 3, 221)),\r\n (\"both\", 0.5, (1, 3, 222)),\r\n ],\r\n)\r\ndef test_plot_partial_dependence_kind(\r\n plot_partial_dependence, pyplot, kind, subsample, shape, clf_diabetes, diabetes\r\n):\r\n disp = plot_partial_dependence(\r\n clf_diabetes, diabetes.data, [0, 1, 2], kind=kind, subsample=subsample\r\n )\r\n\r\n assert disp.axes_.shape == (1, 3)\r\n assert disp.lines_.shape == shape\r\n assert disp.contours_.shape == (1, 3)\r\n\r\n assert disp.contours_[0, 0] is None\r\n assert disp.contours_[0, 1] is None\r\n assert disp.contours_[0, 2] is None\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"input_type, feature_names_type\",\r\n [\r\n (\"dataframe\", None),\r\n (\"dataframe\", \"list\"),\r\n (\"list\", \"list\"),\r\n (\"array\", \"list\"),\r\n (\"dataframe\", \"array\"),\r\n (\"list\", \"array\"),\r\n (\"array\", \"array\"),\r\n (\"dataframe\", \"series\"),\r\n (\"list\", \"series\"),\r\n (\"array\", \"series\"),\r\n (\"dataframe\", \"index\"),\r\n (\"list\", \"index\"),\r\n (\"array\", \"index\"),\r\n ],\r\n)\r\ndef test_plot_partial_dependence_str_features(\r\n plot_partial_dependence,\r\n pyplot,\r\n clf_diabetes,\r\n diabetes,\r\n input_type,\r\n feature_names_type,\r\n):\r\n if input_type == \"dataframe\":\r\n pd = pytest.importorskip(\"pandas\")\r\n X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)\r\n elif input_type == \"list\":\r\n X = diabetes.data.tolist()\r\n else:\r\n X = diabetes.data\r\n\r\n if feature_names_type is None:\r\n feature_names = None\r\n else:\r\n feature_names = _convert_container(diabetes.feature_names, feature_names_type)\r\n\r\n grid_resolution = 25\r\n # check with str features and array feature names and single column\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n X,\r\n [(\"age\", \"bmi\"), \"bmi\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=feature_names,\r\n n_cols=1,\r\n line_kw={\"alpha\": 0.8},\r\n )\r\n fig = pyplot.gcf()\r\n axs = fig.get_axes()\r\n assert len(axs) == 3\r\n\r\n assert disp.figure_ is fig\r\n assert 
disp.axes_.shape == (2, 1)\r\n assert disp.lines_.shape == (2, 1)\r\n assert disp.contours_.shape == (2, 1)\r\n assert disp.deciles_vlines_.shape == (2, 1)\r\n assert disp.deciles_hlines_.shape == (2, 1)\r\n\r\n assert disp.lines_[0, 0] is None\r\n assert disp.deciles_vlines_[0, 0] is not None\r\n assert disp.deciles_hlines_[0, 0] is not None\r\n assert disp.contours_[1, 0] is None\r\n assert disp.deciles_hlines_[1, 0] is None\r\n assert disp.deciles_vlines_[1, 0] is not None\r\n\r\n # line\r\n ax = disp.axes_[1, 0]\r\n assert ax.get_xlabel() == \"bmi\"\r\n assert ax.get_ylabel() == \"Partial dependence\"\r\n\r\n line = disp.lines_[1, 0]\r\n avg_preds = disp.pd_results[1]\r\n target_idx = disp.target_idx\r\n assert line.get_alpha() == 0.8\r\n\r\n line_data = line.get_data()\r\n assert_allclose(line_data[0], avg_preds[\"values\"][0])\r\n assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())\r\n\r\n # contour\r\n ax = disp.axes_[0, 0]\r\n coutour = disp.contours_[0, 0]\r\n expect_levels = np.linspace(*disp.pdp_lim[2], num=8)\r\n assert_allclose(coutour.levels, expect_levels)\r\n assert ax.get_xlabel() == \"age\"\r\n assert ax.get_ylabel() == \"bmi\"\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\ndef test_plot_partial_dependence_custom_axes(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes\r\n):\r\n grid_resolution = 25\r\n fig, (ax1, ax2) = pyplot.subplots(1, 2)\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", (\"age\", \"bmi\")],\r\n grid_resolution=grid_resolution,\r\n feature_names=diabetes.feature_names,\r\n ax=[ax1, ax2],\r\n )\r\n assert fig is disp.figure_\r\n assert disp.bounding_ax_ is None\r\n assert disp.axes_.shape == (2,)\r\n assert disp.axes_[0] is ax1\r\n assert disp.axes_[1] is ax2\r\n\r\n ax = disp.axes_[0]\r\n assert ax.get_xlabel() == \"age\"\r\n assert ax.get_ylabel() == \"Partial dependence\"\r\n\r\n line = disp.lines_[0]\r\n avg_preds = disp.pd_results[0]\r\n target_idx = disp.target_idx\r\n\r\n line_data = line.get_data()\r\n assert_allclose(line_data[0], avg_preds[\"values\"][0])\r\n assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())\r\n\r\n # contour\r\n ax = disp.axes_[1]\r\n coutour = disp.contours_[1]\r\n expect_levels = np.linspace(*disp.pdp_lim[2], num=8)\r\n assert_allclose(coutour.levels, expect_levels)\r\n assert ax.get_xlabel() == \"age\"\r\n assert ax.get_ylabel() == \"bmi\"\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"kind, lines\", [(\"average\", 1), (\"individual\", 442), (\"both\", 443)]\r\n)\r\ndef test_plot_partial_dependence_passing_numpy_axes(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes, kind, lines\r\n):\r\n grid_resolution = 25\r\n feature_names = diabetes.feature_names\r\n disp1 = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n kind=kind,\r\n grid_resolution=grid_resolution,\r\n feature_names=feature_names,\r\n )\r\n assert disp1.axes_.shape == (1, 2)\r\n assert disp1.axes_[0, 0].get_ylabel() == \"Partial dependence\"\r\n assert disp1.axes_[0, 1].get_ylabel() == \"\"\r\n assert len(disp1.axes_[0, 0].get_lines()) == lines\r\n assert len(disp1.axes_[0, 1].get_lines()) == lines\r\n\r\n lr = LinearRegression()\r\n lr.fit(diabetes.data, diabetes.target)\r\n\r\n disp2 = plot_partial_dependence(\r\n lr,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n kind=kind,\r\n grid_resolution=grid_resolution,\r\n feature_names=feature_names,\r\n 
ax=disp1.axes_,\r\n )\r\n\r\n assert np.all(disp1.axes_ == disp2.axes_)\r\n assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines\r\n assert len(disp2.axes_[0, 1].get_lines()) == 2 * lines\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\"nrows, ncols\", [(2, 2), (3, 1)])\r\ndef test_plot_partial_dependence_incorrent_num_axes(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes, nrows, ncols\r\n):\r\n grid_resolution = 5\r\n fig, axes = pyplot.subplots(nrows, ncols)\r\n axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes]\r\n\r\n msg = \"Expected ax to have 2 axes, got {}\".format(nrows * ncols)\r\n\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=diabetes.feature_names,\r\n )\r\n\r\n for ax_format in axes_formats:\r\n with pytest.raises(ValueError, match=msg):\r\n plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=diabetes.feature_names,\r\n ax=ax_format,\r\n )\r\n\r\n # with axes object\r\n with pytest.raises(ValueError, match=msg):\r\n disp.plot(ax=ax_format)\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\ndef test_plot_partial_dependence_with_same_axes(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes\r\n):\r\n # The first call to plot_partial_dependence will create two new axes to\r\n # place in the space of the passed in axes, which results in a total of\r\n # three axes in the figure.\r\n # Currently the API does not allow for the second call to\r\n # plot_partial_dependence to use the same axes again, because it will\r\n # create two new axes in the space resulting in five axes. 
To get the\r\n # expected behavior one needs to pass the generated axes into the second\r\n # call:\r\n # disp1 = plot_partial_dependence(...)\r\n # disp2 = plot_partial_dependence(..., ax=disp1.axes_)\r\n\r\n grid_resolution = 25\r\n fig, ax = pyplot.subplots()\r\n plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=diabetes.feature_names,\r\n ax=ax,\r\n )\r\n\r\n msg = (\r\n \"The ax was already used in another plot function, please set \"\r\n \"ax=display.axes_ instead\"\r\n )\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=diabetes.feature_names,\r\n ax=ax,\r\n )\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\ndef test_plot_partial_dependence_feature_name_reuse(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes\r\n):\r\n # second call to plot does not change the feature names from the first\r\n # call\r\n\r\n feature_names = diabetes.feature_names\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [0, 1],\r\n grid_resolution=10,\r\n feature_names=feature_names,\r\n )\r\n\r\n plot_partial_dependence(\r\n clf_diabetes, diabetes.data, [0, 1], grid_resolution=10, ax=disp.axes_\r\n )\r\n\r\n for i, ax in enumerate(disp.axes_.ravel()):\r\n assert ax.get_xlabel() == feature_names[i]\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\ndef test_plot_partial_dependence_multiclass(plot_partial_dependence, pyplot):\r\n grid_resolution = 25\r\n clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1)\r\n iris = load_iris()\r\n\r\n # Test partial dependence plot function on multi-class input.\r\n clf_int.fit(iris.data, iris.target)\r\n disp_target_0 = plot_partial_dependence(\r\n clf_int, iris.data, [0, 1], target=0, grid_resolution=grid_resolution\r\n )\r\n assert disp_target_0.figure_ is pyplot.gcf()\r\n assert disp_target_0.axes_.shape == (1, 2)\r\n assert disp_target_0.lines_.shape == (1, 2)\r\n assert disp_target_0.contours_.shape == (1, 2)\r\n assert disp_target_0.deciles_vlines_.shape == (1, 2)\r\n assert disp_target_0.deciles_hlines_.shape == (1, 2)\r\n assert all(c is None for c in disp_target_0.contours_.flat)\r\n assert disp_target_0.target_idx == 0\r\n\r\n # now with symbol labels\r\n target = iris.target_names[iris.target]\r\n clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1)\r\n clf_symbol.fit(iris.data, target)\r\n disp_symbol = plot_partial_dependence(\r\n clf_symbol, iris.data, [0, 1], target=\"setosa\", grid_resolution=grid_resolution\r\n )\r\n assert disp_symbol.figure_ is pyplot.gcf()\r\n assert disp_symbol.axes_.shape == (1, 2)\r\n assert disp_symbol.lines_.shape == (1, 2)\r\n assert disp_symbol.contours_.shape == (1, 2)\r\n assert disp_symbol.deciles_vlines_.shape == (1, 2)\r\n assert disp_symbol.deciles_hlines_.shape == (1, 2)\r\n assert all(c is None for c in disp_symbol.contours_.flat)\r\n assert disp_symbol.target_idx == 0\r\n\r\n for int_result, symbol_result in zip(\r\n disp_target_0.pd_results, disp_symbol.pd_results\r\n ):\r\n assert_allclose(int_result.average, symbol_result.average)\r\n assert_allclose(int_result[\"values\"], symbol_result[\"values\"])\r\n\r\n # check that the pd plots are different for another target\r\n disp_target_1 = plot_partial_dependence(\r\n clf_int, iris.data, [0, 1], target=1, 
grid_resolution=grid_resolution\r\n )\r\n target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1]\r\n target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1]\r\n assert any(target_0_data_y != target_1_data_y)\r\n\r\n\r\nmultioutput_regression_data = make_regression(n_samples=50, n_targets=2, random_state=0)\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\"target\", [0, 1])\r\ndef test_plot_partial_dependence_multioutput(plot_partial_dependence, pyplot, target):\r\n # Test partial dependence plot function on multi-output input.\r\n X, y = multioutput_regression_data\r\n clf = LinearRegression().fit(X, y)\r\n\r\n grid_resolution = 25\r\n disp = plot_partial_dependence(\r\n clf, X, [0, 1], target=target, grid_resolution=grid_resolution\r\n )\r\n fig = pyplot.gcf()\r\n axs = fig.get_axes()\r\n assert len(axs) == 3\r\n assert disp.target_idx == target\r\n assert disp.bounding_ax_ is not None\r\n\r\n positions = [(0, 0), (0, 1)]\r\n expected_label = [\"Partial dependence\", \"\"]\r\n\r\n for i, pos in enumerate(positions):\r\n ax = disp.axes_[pos]\r\n assert ax.get_ylabel() == expected_label[i]\r\n assert ax.get_xlabel() == \"{}\".format(i)\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\ndef test_plot_partial_dependence_dataframe(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes\r\n):\r\n pd = pytest.importorskip(\"pandas\")\r\n df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)\r\n\r\n grid_resolution = 25\r\n\r\n plot_partial_dependence(\r\n clf_diabetes,\r\n df,\r\n [\"bp\", \"s1\"],\r\n grid_resolution=grid_resolution,\r\n feature_names=df.columns.tolist(),\r\n )\r\n\r\n\r\ndummy_classification_data = make_classification(random_state=0)\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"data, params, err_msg\",\r\n [\r\n (\r\n multioutput_regression_data,\r\n {\"target\": None, \"features\": [0]},\r\n \"target must be specified for multi-output\",\r\n ),\r\n (\r\n multioutput_regression_data,\r\n {\"target\": -1, \"features\": [0]},\r\n r\"target must be in \\[0, n_tasks\\]\",\r\n ),\r\n (\r\n multioutput_regression_data,\r\n {\"target\": 100, \"features\": [0]},\r\n r\"target must be in \\[0, n_tasks\\]\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [\"foobar\"], \"feature_names\": None},\r\n \"Feature foobar not in feature_names\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [\"foobar\"], \"feature_names\": [\"abcd\", \"def\"]},\r\n \"Feature foobar not in feature_names\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [(1, 2, 3)]},\r\n \"Each entry in features must be either an int, \",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [1, {}]},\r\n \"Each entry in features must be either an int, \",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [tuple()]},\r\n \"Each entry in features must be either an int, \",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [123], \"feature_names\": [\"blahblah\"]},\r\n \"All entries of features must be less than \",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [0, 1, 2], \"feature_names\": [\"a\", \"b\", \"a\"]},\r\n \"feature_names should not contain duplicates\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [(1, 2)], \"kind\": \"individual\"},\r\n \"It is not possible to display individual effects for more than one\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [(1, 2)], 
\"kind\": \"both\"},\r\n \"It is not possible to display individual effects for more than one\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [1], \"subsample\": -1},\r\n \"When an integer, subsample=-1 should be positive.\",\r\n ),\r\n (\r\n dummy_classification_data,\r\n {\"features\": [1], \"subsample\": 1.2},\r\n r\"When a floating-point, subsample=1.2 should be in the \\(0, 1\\) range\",\r\n ),\r\n ],\r\n)\r\ndef test_plot_partial_dependence_error(\r\n plot_partial_dependence, pyplot, data, params, err_msg\r\n):\r\n X, y = data\r\n estimator = LinearRegression().fit(X, y)\r\n\r\n with pytest.raises(ValueError, match=err_msg):\r\n plot_partial_dependence(estimator, X, **params)\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"params, err_msg\",\r\n [\r\n ({\"target\": 4, \"features\": [0]}, \"target not in est.classes_, got 4\"),\r\n ({\"target\": None, \"features\": [0]}, \"target must be specified for multi-class\"),\r\n (\r\n {\"target\": 1, \"features\": [4.5]},\r\n \"Each entry in features must be either an int,\",\r\n ),\r\n ],\r\n)\r\ndef test_plot_partial_dependence_multiclass_error(\r\n plot_partial_dependence, pyplot, params, err_msg\r\n):\r\n iris = load_iris()\r\n clf = GradientBoostingClassifier(n_estimators=10, random_state=1)\r\n clf.fit(iris.data, iris.target)\r\n\r\n with pytest.raises(ValueError, match=err_msg):\r\n plot_partial_dependence(clf, iris.data, **params)\r\n\r\n\r\ndef test_plot_partial_dependence_does_not_override_ylabel(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes\r\n):\r\n # Non-regression test to be sure to not override the ylabel if it has been\r\n # See https://github.com/scikit-learn/scikit-learn/issues/15772\r\n _, axes = pyplot.subplots(1, 2)\r\n axes[0].set_ylabel(\"Hello world\")\r\n plot_partial_dependence(clf_diabetes, diabetes.data, [0, 1], ax=axes)\r\n\r\n assert axes[0].get_ylabel() == \"Hello world\"\r\n assert axes[1].get_ylabel() == \"Partial dependence\"\r\n\r\n\r\[email protected](\r\n \"kind, expected_shape\",\r\n [(\"average\", (1, 2)), (\"individual\", (1, 2, 50)), (\"both\", (1, 2, 51))],\r\n)\r\ndef test_plot_partial_dependence_subsampling(\r\n plot_partial_dependence, pyplot, clf_diabetes, diabetes, kind, expected_shape\r\n):\r\n # check that the subsampling is properly working\r\n # non-regression test for:\r\n # https://github.com/scikit-learn/scikit-learn/pull/18359\r\n matplotlib = pytest.importorskip(\"matplotlib\")\r\n grid_resolution = 25\r\n feature_names = diabetes.feature_names\r\n\r\n disp1 = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [\"age\", \"bmi\"],\r\n kind=kind,\r\n grid_resolution=grid_resolution,\r\n feature_names=feature_names,\r\n subsample=50,\r\n random_state=0,\r\n )\r\n\r\n assert disp1.lines_.shape == expected_shape\r\n assert all(\r\n [isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()]\r\n )\r\n\r\n\r\[email protected](\r\n \"kind, line_kw, label\",\r\n [\r\n (\"individual\", {}, None),\r\n (\"individual\", {\"label\": \"xxx\"}, None),\r\n (\"average\", {}, None),\r\n (\"average\", {\"label\": \"xxx\"}, \"xxx\"),\r\n (\"both\", {}, \"average\"),\r\n (\"both\", {\"label\": \"xxx\"}, \"xxx\"),\r\n ],\r\n)\r\ndef test_partial_dependence_overwrite_labels(\r\n plot_partial_dependence,\r\n pyplot,\r\n clf_diabetes,\r\n diabetes,\r\n kind,\r\n line_kw,\r\n label,\r\n):\r\n \"\"\"Test that make sure that we can overwrite the label of the PDP plot\"\"\"\r\n disp = 
plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [0, 2],\r\n grid_resolution=25,\r\n feature_names=diabetes.feature_names,\r\n kind=kind,\r\n line_kw=line_kw,\r\n )\r\n\r\n for ax in disp.axes_.ravel():\r\n if label is None:\r\n assert ax.get_legend() is None\r\n else:\r\n legend_text = ax.get_legend().get_texts()\r\n assert len(legend_text) == 1\r\n assert legend_text[0].get_text() == label\r\n\r\n\r\[email protected](\"ignore:A Bunch will be returned\")\r\[email protected](\r\n \"line_kw, pd_line_kw, ice_lines_kw, expected_colors\",\r\n [\r\n ({\"color\": \"r\"}, {\"color\": \"g\"}, {\"color\": \"b\"}, (\"g\", \"b\")),\r\n (None, {\"color\": \"g\"}, {\"color\": \"b\"}, (\"g\", \"b\")),\r\n ({\"color\": \"r\"}, None, {\"color\": \"b\"}, (\"r\", \"b\")),\r\n ({\"color\": \"r\"}, {\"color\": \"g\"}, None, (\"g\", \"r\")),\r\n ({\"color\": \"r\"}, None, None, (\"r\", \"r\")),\r\n ({\"color\": \"r\"}, {\"linestyle\": \"--\"}, {\"linestyle\": \"-.\"}, (\"r\", \"r\")),\r\n ],\r\n)\r\ndef test_plot_partial_dependence_lines_kw(\r\n plot_partial_dependence,\r\n pyplot,\r\n clf_diabetes,\r\n diabetes,\r\n line_kw,\r\n pd_line_kw,\r\n ice_lines_kw,\r\n expected_colors,\r\n):\r\n \"\"\"Check that passing `pd_line_kw` and `ice_lines_kw` will act on the\r\n specific lines in the plot.\r\n \"\"\"\r\n\r\n disp = plot_partial_dependence(\r\n clf_diabetes,\r\n diabetes.data,\r\n [0, 2],\r\n grid_resolution=20,\r\n feature_names=diabetes.feature_names,\r\n n_cols=2,\r\n kind=\"both\",\r\n line_kw=line_kw,\r\n pd_line_kw=pd_line_kw,\r\n ice_lines_kw=ice_lines_kw,\r\n )\r\n\r\n line = disp.lines_[0, 0, -1]\r\n assert line.get_color() == expected_colors[0]\r\n if pd_line_kw is not None and \"linestyle\" in pd_line_kw:\r\n assert line.get_linestyle() == pd_line_kw[\"linestyle\"]\r\n else:\r\n assert line.get_linestyle() == \"-\"\r\n\r\n line = disp.lines_[0, 0, 0]\r\n assert line.get_color() == expected_colors[1]\r\n if ice_lines_kw is not None and \"linestyle\" in ice_lines_kw:\r\n assert line.get_linestyle() == ice_lines_kw[\"linestyle\"]\r\n else:\r\n assert line.get_linestyle() == \"-\"\r\n",
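For context, a minimal sketch of the display API these tests exercise, assuming a scikit-learn version that provides PartialDependenceDisplay.from_estimator (imported above) and an off-screen matplotlib backend:

import matplotlib
matplotlib.use("Agg")                    # draw off-screen so the sketch runs headless
import matplotlib.pyplot as plt

from sklearn.datasets import load_diabetes
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay

diabetes = load_diabetes()
est = GradientBoostingRegressor(n_estimators=10, random_state=1)
est.fit(diabetes.data, diabetes.target)

disp = PartialDependenceDisplay.from_estimator(
    est,
    diabetes.data,
    features=[0, 2, (0, 2)],             # two one-way PDPs and one two-way PDP
    feature_names=diabetes.feature_names,
    grid_resolution=20,
)
assert disp.axes_.shape == (1, 3)        # same layout the tests above check
plt.close("all")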
"\"\"\"\r\nTests for behavior if an author does *not* implement EA methods.\r\n\"\"\"\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.arrays import ExtensionArray\r\n\r\n\r\nclass MyEA(ExtensionArray):\r\n def __init__(self, values):\r\n self._values = values\r\n\r\n\r\[email protected]\r\ndef data():\r\n arr = np.arange(10)\r\n return MyEA(arr)\r\n\r\n\r\nclass TestExtensionArray:\r\n def test_errors(self, data, all_arithmetic_operators):\r\n # invalid ops\r\n op_name = all_arithmetic_operators\r\n with pytest.raises(AttributeError):\r\n getattr(data, op_name)\r\n",
"from string import ascii_letters as letters\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas.util._test_decorators as td\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n DataFrame,\r\n Series,\r\n Timestamp,\r\n date_range,\r\n option_context,\r\n)\r\nimport pandas._testing as tm\r\nimport pandas.core.common as com\r\n\r\nmsg = \"A value is trying to be set on a copy of a slice from a DataFrame\"\r\n\r\n\r\ndef random_text(nobs=100):\r\n df = []\r\n for i in range(nobs):\r\n idx = np.random.randint(len(letters), size=2)\r\n idx.sort()\r\n\r\n df.append([letters[idx[0] : idx[1]]])\r\n\r\n return DataFrame(df, columns=[\"letters\"])\r\n\r\n\r\nclass TestCaching:\r\n def test_slice_consolidate_invalidate_item_cache(self):\r\n\r\n # this is chained assignment, but will 'work'\r\n with option_context(\"chained_assignment\", None):\r\n\r\n # #3970\r\n df = DataFrame({\"aa\": np.arange(5), \"bb\": [2.2] * 5})\r\n\r\n # Creates a second float block\r\n df[\"cc\"] = 0.0\r\n\r\n # caches a reference to the 'bb' series\r\n df[\"bb\"]\r\n\r\n # repr machinery triggers consolidation\r\n repr(df)\r\n\r\n # Assignment to wrong series\r\n df[\"bb\"].iloc[0] = 0.17\r\n df._clear_item_cache()\r\n tm.assert_almost_equal(df[\"bb\"][0], 0.17)\r\n\r\n @pytest.mark.parametrize(\"do_ref\", [True, False])\r\n def test_setitem_cache_updating(self, do_ref):\r\n # GH 5424\r\n cont = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\"]\r\n\r\n df = DataFrame({\"a\": cont, \"b\": cont[3:] + cont[:3], \"c\": np.arange(7)})\r\n\r\n # ref the cache\r\n if do_ref:\r\n df.loc[0, \"c\"]\r\n\r\n # set it\r\n df.loc[7, \"c\"] = 1\r\n\r\n assert df.loc[0, \"c\"] == 0.0\r\n assert df.loc[7, \"c\"] == 1.0\r\n\r\n def test_setitem_cache_updating_slices(self):\r\n # GH 7084\r\n # not updating cache on series setting with slices\r\n expected = DataFrame(\r\n {\"A\": [600, 600, 600]}, index=date_range(\"5/7/2014\", \"5/9/2014\")\r\n )\r\n out = DataFrame({\"A\": [0, 0, 0]}, index=date_range(\"5/7/2014\", \"5/9/2014\"))\r\n df = DataFrame({\"C\": [\"A\", \"A\", \"A\"], \"D\": [100, 200, 300]})\r\n\r\n # loop through df to update out\r\n six = Timestamp(\"5/7/2014\")\r\n eix = Timestamp(\"5/9/2014\")\r\n for ix, row in df.iterrows():\r\n out.loc[six:eix, row[\"C\"]] = out.loc[six:eix, row[\"C\"]] + row[\"D\"]\r\n\r\n tm.assert_frame_equal(out, expected)\r\n tm.assert_series_equal(out[\"A\"], expected[\"A\"])\r\n\r\n # try via a chain indexing\r\n # this actually works\r\n out = DataFrame({\"A\": [0, 0, 0]}, index=date_range(\"5/7/2014\", \"5/9/2014\"))\r\n for ix, row in df.iterrows():\r\n v = out[row[\"C\"]][six:eix] + row[\"D\"]\r\n out[row[\"C\"]][six:eix] = v\r\n\r\n tm.assert_frame_equal(out, expected)\r\n tm.assert_series_equal(out[\"A\"], expected[\"A\"])\r\n\r\n out = DataFrame({\"A\": [0, 0, 0]}, index=date_range(\"5/7/2014\", \"5/9/2014\"))\r\n for ix, row in df.iterrows():\r\n out.loc[six:eix, row[\"C\"]] += row[\"D\"]\r\n\r\n tm.assert_frame_equal(out, expected)\r\n tm.assert_series_equal(out[\"A\"], expected[\"A\"])\r\n\r\n def test_altering_series_clears_parent_cache(self):\r\n # GH #33675\r\n df = DataFrame([[1, 2], [3, 4]], index=[\"a\", \"b\"], columns=[\"A\", \"B\"])\r\n ser = df[\"A\"]\r\n\r\n assert \"A\" in df._item_cache\r\n\r\n # Adding a new entry to ser swaps in a new array, so \"A\" needs to\r\n # be removed from df._item_cache\r\n ser[\"c\"] = 5\r\n assert len(ser) == 3\r\n assert \"A\" not in df._item_cache\r\n assert df[\"A\"] is not ser\r\n assert len(df[\"A\"]) == 
2\r\n\r\n\r\nclass TestChaining:\r\n def test_setitem_chained_setfault(self):\r\n\r\n # GH6026\r\n data = [\"right\", \"left\", \"left\", \"left\", \"right\", \"left\", \"timeout\"]\r\n mdata = [\"right\", \"left\", \"left\", \"left\", \"right\", \"left\", \"none\"]\r\n\r\n df = DataFrame({\"response\": np.array(data)})\r\n mask = df.response == \"timeout\"\r\n df.response[mask] = \"none\"\r\n tm.assert_frame_equal(df, DataFrame({\"response\": mdata}))\r\n\r\n recarray = np.rec.fromarrays([data], names=[\"response\"])\r\n df = DataFrame(recarray)\r\n mask = df.response == \"timeout\"\r\n df.response[mask] = \"none\"\r\n tm.assert_frame_equal(df, DataFrame({\"response\": mdata}))\r\n\r\n df = DataFrame({\"response\": data, \"response1\": data})\r\n mask = df.response == \"timeout\"\r\n df.response[mask] = \"none\"\r\n tm.assert_frame_equal(df, DataFrame({\"response\": mdata, \"response1\": data}))\r\n\r\n # GH 6056\r\n expected = DataFrame({\"A\": [np.nan, \"bar\", \"bah\", \"foo\", \"bar\"]})\r\n df = DataFrame({\"A\": np.array([\"foo\", \"bar\", \"bah\", \"foo\", \"bar\"])})\r\n df[\"A\"].iloc[0] = np.nan\r\n result = df.head()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n df = DataFrame({\"A\": np.array([\"foo\", \"bar\", \"bah\", \"foo\", \"bar\"])})\r\n df.A.iloc[0] = np.nan\r\n result = df.head()\r\n tm.assert_frame_equal(result, expected)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment(self):\r\n\r\n pd.set_option(\"chained_assignment\", \"raise\")\r\n\r\n # work with the chain\r\n expected = DataFrame([[-5, 1], [-6, 3]], columns=list(\"AB\"))\r\n df = DataFrame(np.arange(4).reshape(2, 2), columns=list(\"AB\"), dtype=\"int64\")\r\n assert df._is_copy is None\r\n\r\n df[\"A\"][0] = -5\r\n df[\"A\"][1] = -6\r\n tm.assert_frame_equal(df, expected)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_raises(self, using_array_manager):\r\n\r\n # test with the chaining\r\n df = DataFrame(\r\n {\r\n \"A\": Series(range(2), dtype=\"int64\"),\r\n \"B\": np.array(np.arange(2, 4), dtype=np.float64),\r\n }\r\n )\r\n assert df._is_copy is None\r\n\r\n if not using_array_manager:\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df[\"A\"][0] = -5\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df[\"A\"][1] = np.nan\r\n\r\n assert df[\"A\"]._is_copy is None\r\n\r\n else:\r\n # INFO(ArrayManager) for ArrayManager it doesn't matter that it's\r\n # a mixed dataframe\r\n df[\"A\"][0] = -5\r\n df[\"A\"][1] = -6\r\n expected = DataFrame([[-5, 2], [-6, 3]], columns=list(\"AB\"))\r\n expected[\"B\"] = expected[\"B\"].astype(\"float64\")\r\n tm.assert_frame_equal(df, expected)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_fails(self):\r\n\r\n # Using a copy (the chain), fails\r\n df = DataFrame(\r\n {\r\n \"A\": Series(range(2), dtype=\"int64\"),\r\n \"B\": np.array(np.arange(2, 4), dtype=np.float64),\r\n }\r\n )\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.loc[0][\"A\"] = -5\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_doc_example(self):\r\n\r\n # Doc example\r\n df = DataFrame(\r\n {\r\n \"a\": [\"one\", \"one\", \"two\", \"three\", \"two\", \"one\", \"six\"],\r\n \"c\": Series(range(7), dtype=\"int64\"),\r\n }\r\n )\r\n assert df._is_copy is None\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n indexer = df.a.str.startswith(\"o\")\r\n df[indexer][\"c\"] = 42\r\n\r\n @pytest.mark.arm_slow\r\n def 
test_detect_chained_assignment_object_dtype(self, using_array_manager):\r\n\r\n expected = DataFrame({\"A\": [111, \"bbb\", \"ccc\"], \"B\": [1, 2, 3]})\r\n df = DataFrame({\"A\": [\"aaa\", \"bbb\", \"ccc\"], \"B\": [1, 2, 3]})\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.loc[0][\"A\"] = 111\r\n\r\n if not using_array_manager:\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df[\"A\"][0] = 111\r\n\r\n df.loc[0, \"A\"] = 111\r\n else:\r\n # INFO(ArrayManager) for ArrayManager it doesn't matter that it's\r\n # a mixed dataframe\r\n df[\"A\"][0] = 111\r\n\r\n tm.assert_frame_equal(df, expected)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_is_copy_pickle(self):\r\n\r\n # gh-5475: Make sure that is_copy is picked up reconstruction\r\n df = DataFrame({\"A\": [1, 2]})\r\n assert df._is_copy is None\r\n\r\n with tm.ensure_clean(\"__tmp__pickle\") as path:\r\n df.to_pickle(path)\r\n df2 = pd.read_pickle(path)\r\n df2[\"B\"] = df2[\"A\"]\r\n df2[\"B\"] = df2[\"A\"]\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_setting_entire_column(self):\r\n\r\n # gh-5597: a spurious raise as we are setting the entire column here\r\n\r\n df = random_text(100000)\r\n\r\n # Always a copy\r\n x = df.iloc[[0, 1, 2]]\r\n assert x._is_copy is not None\r\n\r\n x = df.iloc[[0, 1, 2, 4]]\r\n assert x._is_copy is not None\r\n\r\n # Explicitly copy\r\n indexer = df.letters.apply(lambda x: len(x) > 10)\r\n df = df.loc[indexer].copy()\r\n\r\n assert df._is_copy is None\r\n df[\"letters\"] = df[\"letters\"].apply(str.lower)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_implicit_take(self):\r\n\r\n # Implicitly take\r\n df = random_text(100000)\r\n indexer = df.letters.apply(lambda x: len(x) > 10)\r\n df = df.loc[indexer]\r\n\r\n assert df._is_copy is not None\r\n df[\"letters\"] = df[\"letters\"].apply(str.lower)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_implicit_take2(self):\r\n\r\n # Implicitly take 2\r\n df = random_text(100000)\r\n indexer = df.letters.apply(lambda x: len(x) > 10)\r\n\r\n df = df.loc[indexer]\r\n assert df._is_copy is not None\r\n df.loc[:, \"letters\"] = df[\"letters\"].apply(str.lower)\r\n\r\n # Should be ok even though it's a copy!\r\n assert df._is_copy is None\r\n\r\n df[\"letters\"] = df[\"letters\"].apply(str.lower)\r\n assert df._is_copy is None\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_str(self):\r\n\r\n df = random_text(100000)\r\n indexer = df.letters.apply(lambda x: len(x) > 10)\r\n df.loc[indexer, \"letters\"] = df.loc[indexer, \"letters\"].apply(str.lower)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_is_copy(self):\r\n\r\n # an identical take, so no copy\r\n df = DataFrame({\"a\": [1]}).dropna()\r\n assert df._is_copy is None\r\n df[\"a\"] += 1\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_sorting(self):\r\n\r\n df = DataFrame(np.random.randn(10, 4))\r\n ser = df.iloc[:, 0].sort_values()\r\n\r\n tm.assert_series_equal(ser, df.iloc[:, 0].sort_values())\r\n tm.assert_series_equal(ser, df[0].sort_values())\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_false_positives(self):\r\n\r\n # see gh-6025: false positives\r\n df = DataFrame({\"column1\": [\"a\", \"a\", \"a\"], \"column2\": [4, 8, 9]})\r\n str(df)\r\n\r\n df[\"column1\"] = df[\"column1\"] + \"b\"\r\n str(df)\r\n\r\n df = df[df[\"column2\"] != 8]\r\n str(df)\r\n\r\n df[\"column1\"] = df[\"column1\"] + 
\"c\"\r\n str(df)\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_undefined_column(self):\r\n\r\n # from SO:\r\n # https://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc\r\n df = DataFrame(np.arange(0, 9), columns=[\"count\"])\r\n df[\"group\"] = \"b\"\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.iloc[0:5][\"group\"] = \"a\"\r\n\r\n @pytest.mark.arm_slow\r\n def test_detect_chained_assignment_changing_dtype(self, using_array_manager):\r\n\r\n # Mixed type setting but same dtype & changing dtype\r\n df = DataFrame(\r\n {\r\n \"A\": date_range(\"20130101\", periods=5),\r\n \"B\": np.random.randn(5),\r\n \"C\": np.arange(5, dtype=\"int64\"),\r\n \"D\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\r\n }\r\n )\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.loc[2][\"D\"] = \"foo\"\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.loc[2][\"C\"] = \"foo\"\r\n\r\n if not using_array_manager:\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df[\"C\"][2] = \"foo\"\r\n else:\r\n # INFO(ArrayManager) for ArrayManager it doesn't matter if it's\r\n # changing the dtype or not\r\n df[\"C\"][2] = \"foo\"\r\n assert df.loc[2, \"C\"] == \"foo\"\r\n\r\n def test_setting_with_copy_bug(self):\r\n\r\n # operating on a copy\r\n df = DataFrame(\r\n {\"a\": list(range(4)), \"b\": list(\"ab..\"), \"c\": [\"a\", \"b\", np.nan, \"d\"]}\r\n )\r\n mask = pd.isna(df.c)\r\n\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df[[\"c\"]][mask] = df[[\"b\"]][mask]\r\n\r\n def test_setting_with_copy_bug_no_warning(self):\r\n # invalid warning as we are returning a new object\r\n # GH 8730\r\n df1 = DataFrame({\"x\": Series([\"a\", \"b\", \"c\"]), \"y\": Series([\"d\", \"e\", \"f\"])})\r\n df2 = df1[[\"x\"]]\r\n\r\n # this should not raise\r\n df2[\"y\"] = [\"g\", \"h\", \"i\"]\r\n\r\n def test_detect_chained_assignment_warnings_errors(self):\r\n df = DataFrame({\"A\": [\"aaa\", \"bbb\", \"ccc\"], \"B\": [1, 2, 3]})\r\n with option_context(\"chained_assignment\", \"warn\"):\r\n with tm.assert_produces_warning(com.SettingWithCopyWarning):\r\n df.loc[0][\"A\"] = 111\r\n\r\n with option_context(\"chained_assignment\", \"raise\"):\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n df.loc[0][\"A\"] = 111\r\n\r\n def test_detect_chained_assignment_warnings_filter_and_dupe_cols(self):\r\n # xref gh-13017.\r\n with option_context(\"chained_assignment\", \"warn\"):\r\n df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, -9]], columns=[\"a\", \"a\", \"c\"])\r\n\r\n with tm.assert_produces_warning(com.SettingWithCopyWarning):\r\n df.c.loc[df.c > 0] = None\r\n\r\n expected = DataFrame(\r\n [[1, 2, 3], [4, 5, 6], [7, 8, -9]], columns=[\"a\", \"a\", \"c\"]\r\n )\r\n tm.assert_frame_equal(df, expected)\r\n\r\n @pytest.mark.parametrize(\"rhs\", [3, DataFrame({0: [1, 2, 3, 4]})])\r\n def test_detect_chained_assignment_warning_stacklevel(self, rhs):\r\n # GH#42570\r\n df = DataFrame(np.arange(25).reshape(5, 5))\r\n chained = df.loc[:3]\r\n with option_context(\"chained_assignment\", \"warn\"):\r\n with tm.assert_produces_warning(com.SettingWithCopyWarning) as t:\r\n chained[2] = rhs\r\n assert t[0].filename == __file__\r\n\r\n # TODO(ArrayManager) fast_xs with array-like scalars is not yet working\r\n @td.skip_array_manager_not_yet_implemented\r\n def test_chained_getitem_with_lists(self):\r\n\r\n # GH6394\r\n # Regression in chained getitem indexing with embedded list-like 
from\r\n # 0.12\r\n\r\n df = DataFrame({\"A\": 5 * [np.zeros(3)], \"B\": 5 * [np.ones(3)]})\r\n expected = df[\"A\"].iloc[2]\r\n result = df.loc[2, \"A\"]\r\n tm.assert_numpy_array_equal(result, expected)\r\n result2 = df.iloc[2][\"A\"]\r\n tm.assert_numpy_array_equal(result2, expected)\r\n result3 = df[\"A\"].loc[2]\r\n tm.assert_numpy_array_equal(result3, expected)\r\n result4 = df[\"A\"].iloc[2]\r\n tm.assert_numpy_array_equal(result4, expected)\r\n\r\n def test_cache_updating(self):\r\n # GH 4939, make sure to update the cache on setitem\r\n\r\n df = tm.makeDataFrame()\r\n df[\"A\"] # cache series\r\n df.loc[\"Hello Friend\"] = df.iloc[0]\r\n assert \"Hello Friend\" in df[\"A\"].index\r\n assert \"Hello Friend\" in df[\"B\"].index\r\n\r\n def test_cache_updating2(self):\r\n # 10264\r\n df = DataFrame(\r\n np.zeros((5, 5), dtype=\"int64\"),\r\n columns=[\"a\", \"b\", \"c\", \"d\", \"e\"],\r\n index=range(5),\r\n )\r\n df[\"f\"] = 0\r\n df.f.values[3] = 1\r\n\r\n df.f.values[3] = 2\r\n expected = DataFrame(\r\n np.zeros((5, 6), dtype=\"int64\"),\r\n columns=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"],\r\n index=range(5),\r\n )\r\n expected.at[3, \"f\"] = 2\r\n tm.assert_frame_equal(df, expected)\r\n expected = Series([0, 0, 0, 2, 0], name=\"f\")\r\n tm.assert_series_equal(df.f, expected)\r\n\r\n def test_iloc_setitem_chained_assignment(self):\r\n # GH#3970\r\n with option_context(\"chained_assignment\", None):\r\n df = DataFrame({\"aa\": range(5), \"bb\": [2.2] * 5})\r\n df[\"cc\"] = 0.0\r\n\r\n ck = [True] * len(df)\r\n\r\n df[\"bb\"].iloc[0] = 0.13\r\n\r\n # TODO: unused\r\n df_tmp = df.iloc[ck] # noqa\r\n\r\n df[\"bb\"].iloc[0] = 0.15\r\n assert df[\"bb\"].iloc[0] == 0.15\r\n\r\n def test_getitem_loc_assignment_slice_state(self):\r\n # GH 13569\r\n df = DataFrame({\"a\": [10, 20, 30]})\r\n df[\"a\"].loc[4] = 40\r\n tm.assert_frame_equal(df, DataFrame({\"a\": [10, 20, 30]}))\r\n tm.assert_series_equal(df[\"a\"], Series([10, 20, 30], name=\"a\"))\r\n",
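The tests above all exercise the same pitfall: indexing a DataFrame twice in a row writes into an intermediate copy rather than the original object. A minimal, self-contained sketch of that behaviour (hypothetical data, assuming the pandas versions these tests target; not part of the test file itself):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})

# Chained assignment: df[mask] returns a copy, so the second indexing step
# writes into that copy and pandas emits SettingWithCopyWarning.
with pd.option_context("chained_assignment", "warn"):
    df[df["a"] > 1]["b"] = 0   # warns; df is left unchanged

# Single-step .loc assignment modifies df itself.
df.loc[df["a"] > 1, "b"] = 0
print(df)   # rows with a > 1 now have b == 0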
"\"\"\"\r\nA collection of functions and objects for creating or placing inset axes.\r\n\"\"\"\r\n\r\nfrom matplotlib import _api, docstring\r\nfrom matplotlib.offsetbox import AnchoredOffsetbox\r\nfrom matplotlib.patches import Patch, Rectangle\r\nfrom matplotlib.path import Path\r\nfrom matplotlib.transforms import Bbox, BboxTransformTo\r\nfrom matplotlib.transforms import IdentityTransform, TransformedBbox\r\n\r\nfrom . import axes_size as Size\r\nfrom .parasite_axes import HostAxes\r\n\r\n\r\nclass InsetPosition:\r\n @docstring.dedent_interpd\r\n def __init__(self, parent, lbwh):\r\n \"\"\"\r\n An object for positioning an inset axes.\r\n\r\n This is created by specifying the normalized coordinates in the axes,\r\n instead of the figure.\r\n\r\n Parameters\r\n ----------\r\n parent : `matplotlib.axes.Axes`\r\n Axes to use for normalizing coordinates.\r\n\r\n lbwh : iterable of four floats\r\n The left edge, bottom edge, width, and height of the inset axes, in\r\n units of the normalized coordinate of the *parent* axes.\r\n\r\n See Also\r\n --------\r\n :meth:`matplotlib.axes.Axes.set_axes_locator`\r\n\r\n Examples\r\n --------\r\n The following bounds the inset axes to a box with 20%% of the parent\r\n axes's height and 40%% of the width. The size of the axes specified\r\n ([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:\r\n\r\n >>> parent_axes = plt.gca()\r\n >>> ax_ins = plt.axes([0, 0, 1, 1])\r\n >>> ip = InsetPosition(ax, [0.5, 0.1, 0.4, 0.2])\r\n >>> ax_ins.set_axes_locator(ip)\r\n \"\"\"\r\n self.parent = parent\r\n self.lbwh = lbwh\r\n\r\n def __call__(self, ax, renderer):\r\n bbox_parent = self.parent.get_position(original=False)\r\n trans = BboxTransformTo(bbox_parent)\r\n bbox_inset = Bbox.from_bounds(*self.lbwh)\r\n bb = TransformedBbox(bbox_inset, trans)\r\n return bb\r\n\r\n\r\nclass AnchoredLocatorBase(AnchoredOffsetbox):\r\n def __init__(self, bbox_to_anchor, offsetbox, loc,\r\n borderpad=0.5, bbox_transform=None):\r\n super().__init__(\r\n loc, pad=0., child=None, borderpad=borderpad,\r\n bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform\r\n )\r\n\r\n def draw(self, renderer):\r\n raise RuntimeError(\"No draw method should be called\")\r\n\r\n def __call__(self, ax, renderer):\r\n self.axes = ax\r\n\r\n fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())\r\n self._update_offset_func(renderer, fontsize)\r\n\r\n width, height, xdescent, ydescent = self.get_extent(renderer)\r\n\r\n px, py = self.get_offset(width, height, 0, 0, renderer)\r\n bbox_canvas = Bbox.from_bounds(px, py, width, height)\r\n tr = ax.figure.transFigure.inverted()\r\n bb = TransformedBbox(bbox_canvas, tr)\r\n\r\n return bb\r\n\r\n\r\nclass AnchoredSizeLocator(AnchoredLocatorBase):\r\n def __init__(self, bbox_to_anchor, x_size, y_size, loc,\r\n borderpad=0.5, bbox_transform=None):\r\n super().__init__(\r\n bbox_to_anchor, None, loc,\r\n borderpad=borderpad, bbox_transform=bbox_transform\r\n )\r\n\r\n self.x_size = Size.from_any(x_size)\r\n self.y_size = Size.from_any(y_size)\r\n\r\n def get_extent(self, renderer):\r\n bbox = self.get_bbox_to_anchor()\r\n dpi = renderer.points_to_pixels(72.)\r\n\r\n r, a = self.x_size.get_size(renderer)\r\n width = bbox.width * r + a * dpi\r\n r, a = self.y_size.get_size(renderer)\r\n height = bbox.height * r + a * dpi\r\n\r\n xd, yd = 0, 0\r\n\r\n fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())\r\n pad = self.pad * fontsize\r\n\r\n return width + 2 * pad, height + 2 * pad, xd + pad, yd + 
pad\r\n\r\n\r\nclass AnchoredZoomLocator(AnchoredLocatorBase):\r\n def __init__(self, parent_axes, zoom, loc,\r\n borderpad=0.5,\r\n bbox_to_anchor=None,\r\n bbox_transform=None):\r\n self.parent_axes = parent_axes\r\n self.zoom = zoom\r\n if bbox_to_anchor is None:\r\n bbox_to_anchor = parent_axes.bbox\r\n super().__init__(\r\n bbox_to_anchor, None, loc, borderpad=borderpad,\r\n bbox_transform=bbox_transform)\r\n\r\n def get_extent(self, renderer):\r\n bb = TransformedBbox(self.axes.viewLim, self.parent_axes.transData)\r\n fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())\r\n pad = self.pad * fontsize\r\n return (abs(bb.width * self.zoom) + 2 * pad,\r\n abs(bb.height * self.zoom) + 2 * pad,\r\n pad, pad)\r\n\r\n\r\nclass BboxPatch(Patch):\r\n @docstring.dedent_interpd\r\n def __init__(self, bbox, **kwargs):\r\n \"\"\"\r\n Patch showing the shape bounded by a Bbox.\r\n\r\n Parameters\r\n ----------\r\n bbox : `matplotlib.transforms.Bbox`\r\n Bbox to use for the extents of this patch.\r\n\r\n **kwargs\r\n Patch properties. Valid arguments include:\r\n\r\n %(Patch:kwdoc)s\r\n \"\"\"\r\n if \"transform\" in kwargs:\r\n raise ValueError(\"transform should not be set\")\r\n\r\n kwargs[\"transform\"] = IdentityTransform()\r\n super().__init__(**kwargs)\r\n self.bbox = bbox\r\n\r\n def get_path(self):\r\n # docstring inherited\r\n x0, y0, x1, y1 = self.bbox.extents\r\n return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)],\r\n closed=True)\r\n\r\n\r\nclass BboxConnector(Patch):\r\n @staticmethod\r\n def get_bbox_edge_pos(bbox, loc):\r\n \"\"\"\r\n Helper function to obtain the location of a corner of a bbox\r\n\r\n Parameters\r\n ----------\r\n bbox : `matplotlib.transforms.Bbox`\r\n\r\n loc : {1, 2, 3, 4}\r\n Corner of *bbox*. Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n Returns\r\n -------\r\n x, y : float\r\n Coordinates of the corner specified by *loc*.\r\n \"\"\"\r\n x0, y0, x1, y1 = bbox.extents\r\n if loc == 1:\r\n return x1, y1\r\n elif loc == 2:\r\n return x0, y1\r\n elif loc == 3:\r\n return x0, y0\r\n elif loc == 4:\r\n return x1, y0\r\n\r\n @staticmethod\r\n def connect_bbox(bbox1, bbox2, loc1, loc2=None):\r\n \"\"\"\r\n Helper function to obtain a Path from one bbox to another.\r\n\r\n Parameters\r\n ----------\r\n bbox1, bbox2 : `matplotlib.transforms.Bbox`\r\n Bounding boxes to connect.\r\n\r\n loc1 : {1, 2, 3, 4}\r\n Corner of *bbox1* to use. Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n loc2 : {1, 2, 3, 4}, optional\r\n Corner of *bbox2* to use. 
If None, defaults to *loc1*.\r\n Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n Returns\r\n -------\r\n path : `matplotlib.path.Path`\r\n A line segment from the *loc1* corner of *bbox1* to the *loc2*\r\n corner of *bbox2*.\r\n \"\"\"\r\n if isinstance(bbox1, Rectangle):\r\n bbox1 = TransformedBbox(Bbox.unit(), bbox1.get_transform())\r\n if isinstance(bbox2, Rectangle):\r\n bbox2 = TransformedBbox(Bbox.unit(), bbox2.get_transform())\r\n if loc2 is None:\r\n loc2 = loc1\r\n x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)\r\n x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)\r\n return Path([[x1, y1], [x2, y2]])\r\n\r\n @docstring.dedent_interpd\r\n def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):\r\n \"\"\"\r\n Connect two bboxes with a straight line.\r\n\r\n Parameters\r\n ----------\r\n bbox1, bbox2 : `matplotlib.transforms.Bbox`\r\n Bounding boxes to connect.\r\n\r\n loc1 : {1, 2, 3, 4}\r\n Corner of *bbox1* to draw the line. Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n loc2 : {1, 2, 3, 4}, optional\r\n Corner of *bbox2* to draw the line. If None, defaults to *loc1*.\r\n Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n **kwargs\r\n Patch properties for the line drawn. Valid arguments include:\r\n\r\n %(Patch:kwdoc)s\r\n \"\"\"\r\n if \"transform\" in kwargs:\r\n raise ValueError(\"transform should not be set\")\r\n\r\n kwargs[\"transform\"] = IdentityTransform()\r\n if 'fill' in kwargs:\r\n super().__init__(**kwargs)\r\n else:\r\n fill = bool({'fc', 'facecolor', 'color'}.intersection(kwargs))\r\n super().__init__(fill=fill, **kwargs)\r\n self.bbox1 = bbox1\r\n self.bbox2 = bbox2\r\n self.loc1 = loc1\r\n self.loc2 = loc2\r\n\r\n def get_path(self):\r\n # docstring inherited\r\n return self.connect_bbox(self.bbox1, self.bbox2,\r\n self.loc1, self.loc2)\r\n\r\n\r\nclass BboxConnectorPatch(BboxConnector):\r\n @docstring.dedent_interpd\r\n def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):\r\n \"\"\"\r\n Connect two bboxes with a quadrilateral.\r\n\r\n The quadrilateral is specified by two lines that start and end at\r\n corners of the bboxes. 
The four sides of the quadrilateral are defined\r\n by the two lines given, the line between the two corners specified in\r\n *bbox1* and the line between the two corners specified in *bbox2*.\r\n\r\n Parameters\r\n ----------\r\n bbox1, bbox2 : `matplotlib.transforms.Bbox`\r\n Bounding boxes to connect.\r\n\r\n loc1a, loc2a : {1, 2, 3, 4}\r\n Corners of *bbox1* and *bbox2* to draw the first line.\r\n Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n loc1b, loc2b : {1, 2, 3, 4}\r\n Corners of *bbox1* and *bbox2* to draw the second line.\r\n Valid values are::\r\n\r\n 'upper right' : 1,\r\n 'upper left' : 2,\r\n 'lower left' : 3,\r\n 'lower right' : 4\r\n\r\n **kwargs\r\n Patch properties for the line drawn:\r\n\r\n %(Patch:kwdoc)s\r\n \"\"\"\r\n if \"transform\" in kwargs:\r\n raise ValueError(\"transform should not be set\")\r\n super().__init__(bbox1, bbox2, loc1a, loc2a, **kwargs)\r\n self.loc1b = loc1b\r\n self.loc2b = loc2b\r\n\r\n def get_path(self):\r\n # docstring inherited\r\n path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)\r\n path2 = self.connect_bbox(self.bbox2, self.bbox1,\r\n self.loc2b, self.loc1b)\r\n path_merged = [*path1.vertices, *path2.vertices, path1.vertices[0]]\r\n return Path(path_merged)\r\n\r\n\r\ndef _add_inset_axes(parent_axes, inset_axes):\r\n \"\"\"Helper function to add an inset axes and disable navigation in it.\"\"\"\r\n parent_axes.figure.add_axes(inset_axes)\r\n inset_axes.set_navigate(False)\r\n\r\n\r\[email protected]_interpd\r\ndef inset_axes(parent_axes, width, height, loc='upper right',\r\n bbox_to_anchor=None, bbox_transform=None,\r\n axes_class=None, axes_kwargs=None,\r\n borderpad=0.5):\r\n \"\"\"\r\n Create an inset axes with a given width and height.\r\n\r\n Both sizes used can be specified either in inches or percentage.\r\n For example,::\r\n\r\n inset_axes(parent_axes, width='40%%', height='30%%', loc='lower left')\r\n\r\n creates in inset axes in the lower left corner of *parent_axes* which spans\r\n over 30%% in height and 40%% in width of the *parent_axes*. Since the usage\r\n of `.inset_axes` may become slightly tricky when exceeding such standard\r\n cases, it is recommended to read :doc:`the examples\r\n </gallery/axes_grid1/inset_locator_demo>`.\r\n\r\n Notes\r\n -----\r\n The meaning of *bbox_to_anchor* and *bbox_to_transform* is interpreted\r\n differently from that of legend. The value of bbox_to_anchor\r\n (or the return value of its get_points method; the default is\r\n *parent_axes.bbox*) is transformed by the bbox_transform (the default\r\n is Identity transform) and then interpreted as points in the pixel\r\n coordinate (which is dpi dependent).\r\n\r\n Thus, following three calls are identical and creates an inset axes\r\n with respect to the *parent_axes*::\r\n\r\n axins = inset_axes(parent_axes, \"30%%\", \"40%%\")\r\n axins = inset_axes(parent_axes, \"30%%\", \"40%%\",\r\n bbox_to_anchor=parent_axes.bbox)\r\n axins = inset_axes(parent_axes, \"30%%\", \"40%%\",\r\n bbox_to_anchor=(0, 0, 1, 1),\r\n bbox_transform=parent_axes.transAxes)\r\n\r\n Parameters\r\n ----------\r\n parent_axes : `matplotlib.axes.Axes`\r\n Axes to place the inset axes.\r\n\r\n width, height : float or str\r\n Size of the inset axes to create. If a float is provided, it is\r\n the size in inches, e.g. *width=1.3*. If a string is provided, it is\r\n the size in relative units, e.g. *width='40%%'*. By default, i.e. 
if\r\n neither *bbox_to_anchor* nor *bbox_transform* are specified, those\r\n are relative to the parent_axes. Otherwise they are to be understood\r\n relative to the bounding box provided via *bbox_to_anchor*.\r\n\r\n loc : str, default: 'upper right'\r\n Location to place the inset axes. Valid locations are\r\n 'upper left', 'upper center', 'upper right',\r\n 'center left', 'center', 'center right',\r\n 'lower left', 'lower center, 'lower right'.\r\n For backward compatibility, numeric values are accepted as well.\r\n See the parameter *loc* of `.Legend` for details.\r\n\r\n bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional\r\n Bbox that the inset axes will be anchored to. If None,\r\n a tuple of (0, 0, 1, 1) is used if *bbox_transform* is set\r\n to *parent_axes.transAxes* or *parent_axes.figure.transFigure*.\r\n Otherwise, *parent_axes.bbox* is used. If a tuple, can be either\r\n [left, bottom, width, height], or [left, bottom].\r\n If the kwargs *width* and/or *height* are specified in relative units,\r\n the 2-tuple [left, bottom] cannot be used. Note that,\r\n unless *bbox_transform* is set, the units of the bounding box\r\n are interpreted in the pixel coordinate. When using *bbox_to_anchor*\r\n with tuple, it almost always makes sense to also specify\r\n a *bbox_transform*. This might often be the axes transform\r\n *parent_axes.transAxes*.\r\n\r\n bbox_transform : `matplotlib.transforms.Transform`, optional\r\n Transformation for the bbox that contains the inset axes.\r\n If None, a `.transforms.IdentityTransform` is used. The value\r\n of *bbox_to_anchor* (or the return value of its get_points method)\r\n is transformed by the *bbox_transform* and then interpreted\r\n as points in the pixel coordinate (which is dpi dependent).\r\n You may provide *bbox_to_anchor* in some normalized coordinate,\r\n and give an appropriate transform (e.g., *parent_axes.transAxes*).\r\n\r\n axes_class : `matplotlib.axes.Axes` type, default: `.HostAxes`\r\n The type of the newly created inset axes.\r\n\r\n axes_kwargs : dict, optional\r\n Keyword arguments to pass to the constructor of the inset axes.\r\n Valid arguments include:\r\n\r\n %(Axes:kwdoc)s\r\n\r\n borderpad : float, default: 0.5\r\n Padding between inset axes and the bbox_to_anchor.\r\n The units are axes font size, i.e. for a default font size of 10 points\r\n *borderpad = 0.5* is equivalent to a padding of 5 points.\r\n\r\n Returns\r\n -------\r\n inset_axes : *axes_class*\r\n Inset axes object created.\r\n \"\"\"\r\n\r\n if axes_class is None:\r\n axes_class = HostAxes\r\n if axes_kwargs is None:\r\n axes_kwargs = {}\r\n inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),\r\n **axes_kwargs)\r\n\r\n if bbox_transform in [parent_axes.transAxes,\r\n parent_axes.figure.transFigure]:\r\n if bbox_to_anchor is None:\r\n _api.warn_external(\"Using the axes or figure transform requires a \"\r\n \"bounding box in the respective coordinates. 
\"\r\n \"Using bbox_to_anchor=(0, 0, 1, 1) now.\")\r\n bbox_to_anchor = (0, 0, 1, 1)\r\n\r\n if bbox_to_anchor is None:\r\n bbox_to_anchor = parent_axes.bbox\r\n\r\n if (isinstance(bbox_to_anchor, tuple) and\r\n (isinstance(width, str) or isinstance(height, str))):\r\n if len(bbox_to_anchor) != 4:\r\n raise ValueError(\"Using relative units for width or height \"\r\n \"requires to provide a 4-tuple or a \"\r\n \"`Bbox` instance to `bbox_to_anchor.\")\r\n\r\n axes_locator = AnchoredSizeLocator(bbox_to_anchor,\r\n width, height,\r\n loc=loc,\r\n bbox_transform=bbox_transform,\r\n borderpad=borderpad)\r\n\r\n inset_axes.set_axes_locator(axes_locator)\r\n\r\n _add_inset_axes(parent_axes, inset_axes)\r\n\r\n return inset_axes\r\n\r\n\r\[email protected]_interpd\r\ndef zoomed_inset_axes(parent_axes, zoom, loc='upper right',\r\n bbox_to_anchor=None, bbox_transform=None,\r\n axes_class=None, axes_kwargs=None,\r\n borderpad=0.5):\r\n \"\"\"\r\n Create an anchored inset axes by scaling a parent axes. For usage, also see\r\n :doc:`the examples </gallery/axes_grid1/inset_locator_demo2>`.\r\n\r\n Parameters\r\n ----------\r\n parent_axes : `matplotlib.axes.Axes`\r\n Axes to place the inset axes.\r\n\r\n zoom : float\r\n Scaling factor of the data axes. *zoom* > 1 will enlarge the\r\n coordinates (i.e., \"zoomed in\"), while *zoom* < 1 will shrink the\r\n coordinates (i.e., \"zoomed out\").\r\n\r\n loc : str, default: 'upper right'\r\n Location to place the inset axes. Valid locations are\r\n 'upper left', 'upper center', 'upper right',\r\n 'center left', 'center', 'center right',\r\n 'lower left', 'lower center, 'lower right'.\r\n For backward compatibility, numeric values are accepted as well.\r\n See the parameter *loc* of `.Legend` for details.\r\n\r\n bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional\r\n Bbox that the inset axes will be anchored to. If None,\r\n *parent_axes.bbox* is used. If a tuple, can be either\r\n [left, bottom, width, height], or [left, bottom].\r\n If the kwargs *width* and/or *height* are specified in relative units,\r\n the 2-tuple [left, bottom] cannot be used. Note that\r\n the units of the bounding box are determined through the transform\r\n in use. When using *bbox_to_anchor* it almost always makes sense to\r\n also specify a *bbox_transform*. This might often be the axes transform\r\n *parent_axes.transAxes*.\r\n\r\n bbox_transform : `matplotlib.transforms.Transform`, optional\r\n Transformation for the bbox that contains the inset axes.\r\n If None, a `.transforms.IdentityTransform` is used (i.e. pixel\r\n coordinates). This is useful when not providing any argument to\r\n *bbox_to_anchor*. When using *bbox_to_anchor* it almost always makes\r\n sense to also specify a *bbox_transform*. This might often be the\r\n axes transform *parent_axes.transAxes*. Inversely, when specifying\r\n the axes- or figure-transform here, be aware that not specifying\r\n *bbox_to_anchor* will use *parent_axes.bbox*, the units of which are\r\n in display (pixel) coordinates.\r\n\r\n axes_class : `matplotlib.axes.Axes` type, default: `.HostAxes`\r\n The type of the newly created inset axes.\r\n\r\n axes_kwargs : dict, optional\r\n Keyword arguments to pass to the constructor of the inset axes.\r\n Valid arguments include:\r\n\r\n %(Axes:kwdoc)s\r\n\r\n borderpad : float, default: 0.5\r\n Padding between inset axes and the bbox_to_anchor.\r\n The units are axes font size, i.e. 
for a default font size of 10 points\r\n *borderpad = 0.5* is equivalent to a padding of 5 points.\r\n\r\n Returns\r\n -------\r\n inset_axes : *axes_class*\r\n Inset axes object created.\r\n \"\"\"\r\n\r\n if axes_class is None:\r\n axes_class = HostAxes\r\n if axes_kwargs is None:\r\n axes_kwargs = {}\r\n inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),\r\n **axes_kwargs)\r\n\r\n axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,\r\n bbox_to_anchor=bbox_to_anchor,\r\n bbox_transform=bbox_transform,\r\n borderpad=borderpad)\r\n inset_axes.set_axes_locator(axes_locator)\r\n\r\n _add_inset_axes(parent_axes, inset_axes)\r\n\r\n return inset_axes\r\n\r\n\r\[email protected]_interpd\r\ndef mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):\r\n \"\"\"\r\n Draw a box to mark the location of an area represented by an inset axes.\r\n\r\n This function draws a box in *parent_axes* at the bounding box of\r\n *inset_axes*, and shows a connection with the inset axes by drawing lines\r\n at the corners, giving a \"zoomed in\" effect.\r\n\r\n Parameters\r\n ----------\r\n parent_axes : `matplotlib.axes.Axes`\r\n Axes which contains the area of the inset axes.\r\n\r\n inset_axes : `matplotlib.axes.Axes`\r\n The inset axes.\r\n\r\n loc1, loc2 : {1, 2, 3, 4}\r\n Corners to use for connecting the inset axes and the area in the\r\n parent axes.\r\n\r\n **kwargs\r\n Patch properties for the lines and box drawn:\r\n\r\n %(Patch:kwdoc)s\r\n\r\n Returns\r\n -------\r\n pp : `matplotlib.patches.Patch`\r\n The patch drawn to represent the area of the inset axes.\r\n\r\n p1, p2 : `matplotlib.patches.Patch`\r\n The patches connecting two corners of the inset axes and its area.\r\n \"\"\"\r\n rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)\r\n\r\n if 'fill' in kwargs:\r\n pp = BboxPatch(rect, **kwargs)\r\n else:\r\n fill = bool({'fc', 'facecolor', 'color'}.intersection(kwargs))\r\n pp = BboxPatch(rect, fill=fill, **kwargs)\r\n parent_axes.add_patch(pp)\r\n\r\n p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)\r\n inset_axes.add_patch(p1)\r\n p1.set_clip_on(False)\r\n p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)\r\n inset_axes.add_patch(p2)\r\n p2.set_clip_on(False)\r\n\r\n return pp, p1, p2\r\n",
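The helpers defined above are typically used together: inset_axes or zoomed_inset_axes creates the child axes, and mark_inset draws the BboxPatch/BboxConnector artists linking it to the parent. A minimal usage sketch (hypothetical data; the loc string and corner codes follow the docstrings above):

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset

fig, ax = plt.subplots()
x = np.linspace(0, 10, 500)
ax.plot(x, np.sin(x))

# Inset showing the parent data magnified 4x, anchored in the upper right.
axins = zoomed_inset_axes(ax, zoom=4, loc='upper right')
axins.plot(x, np.sin(x))
axins.set_xlim(2.8, 3.4)    # data region to magnify
axins.set_ylim(-0.4, 0.4)

# Box the zoomed region in the parent axes and connect corners 2 and 4.
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.show()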
"from __future__ import annotations\r\n\r\nimport numbers\r\nfrom typing import TYPE_CHECKING\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import (\r\n lib,\r\n missing as libmissing,\r\n)\r\nfrom pandas._typing import (\r\n ArrayLike,\r\n Dtype,\r\n type_t,\r\n)\r\nfrom pandas.compat.numpy import function as nv\r\n\r\nfrom pandas.core.dtypes.common import (\r\n is_bool_dtype,\r\n is_float,\r\n is_float_dtype,\r\n is_integer_dtype,\r\n is_list_like,\r\n is_numeric_dtype,\r\n pandas_dtype,\r\n)\r\nfrom pandas.core.dtypes.dtypes import (\r\n ExtensionDtype,\r\n register_extension_dtype,\r\n)\r\nfrom pandas.core.dtypes.missing import isna\r\n\r\nfrom pandas.core import ops\r\nfrom pandas.core.arrays.masked import (\r\n BaseMaskedArray,\r\n BaseMaskedDtype,\r\n)\r\n\r\nif TYPE_CHECKING:\r\n import pyarrow\r\n\r\n\r\n@register_extension_dtype\r\nclass BooleanDtype(BaseMaskedDtype):\r\n \"\"\"\r\n Extension dtype for boolean data.\r\n\r\n .. versionadded:: 1.0.0\r\n\r\n .. warning::\r\n\r\n BooleanDtype is considered experimental. The implementation and\r\n parts of the API may change without warning.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n\r\n Examples\r\n --------\r\n >>> pd.BooleanDtype()\r\n BooleanDtype\r\n \"\"\"\r\n\r\n name = \"boolean\"\r\n\r\n # https://github.com/python/mypy/issues/4125\r\n # error: Signature of \"type\" incompatible with supertype \"BaseMaskedDtype\"\r\n @property\r\n def type(self) -> type: # type: ignore[override]\r\n return np.bool_\r\n\r\n @property\r\n def kind(self) -> str:\r\n return \"b\"\r\n\r\n @property\r\n def numpy_dtype(self) -> np.dtype:\r\n return np.dtype(\"bool\")\r\n\r\n @classmethod\r\n def construct_array_type(cls) -> type_t[BooleanArray]:\r\n \"\"\"\r\n Return the array type associated with this dtype.\r\n\r\n Returns\r\n -------\r\n type\r\n \"\"\"\r\n return BooleanArray\r\n\r\n def __repr__(self) -> str:\r\n return \"BooleanDtype\"\r\n\r\n @property\r\n def _is_boolean(self) -> bool:\r\n return True\r\n\r\n @property\r\n def _is_numeric(self) -> bool:\r\n return True\r\n\r\n def __from_arrow__(\r\n self, array: pyarrow.Array | pyarrow.ChunkedArray\r\n ) -> BooleanArray:\r\n \"\"\"\r\n Construct BooleanArray from pyarrow Array/ChunkedArray.\r\n \"\"\"\r\n import pyarrow\r\n\r\n if array.type != pyarrow.bool_():\r\n raise TypeError(f\"Expected array of boolean type, got {array.type} instead\")\r\n\r\n if isinstance(array, pyarrow.Array):\r\n chunks = [array]\r\n else:\r\n # pyarrow.ChunkedArray\r\n chunks = array.chunks\r\n\r\n results = []\r\n for arr in chunks:\r\n buflist = arr.buffers()\r\n data = pyarrow.BooleanArray.from_buffers(\r\n arr.type, len(arr), [None, buflist[1]], offset=arr.offset\r\n ).to_numpy(zero_copy_only=False)\r\n if arr.null_count != 0:\r\n mask = pyarrow.BooleanArray.from_buffers(\r\n arr.type, len(arr), [None, buflist[0]], offset=arr.offset\r\n ).to_numpy(zero_copy_only=False)\r\n mask = ~mask\r\n else:\r\n mask = np.zeros(len(arr), dtype=bool)\r\n\r\n bool_arr = BooleanArray(data, mask)\r\n results.append(bool_arr)\r\n\r\n if not results:\r\n return BooleanArray(\r\n np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)\r\n )\r\n else:\r\n return BooleanArray._concat_same_type(results)\r\n\r\n\r\ndef coerce_to_array(\r\n values, mask=None, copy: bool = False\r\n) -> tuple[np.ndarray, np.ndarray]:\r\n \"\"\"\r\n Coerce the input values array to numpy arrays with a mask.\r\n\r\n Parameters\r\n ----------\r\n values : 1D list-like\r\n mask : bool 
1D array, optional\r\n copy : bool, default False\r\n if True, copy the input\r\n\r\n Returns\r\n -------\r\n tuple of (values, mask)\r\n \"\"\"\r\n if isinstance(values, BooleanArray):\r\n if mask is not None:\r\n raise ValueError(\"cannot pass mask for BooleanArray input\")\r\n values, mask = values._data, values._mask\r\n if copy:\r\n values = values.copy()\r\n mask = mask.copy()\r\n return values, mask\r\n\r\n mask_values = None\r\n if isinstance(values, np.ndarray) and values.dtype == np.bool_:\r\n if copy:\r\n values = values.copy()\r\n elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):\r\n mask_values = isna(values)\r\n\r\n values_bool = np.zeros(len(values), dtype=bool)\r\n values_bool[~mask_values] = values[~mask_values].astype(bool)\r\n\r\n if not np.all(\r\n values_bool[~mask_values].astype(values.dtype) == values[~mask_values]\r\n ):\r\n raise TypeError(\"Need to pass bool-like values\")\r\n\r\n values = values_bool\r\n else:\r\n values_object = np.asarray(values, dtype=object)\r\n\r\n inferred_dtype = lib.infer_dtype(values_object, skipna=True)\r\n integer_like = (\"floating\", \"integer\", \"mixed-integer-float\")\r\n if inferred_dtype not in (\"boolean\", \"empty\") + integer_like:\r\n raise TypeError(\"Need to pass bool-like values\")\r\n\r\n mask_values = isna(values_object)\r\n values = np.zeros(len(values), dtype=bool)\r\n values[~mask_values] = values_object[~mask_values].astype(bool)\r\n\r\n # if the values were integer-like, validate it were actually 0/1's\r\n if (inferred_dtype in integer_like) and not (\r\n np.all(\r\n values[~mask_values].astype(float)\r\n == values_object[~mask_values].astype(float)\r\n )\r\n ):\r\n raise TypeError(\"Need to pass bool-like values\")\r\n\r\n if mask is None and mask_values is None:\r\n mask = np.zeros(len(values), dtype=bool)\r\n elif mask is None:\r\n mask = mask_values\r\n else:\r\n if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:\r\n if mask_values is not None:\r\n mask = mask | mask_values\r\n else:\r\n if copy:\r\n mask = mask.copy()\r\n else:\r\n mask = np.array(mask, dtype=bool)\r\n if mask_values is not None:\r\n mask = mask | mask_values\r\n\r\n if values.ndim != 1:\r\n raise ValueError(\"values must be a 1D list-like\")\r\n if mask.ndim != 1:\r\n raise ValueError(\"mask must be a 1D list-like\")\r\n\r\n return values, mask\r\n\r\n\r\nclass BooleanArray(BaseMaskedArray):\r\n \"\"\"\r\n Array of boolean (True/False) data with missing values.\r\n\r\n This is a pandas Extension array for boolean data, under the hood\r\n represented by 2 numpy arrays: a boolean array with the data and\r\n a boolean array with the mask (True indicating missing).\r\n\r\n BooleanArray implements Kleene logic (sometimes called three-value\r\n logic) for logical operations. See :ref:`boolean.kleene` for more.\r\n\r\n To construct an BooleanArray from generic array-like input, use\r\n :func:`pandas.array` specifying ``dtype=\"boolean\"`` (see examples\r\n below).\r\n\r\n .. versionadded:: 1.0.0\r\n\r\n .. warning::\r\n\r\n BooleanArray is considered experimental. 
The implementation and\r\n parts of the API may change without warning.\r\n\r\n Parameters\r\n ----------\r\n values : numpy.ndarray\r\n A 1-d boolean-dtype array with the data.\r\n mask : numpy.ndarray\r\n A 1-d boolean-dtype array indicating missing values (True\r\n indicates missing).\r\n copy : bool, default False\r\n Whether to copy the `values` and `mask` arrays.\r\n\r\n Attributes\r\n ----------\r\n None\r\n\r\n Methods\r\n -------\r\n None\r\n\r\n Returns\r\n -------\r\n BooleanArray\r\n\r\n Examples\r\n --------\r\n Create an BooleanArray with :func:`pandas.array`:\r\n\r\n >>> pd.array([True, False, None], dtype=\"boolean\")\r\n <BooleanArray>\r\n [True, False, <NA>]\r\n Length: 3, dtype: boolean\r\n \"\"\"\r\n\r\n # The value used to fill '_data' to avoid upcasting\r\n _internal_fill_value = False\r\n _TRUE_VALUES = {\"True\", \"TRUE\", \"true\", \"1\", \"1.0\"}\r\n _FALSE_VALUES = {\"False\", \"FALSE\", \"false\", \"0\", \"0.0\"}\r\n\r\n def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):\r\n if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):\r\n raise TypeError(\r\n \"values should be boolean numpy array. Use \"\r\n \"the 'pd.array' function instead\"\r\n )\r\n self._dtype = BooleanDtype()\r\n super().__init__(values, mask, copy=copy)\r\n\r\n @property\r\n def dtype(self) -> BooleanDtype:\r\n return self._dtype\r\n\r\n @classmethod\r\n def _from_sequence(\r\n cls, scalars, *, dtype: Dtype | None = None, copy: bool = False\r\n ) -> BooleanArray:\r\n if dtype:\r\n assert dtype == \"boolean\"\r\n values, mask = coerce_to_array(scalars, copy=copy)\r\n return BooleanArray(values, mask)\r\n\r\n @classmethod\r\n def _from_sequence_of_strings(\r\n cls,\r\n strings: list[str],\r\n *,\r\n dtype: Dtype | None = None,\r\n copy: bool = False,\r\n true_values: list[str] | None = None,\r\n false_values: list[str] | None = None,\r\n ) -> BooleanArray:\r\n true_values_union = cls._TRUE_VALUES.union(true_values or [])\r\n false_values_union = cls._FALSE_VALUES.union(false_values or [])\r\n\r\n def map_string(s):\r\n if isna(s):\r\n return s\r\n elif s in true_values_union:\r\n return True\r\n elif s in false_values_union:\r\n return False\r\n else:\r\n raise ValueError(f\"{s} cannot be cast to bool\")\r\n\r\n scalars = [map_string(x) for x in strings]\r\n return cls._from_sequence(scalars, dtype=dtype, copy=copy)\r\n\r\n _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)\r\n\r\n def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):\r\n # For BooleanArray inputs, we apply the ufunc to ._data\r\n # and mask the result.\r\n if method == \"reduce\":\r\n # Not clear how to handle missing values in reductions. 
Raise.\r\n raise NotImplementedError(\"The 'reduce' method is not supported.\")\r\n out = kwargs.get(\"out\", ())\r\n\r\n for x in inputs + out:\r\n if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):\r\n return NotImplemented\r\n\r\n # for binary ops, use our custom dunder methods\r\n result = ops.maybe_dispatch_ufunc_to_dunder_op(\r\n self, ufunc, method, *inputs, **kwargs\r\n )\r\n if result is not NotImplemented:\r\n return result\r\n\r\n mask = np.zeros(len(self), dtype=bool)\r\n inputs2 = []\r\n for x in inputs:\r\n if isinstance(x, BooleanArray):\r\n mask |= x._mask\r\n inputs2.append(x._data)\r\n else:\r\n inputs2.append(x)\r\n\r\n def reconstruct(x):\r\n # we don't worry about scalar `x` here, since we\r\n # raise for reduce up above.\r\n\r\n if is_bool_dtype(x.dtype):\r\n m = mask.copy()\r\n return BooleanArray(x, m)\r\n else:\r\n x[mask] = np.nan\r\n return x\r\n\r\n result = getattr(ufunc, method)(*inputs2, **kwargs)\r\n if isinstance(result, tuple):\r\n tuple(reconstruct(x) for x in result)\r\n else:\r\n return reconstruct(result)\r\n\r\n def _coerce_to_array(self, value) -> tuple[np.ndarray, np.ndarray]:\r\n return coerce_to_array(value)\r\n\r\n def astype(self, dtype, copy: bool = True) -> ArrayLike:\r\n \"\"\"\r\n Cast to a NumPy array or ExtensionArray with 'dtype'.\r\n\r\n Parameters\r\n ----------\r\n dtype : str or dtype\r\n Typecode or data-type to which the array is cast.\r\n copy : bool, default True\r\n Whether to copy the data, even if not necessary. If False,\r\n a copy is made only if the old dtype does not match the\r\n new dtype.\r\n\r\n Returns\r\n -------\r\n ndarray or ExtensionArray\r\n NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n if incompatible type with an BooleanDtype, equivalent of same_kind\r\n casting\r\n \"\"\"\r\n dtype = pandas_dtype(dtype)\r\n\r\n if isinstance(dtype, ExtensionDtype):\r\n return super().astype(dtype, copy)\r\n\r\n if is_bool_dtype(dtype):\r\n # astype_nansafe converts np.nan to True\r\n if self._hasna:\r\n raise ValueError(\"cannot convert float NaN to bool\")\r\n else:\r\n return self._data.astype(dtype, copy=copy)\r\n\r\n # for integer, error if there are missing values\r\n if is_integer_dtype(dtype) and self._hasna:\r\n raise ValueError(\"cannot convert NA to integer\")\r\n\r\n # for float dtype, ensure we use np.nan before casting (numpy cannot\r\n # deal with pd.NA)\r\n na_value = self._na_value\r\n if is_float_dtype(dtype):\r\n na_value = np.nan\r\n # coerce\r\n return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)\r\n\r\n def _values_for_argsort(self) -> np.ndarray:\r\n \"\"\"\r\n Return values for sorting.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n The transformed values should maintain the ordering between values\r\n within the array.\r\n\r\n See Also\r\n --------\r\n ExtensionArray.argsort : Return the indices that would sort this array.\r\n \"\"\"\r\n data = self._data.copy()\r\n data[self._mask] = -1\r\n return data\r\n\r\n def any(self, *, skipna: bool = True, **kwargs):\r\n \"\"\"\r\n Return whether any element is True.\r\n\r\n Returns False unless there is at least one element that is True.\r\n By default, NAs are skipped. If ``skipna=False`` is specified and\r\n missing values are present, similar :ref:`Kleene logic <boolean.kleene>`\r\n is used as for logical operations.\r\n\r\n Parameters\r\n ----------\r\n skipna : bool, default True\r\n Exclude NA values. 
If the entire array is NA and `skipna` is\r\n True, then the result will be False, as for an empty array.\r\n If `skipna` is False, the result will still be True if there is\r\n at least one element that is True, otherwise NA will be returned\r\n if there are NA's present.\r\n **kwargs : any, default None\r\n Additional keywords have no effect but might be accepted for\r\n compatibility with NumPy.\r\n\r\n Returns\r\n -------\r\n bool or :attr:`pandas.NA`\r\n\r\n See Also\r\n --------\r\n numpy.any : Numpy version of this method.\r\n BooleanArray.all : Return whether all elements are True.\r\n\r\n Examples\r\n --------\r\n The result indicates whether any element is True (and by default\r\n skips NAs):\r\n\r\n >>> pd.array([True, False, True]).any()\r\n True\r\n >>> pd.array([True, False, pd.NA]).any()\r\n True\r\n >>> pd.array([False, False, pd.NA]).any()\r\n False\r\n >>> pd.array([], dtype=\"boolean\").any()\r\n False\r\n >>> pd.array([pd.NA], dtype=\"boolean\").any()\r\n False\r\n\r\n With ``skipna=False``, the result can be NA if this is logically\r\n required (whether ``pd.NA`` is True or False influences the result):\r\n\r\n >>> pd.array([True, False, pd.NA]).any(skipna=False)\r\n True\r\n >>> pd.array([False, False, pd.NA]).any(skipna=False)\r\n <NA>\r\n \"\"\"\r\n kwargs.pop(\"axis\", None)\r\n nv.validate_any((), kwargs)\r\n\r\n values = self._data.copy()\r\n np.putmask(values, self._mask, False)\r\n result = values.any()\r\n if skipna:\r\n return result\r\n else:\r\n if result or len(self) == 0 or not self._mask.any():\r\n return result\r\n else:\r\n return self.dtype.na_value\r\n\r\n def all(self, *, skipna: bool = True, **kwargs):\r\n \"\"\"\r\n Return whether all elements are True.\r\n\r\n Returns True unless there is at least one element that is False.\r\n By default, NAs are skipped. If ``skipna=False`` is specified and\r\n missing values are present, similar :ref:`Kleene logic <boolean.kleene>`\r\n is used as for logical operations.\r\n\r\n Parameters\r\n ----------\r\n skipna : bool, default True\r\n Exclude NA values. 
If the entire array is NA and `skipna` is\r\n True, then the result will be True, as for an empty array.\r\n If `skipna` is False, the result will still be False if there is\r\n at least one element that is False, otherwise NA will be returned\r\n if there are NA's present.\r\n **kwargs : any, default None\r\n Additional keywords have no effect but might be accepted for\r\n compatibility with NumPy.\r\n\r\n Returns\r\n -------\r\n bool or :attr:`pandas.NA`\r\n\r\n See Also\r\n --------\r\n numpy.all : Numpy version of this method.\r\n BooleanArray.any : Return whether any element is True.\r\n\r\n Examples\r\n --------\r\n The result indicates whether any element is True (and by default\r\n skips NAs):\r\n\r\n >>> pd.array([True, True, pd.NA]).all()\r\n True\r\n >>> pd.array([True, False, pd.NA]).all()\r\n False\r\n >>> pd.array([], dtype=\"boolean\").all()\r\n True\r\n >>> pd.array([pd.NA], dtype=\"boolean\").all()\r\n True\r\n\r\n With ``skipna=False``, the result can be NA if this is logically\r\n required (whether ``pd.NA`` is True or False influences the result):\r\n\r\n >>> pd.array([True, True, pd.NA]).all(skipna=False)\r\n <NA>\r\n >>> pd.array([True, False, pd.NA]).all(skipna=False)\r\n False\r\n \"\"\"\r\n kwargs.pop(\"axis\", None)\r\n nv.validate_all((), kwargs)\r\n\r\n values = self._data.copy()\r\n np.putmask(values, self._mask, True)\r\n result = values.all()\r\n\r\n if skipna:\r\n return result\r\n else:\r\n if not result or len(self) == 0 or not self._mask.any():\r\n return result\r\n else:\r\n return self.dtype.na_value\r\n\r\n def _logical_method(self, other, op):\r\n\r\n assert op.__name__ in {\"or_\", \"ror_\", \"and_\", \"rand_\", \"xor\", \"rxor\"}\r\n other_is_booleanarray = isinstance(other, BooleanArray)\r\n other_is_scalar = lib.is_scalar(other)\r\n mask = None\r\n\r\n if other_is_booleanarray:\r\n other, mask = other._data, other._mask\r\n elif is_list_like(other):\r\n other = np.asarray(other, dtype=\"bool\")\r\n if other.ndim > 1:\r\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\r\n other, mask = coerce_to_array(other, copy=False)\r\n elif isinstance(other, np.bool_):\r\n other = other.item()\r\n\r\n if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):\r\n raise TypeError(\r\n \"'other' should be pandas.NA or a bool. 
\"\r\n f\"Got {type(other).__name__} instead.\"\r\n )\r\n\r\n if not other_is_scalar and len(self) != len(other):\r\n raise ValueError(\"Lengths must match to compare\")\r\n\r\n if op.__name__ in {\"or_\", \"ror_\"}:\r\n result, mask = ops.kleene_or(self._data, other, self._mask, mask)\r\n elif op.__name__ in {\"and_\", \"rand_\"}:\r\n result, mask = ops.kleene_and(self._data, other, self._mask, mask)\r\n elif op.__name__ in {\"xor\", \"rxor\"}:\r\n result, mask = ops.kleene_xor(self._data, other, self._mask, mask)\r\n\r\n # error: Argument 2 to \"BooleanArray\" has incompatible type \"Optional[Any]\";\r\n # expected \"ndarray\"\r\n return BooleanArray(result, mask) # type: ignore[arg-type]\r\n\r\n def _cmp_method(self, other, op):\r\n from pandas.arrays import (\r\n FloatingArray,\r\n IntegerArray,\r\n )\r\n\r\n if isinstance(other, (IntegerArray, FloatingArray)):\r\n return NotImplemented\r\n\r\n mask = None\r\n\r\n if isinstance(other, BooleanArray):\r\n other, mask = other._data, other._mask\r\n\r\n elif is_list_like(other):\r\n other = np.asarray(other)\r\n if other.ndim > 1:\r\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\r\n if len(self) != len(other):\r\n raise ValueError(\"Lengths must match to compare\")\r\n\r\n if other is libmissing.NA:\r\n # numpy does not handle pd.NA well as \"other\" scalar (it returns\r\n # a scalar False instead of an array)\r\n result = np.zeros_like(self._data)\r\n mask = np.ones_like(self._data)\r\n else:\r\n # numpy will show a DeprecationWarning on invalid elementwise\r\n # comparisons, this will raise in the future\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", \"elementwise\", FutureWarning)\r\n with np.errstate(all=\"ignore\"):\r\n result = op(self._data, other)\r\n\r\n # nans propagate\r\n if mask is None:\r\n mask = self._mask.copy()\r\n else:\r\n mask = self._mask | mask\r\n\r\n return BooleanArray(result, mask, copy=False)\r\n\r\n def _arith_method(self, other, op):\r\n mask = None\r\n op_name = op.__name__\r\n\r\n if isinstance(other, BooleanArray):\r\n other, mask = other._data, other._mask\r\n\r\n elif is_list_like(other):\r\n other = np.asarray(other)\r\n if other.ndim > 1:\r\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\r\n if len(self) != len(other):\r\n raise ValueError(\"Lengths must match\")\r\n\r\n # nans propagate\r\n if mask is None:\r\n mask = self._mask\r\n if other is libmissing.NA:\r\n mask |= True\r\n else:\r\n mask = self._mask | mask\r\n\r\n if other is libmissing.NA:\r\n # if other is NA, the result will be all NA and we can't run the\r\n # actual op, so we need to choose the resulting dtype manually\r\n if op_name in {\"floordiv\", \"rfloordiv\", \"mod\", \"rmod\", \"pow\", \"rpow\"}:\r\n dtype = \"int8\"\r\n else:\r\n dtype = \"bool\"\r\n result = np.zeros(len(self._data), dtype=dtype)\r\n else:\r\n if op_name in {\"pow\", \"rpow\"} and isinstance(other, np.bool_):\r\n # Avoid DeprecationWarning: In future, it will be an error\r\n # for 'np.bool_' scalars to be interpreted as an index\r\n other = bool(other)\r\n\r\n with np.errstate(all=\"ignore\"):\r\n result = op(self._data, other)\r\n\r\n # divmod returns a tuple\r\n if op_name == \"divmod\":\r\n div, mod = result\r\n return (\r\n self._maybe_mask_result(div, mask, other, \"floordiv\"),\r\n self._maybe_mask_result(mod, mask, other, \"mod\"),\r\n )\r\n\r\n return self._maybe_mask_result(result, mask, other, op_name)\r\n\r\n def _reduce(self, name: str, *, skipna: bool = True, 
**kwargs):\r\n\r\n if name in {\"any\", \"all\"}:\r\n return getattr(self, name)(skipna=skipna, **kwargs)\r\n\r\n return super()._reduce(name, skipna=skipna, **kwargs)\r\n\r\n def _maybe_mask_result(self, result, mask, other, op_name: str):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n result : array-like\r\n mask : array-like bool\r\n other : scalar or array-like\r\n op_name : str\r\n \"\"\"\r\n # if we have a float operand we are by-definition\r\n # a float result\r\n # or our op is a divide\r\n if (is_float_dtype(other) or is_float(other)) or (\r\n op_name in [\"rtruediv\", \"truediv\"]\r\n ):\r\n from pandas.core.arrays import FloatingArray\r\n\r\n return FloatingArray(result, mask, copy=False)\r\n\r\n elif is_bool_dtype(result):\r\n return BooleanArray(result, mask, copy=False)\r\n\r\n elif is_integer_dtype(result):\r\n from pandas.core.arrays import IntegerArray\r\n\r\n return IntegerArray(result, mask, copy=False)\r\n else:\r\n result[mask] = np.nan\r\n return result\r\n",
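As a quick illustration of the Kleene (three-valued) logic that BooleanArray implements, pd.NA only propagates through a logical op when the known operand cannot determine the result by itself. A short sketch using the public constructor pd.array(..., dtype="boolean"):

import pandas as pd

a = pd.array([True, False, pd.NA], dtype="boolean")

print(a | True)    # [True, True, True]     True | NA is True
print(a & True)    # [True, False, <NA>]    True & NA stays NA
print(a & False)   # [False, False, False]  False & NA is False
print(a ^ pd.NA)   # [<NA>, <NA>, <NA>]     XOR with NA is always NA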
"import numpy as np\r\n\r\nfrom matplotlib import docstring\r\nfrom matplotlib.contour import ContourSet\r\nfrom matplotlib.tri.triangulation import Triangulation\r\n\r\n\r\[email protected]_interpd\r\nclass TriContourSet(ContourSet):\r\n \"\"\"\r\n Create and store a set of contour lines or filled regions for\r\n a triangular grid.\r\n\r\n This class is typically not instantiated directly by the user but by\r\n `~.Axes.tricontour` and `~.Axes.tricontourf`.\r\n\r\n %(contour_set_attributes)s\r\n \"\"\"\r\n def __init__(self, ax, *args, **kwargs):\r\n \"\"\"\r\n Draw triangular grid contour lines or filled regions,\r\n depending on whether keyword arg 'filled' is False\r\n (default) or True.\r\n\r\n The first argument of the initializer must be an axes\r\n object. The remaining arguments and keyword arguments\r\n are described in the docstring of `~.Axes.tricontour`.\r\n \"\"\"\r\n super().__init__(ax, *args, **kwargs)\r\n\r\n def _process_args(self, *args, **kwargs):\r\n \"\"\"\r\n Process args and kwargs.\r\n \"\"\"\r\n if isinstance(args[0], TriContourSet):\r\n C = args[0]._contour_generator\r\n if self.levels is None:\r\n self.levels = args[0].levels\r\n self.zmin = args[0].zmin\r\n self.zmax = args[0].zmax\r\n self._mins = args[0]._mins\r\n self._maxs = args[0]._maxs\r\n else:\r\n from matplotlib import _tri\r\n tri, z = self._contour_args(args, kwargs)\r\n C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)\r\n self._mins = [tri.x.min(), tri.y.min()]\r\n self._maxs = [tri.x.max(), tri.y.max()]\r\n\r\n self._contour_generator = C\r\n return kwargs\r\n\r\n def _contour_args(self, args, kwargs):\r\n if self.filled:\r\n fn = 'contourf'\r\n else:\r\n fn = 'contour'\r\n tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,\r\n **kwargs)\r\n z = np.ma.asarray(args[0])\r\n if z.shape != tri.x.shape:\r\n raise ValueError('z array must have same length as triangulation x'\r\n ' and y arrays')\r\n\r\n # z values must be finite, only need to check points that are included\r\n # in the triangulation.\r\n z_check = z[np.unique(tri.get_masked_triangles())]\r\n if np.ma.is_masked(z_check):\r\n raise ValueError('z must not contain masked points within the '\r\n 'triangulation')\r\n if not np.isfinite(z_check).all():\r\n raise ValueError('z array must not contain non-finite values '\r\n 'within the triangulation')\r\n\r\n z = np.ma.masked_invalid(z, copy=False)\r\n self.zmax = float(z_check.max())\r\n self.zmin = float(z_check.min())\r\n if self.logscale and self.zmin <= 0:\r\n raise ValueError('Cannot %s log of negative values.' % fn)\r\n self._process_contour_level_args(args[1:])\r\n return (tri, z)\r\n\r\n\r\ndocstring.interpd.update(_tricontour_doc=\"\"\"\r\nDraw contour %(type)s on an unstructured triangular grid.\r\n\r\nThe triangulation can be specified in one of two ways; either ::\r\n\r\n %(func)s(triangulation, ...)\r\n\r\nwhere *triangulation* is a `.Triangulation` object, or ::\r\n\r\n %(func)s(x, y, ...)\r\n %(func)s(x, y, triangles, ...)\r\n %(func)s(x, y, triangles=triangles, ...)\r\n %(func)s(x, y, mask=mask, ...)\r\n %(func)s(x, y, triangles, mask=mask, ...)\r\n\r\nin which case a `.Triangulation` object will be created. See that class'\r\ndocstring for an explanation of these cases.\r\n\r\nThe remaining arguments may be::\r\n\r\n %(func)s(..., Z)\r\n\r\nwhere *Z* is the array of values to contour, one per point in the\r\ntriangulation. 
The level values are chosen automatically.\r\n\r\n::\r\n\r\n %(func)s(..., Z, levels)\r\n\r\ncontour up to *levels+1* automatically chosen contour levels (*levels*\r\nintervals).\r\n\r\n::\r\n\r\n %(func)s(..., Z, levels)\r\n\r\ndraw contour %(type)s at the values specified in sequence *levels*, which must\r\nbe in increasing order.\r\n\r\n::\r\n\r\n %(func)s(Z, **kwargs)\r\n\r\nUse keyword arguments to control colors, linewidth, origin, cmap ... see below\r\nfor more details.\r\n\r\nParameters\r\n----------\r\ntriangulation : `.Triangulation`, optional\r\n The unstructured triangular grid.\r\n\r\n If specified, then *x*, *y*, *triangles*, and *mask* are not accepted.\r\n\r\nx, y : array-like, optional\r\n The coordinates of the values in *Z*.\r\n\r\ntriangles : (ntri, 3) array-like of int, optional\r\n For each triangle, the indices of the three points that make up the\r\n triangle, ordered in an anticlockwise manner. If not specified, the\r\n Delaunay triangulation is calculated.\r\n\r\nmask : (ntri,) array-like of bool, optional\r\n Which triangles are masked out.\r\n\r\nZ : 2D array-like\r\n The height values over which the contour is drawn.\r\n\r\nlevels : int or array-like, optional\r\n Determines the number and positions of the contour lines / regions.\r\n\r\n If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries to\r\n automatically choose no more than *n+1* \"nice\" contour levels between\r\n *vmin* and *vmax*.\r\n\r\n If array-like, draw contour lines at the specified levels. The values must\r\n be in increasing order.\r\n\r\nReturns\r\n-------\r\n`~matplotlib.tri.TriContourSet`\r\n\r\nOther Parameters\r\n----------------\r\ncolors : color string or sequence of colors, optional\r\n The colors of the levels, i.e., the contour %(type)s.\r\n\r\n The sequence is cycled for the levels in ascending order. If the sequence\r\n is shorter than the number of levels, it's repeated.\r\n\r\n As a shortcut, single color strings may be used in place of one-element\r\n lists, i.e. ``'red'`` instead of ``['red']`` to color all levels with the\r\n same color. This shortcut does only work for color strings, not for other\r\n ways of specifying colors.\r\n\r\n By default (value *None*), the colormap specified by *cmap* will be used.\r\n\r\nalpha : float, default: 1\r\n The alpha blending value, between 0 (transparent) and 1 (opaque).\r\n\r\ncmap : str or `.Colormap`, default: :rc:`image.cmap`\r\n A `.Colormap` instance or registered colormap name. The colormap maps the\r\n level values to colors.\r\n\r\n If both *colors* and *cmap* are given, an error is raised.\r\n\r\nnorm : `~matplotlib.colors.Normalize`, optional\r\n If a colormap is used, the `.Normalize` instance scales the level values to\r\n the canonical colormap range [0, 1] for mapping to colors. If not given,\r\n the default linear scaling is used.\r\n\r\nvmin, vmax : float, optional\r\n If not *None*, either or both of these values will be supplied to\r\n the `.Normalize` instance, overriding the default color scaling\r\n based on *levels*.\r\n\r\norigin : {*None*, 'upper', 'lower', 'image'}, default: None\r\n Determines the orientation and exact position of *Z* by specifying the\r\n position of ``Z[0, 0]``. 
This is only relevant, if *X*, *Y* are not given.\r\n\r\n - *None*: ``Z[0, 0]`` is at X=0, Y=0 in the lower left corner.\r\n - 'lower': ``Z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.\r\n - 'upper': ``Z[0, 0]`` is at X=N+0.5, Y=0.5 in the upper left corner.\r\n - 'image': Use the value from :rc:`image.origin`.\r\n\r\nextent : (x0, x1, y0, y1), optional\r\n If *origin* is not *None*, then *extent* is interpreted as in `.imshow`: it\r\n gives the outer pixel boundaries. In this case, the position of Z[0, 0] is\r\n the center of the pixel, not a corner. If *origin* is *None*, then\r\n (*x0*, *y0*) is the position of Z[0, 0], and (*x1*, *y1*) is the position\r\n of Z[-1, -1].\r\n\r\n This argument is ignored if *X* and *Y* are specified in the call to\r\n contour.\r\n\r\nlocator : ticker.Locator subclass, optional\r\n The locator is used to determine the contour levels if they are not given\r\n explicitly via *levels*.\r\n Defaults to `~.ticker.MaxNLocator`.\r\n\r\nextend : {'neither', 'both', 'min', 'max'}, default: 'neither'\r\n Determines the ``%(func)s``-coloring of values that are outside the\r\n *levels* range.\r\n\r\n If 'neither', values outside the *levels* range are not colored. If 'min',\r\n 'max' or 'both', color the values below, above or below and above the\r\n *levels* range.\r\n\r\n Values below ``min(levels)`` and above ``max(levels)`` are mapped to the\r\n under/over values of the `.Colormap`. Note that most colormaps do not have\r\n dedicated colors for these by default, so that the over and under values\r\n are the edge values of the colormap. You may want to set these values\r\n explicitly using `.Colormap.set_under` and `.Colormap.set_over`.\r\n\r\n .. note::\r\n\r\n An existing `.TriContourSet` does not get notified if properties of its\r\n colormap are changed. Therefore, an explicit call to\r\n `.ContourSet.changed()` is needed after modifying the colormap. The\r\n explicit call can be left out, if a colorbar is assigned to the\r\n `.TriContourSet` because it internally calls `.ContourSet.changed()`.\r\n\r\nxunits, yunits : registered units, optional\r\n Override axis units by specifying an instance of a\r\n :class:`matplotlib.units.ConversionInterface`.\r\n\r\nantialiased : bool, optional\r\n Enable antialiasing, overriding the defaults. For\r\n filled contours, the default is *True*. For line contours,\r\n it is taken from :rc:`lines.antialiased`.\"\"\")\r\n\r\n\r\[email protected](func='tricontour', type='lines')\r\[email protected]_interpd\r\ndef tricontour(ax, *args, **kwargs):\r\n \"\"\"\r\n %(_tricontour_doc)s\r\n\r\n linewidths : float or array-like, default: :rc:`contour.linewidth`\r\n The line width of the contour lines.\r\n\r\n If a number, all levels will be plotted with this linewidth.\r\n\r\n If a sequence, the levels in ascending order will be plotted with\r\n the linewidths in the order specified.\r\n\r\n If None, this falls back to :rc:`lines.linewidth`.\r\n\r\n linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional\r\n If *linestyles* is *None*, the default is 'solid' unless the lines are\r\n monochrome. In that case, negative contours will take their linestyle\r\n from :rc:`contour.negative_linestyle` setting.\r\n\r\n *linestyles* can also be an iterable of the above strings specifying a\r\n set of linestyles to be used. 
If this iterable is shorter than the\r\n number of contour levels it will be repeated as necessary.\r\n \"\"\"\r\n kwargs['filled'] = False\r\n return TriContourSet(ax, *args, **kwargs)\r\n\r\n\r\[email protected](func='tricontourf', type='regions')\r\[email protected]_interpd\r\ndef tricontourf(ax, *args, **kwargs):\r\n \"\"\"\r\n %(_tricontour_doc)s\r\n\r\n hatches : list[str], optional\r\n A list of cross hatch patterns to use on the filled areas.\r\n If None, no hatching will be added to the contour.\r\n Hatching is supported in the PostScript, PDF, SVG and Agg\r\n backends only.\r\n\r\n Notes\r\n -----\r\n `.tricontourf` fills intervals that are closed at the top; that is, for\r\n boundaries *z1* and *z2*, the filled region is::\r\n\r\n z1 < Z <= z2\r\n\r\n except for the lowest interval, which is closed on both sides (i.e. it\r\n includes the lowest value).\r\n \"\"\"\r\n kwargs['filled'] = True\r\n return TriContourSet(ax, *args, **kwargs)\r\n",
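In practice these wrappers are reached through the Axes methods of the same name. A minimal usage sketch (hypothetical scattered data) that builds a Triangulation explicitly and draws both filled and line contours:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as mtri

rng = np.random.default_rng(0)
x = rng.uniform(-2, 2, 200)
y = rng.uniform(-2, 2, 200)
z = np.exp(-(x**2 + y**2))

# Delaunay triangulation of the scattered points.
tri = mtri.Triangulation(x, y)

fig, ax = plt.subplots()
filled = ax.tricontourf(tri, z, levels=10, cmap="viridis")    # filled regions
ax.tricontour(tri, z, levels=10, colors="k", linewidths=0.5)  # contour lines
fig.colorbar(filled, ax=ax)
plt.show()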
"from datetime import datetime\r\n\r\nimport dateutil\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n DatetimeIndex,\r\n Index,\r\n Int64Index,\r\n NaT,\r\n PeriodIndex,\r\n Timestamp,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDatetimeIndex:\r\n def test_astype(self):\r\n # GH 13149, GH 13209\r\n idx = DatetimeIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], name=\"idx\")\r\n\r\n result = idx.astype(object)\r\n expected = Index(\r\n [Timestamp(\"2016-05-16\")] + [NaT] * 3, dtype=object, name=\"idx\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n result = idx.astype(int)\r\n expected = Int64Index(\r\n [1463356800000000000] + [-9223372036854775808] * 3,\r\n dtype=np.int64,\r\n name=\"idx\",\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n rng = date_range(\"1/1/2000\", periods=10, name=\"idx\")\r\n with tm.assert_produces_warning(FutureWarning):\r\n result = rng.astype(\"i8\")\r\n tm.assert_index_equal(result, Index(rng.asi8, name=\"idx\"))\r\n tm.assert_numpy_array_equal(result.values, rng.asi8)\r\n\r\n def test_astype_uint(self):\r\n arr = date_range(\"2000\", periods=2, name=\"idx\")\r\n expected = pd.UInt64Index(\r\n np.array([946684800000000000, 946771200000000000], dtype=\"uint64\"),\r\n name=\"idx\",\r\n )\r\n with tm.assert_produces_warning(FutureWarning):\r\n tm.assert_index_equal(arr.astype(\"uint64\"), expected)\r\n tm.assert_index_equal(arr.astype(\"uint32\"), expected)\r\n\r\n def test_astype_with_tz(self):\r\n\r\n # with tz\r\n rng = date_range(\"1/1/2000\", periods=10, tz=\"US/Eastern\")\r\n with tm.assert_produces_warning(FutureWarning):\r\n # deprecated\r\n result = rng.astype(\"datetime64[ns]\")\r\n with tm.assert_produces_warning(FutureWarning):\r\n # check DatetimeArray while we're here deprecated\r\n rng._data.astype(\"datetime64[ns]\")\r\n\r\n expected = (\r\n date_range(\"1/1/2000\", periods=10, tz=\"US/Eastern\")\r\n .tz_convert(\"UTC\")\r\n .tz_localize(None)\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_tzaware_to_tzaware(self):\r\n # GH 18951: tz-aware to tz-aware\r\n idx = date_range(\"20170101\", periods=4, tz=\"US/Pacific\")\r\n result = idx.astype(\"datetime64[ns, US/Eastern]\")\r\n expected = date_range(\"20170101 03:00:00\", periods=4, tz=\"US/Eastern\")\r\n tm.assert_index_equal(result, expected)\r\n assert result.freq == expected.freq\r\n\r\n def test_astype_tznaive_to_tzaware(self):\r\n # GH 18951: tz-naive to tz-aware\r\n idx = date_range(\"20170101\", periods=4)\r\n idx = idx._with_freq(None) # tz_localize does not preserve freq\r\n with tm.assert_produces_warning(FutureWarning):\r\n # dt64->dt64tz deprecated\r\n result = idx.astype(\"datetime64[ns, US/Eastern]\")\r\n with tm.assert_produces_warning(FutureWarning):\r\n # dt64->dt64tz deprecated\r\n idx._data.astype(\"datetime64[ns, US/Eastern]\")\r\n\r\n expected = date_range(\"20170101\", periods=4, tz=\"US/Eastern\")\r\n expected = expected._with_freq(None)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_str_nat(self):\r\n # GH 13149, GH 13209\r\n # verify that we are returning NaT as a string (and not unicode)\r\n\r\n idx = DatetimeIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN])\r\n result = idx.astype(str)\r\n expected = Index([\"2016-05-16\", \"NaT\", \"NaT\", \"NaT\"], dtype=object)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_str(self):\r\n # test astype string - 
#10442\r\n dti = date_range(\"2012-01-01\", periods=4, name=\"test_name\")\r\n result = dti.astype(str)\r\n expected = Index(\r\n [\"2012-01-01\", \"2012-01-02\", \"2012-01-03\", \"2012-01-04\"],\r\n name=\"test_name\",\r\n dtype=object,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_str_tz_and_name(self):\r\n # test astype string with tz and name\r\n dti = date_range(\"2012-01-01\", periods=3, name=\"test_name\", tz=\"US/Eastern\")\r\n result = dti.astype(str)\r\n expected = Index(\r\n [\r\n \"2012-01-01 00:00:00-05:00\",\r\n \"2012-01-02 00:00:00-05:00\",\r\n \"2012-01-03 00:00:00-05:00\",\r\n ],\r\n name=\"test_name\",\r\n dtype=object,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_str_freq_and_name(self):\r\n # test astype string with freqH and name\r\n dti = date_range(\"1/1/2011\", periods=3, freq=\"H\", name=\"test_name\")\r\n result = dti.astype(str)\r\n expected = Index(\r\n [\"2011-01-01 00:00:00\", \"2011-01-01 01:00:00\", \"2011-01-01 02:00:00\"],\r\n name=\"test_name\",\r\n dtype=object,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_str_freq_and_tz(self):\r\n # test astype string with freqH and timezone\r\n dti = date_range(\r\n \"3/6/2012 00:00\", periods=2, freq=\"H\", tz=\"Europe/London\", name=\"test_name\"\r\n )\r\n result = dti.astype(str)\r\n expected = Index(\r\n [\"2012-03-06 00:00:00+00:00\", \"2012-03-06 01:00:00+00:00\"],\r\n dtype=object,\r\n name=\"test_name\",\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_datetime64(self):\r\n # GH 13149, GH 13209\r\n idx = DatetimeIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], name=\"idx\")\r\n\r\n result = idx.astype(\"datetime64[ns]\")\r\n tm.assert_index_equal(result, idx)\r\n assert result is not idx\r\n\r\n result = idx.astype(\"datetime64[ns]\", copy=False)\r\n tm.assert_index_equal(result, idx)\r\n assert result is idx\r\n\r\n idx_tz = DatetimeIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN], tz=\"EST\", name=\"idx\")\r\n with tm.assert_produces_warning(FutureWarning):\r\n # dt64tz->dt64 deprecated\r\n result = idx_tz.astype(\"datetime64[ns]\")\r\n expected = DatetimeIndex(\r\n [\"2016-05-16 05:00:00\", \"NaT\", \"NaT\", \"NaT\"],\r\n dtype=\"datetime64[ns]\",\r\n name=\"idx\",\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_astype_object(self):\r\n rng = date_range(\"1/1/2000\", periods=20)\r\n\r\n casted = rng.astype(\"O\")\r\n exp_values = list(rng)\r\n\r\n tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))\r\n assert casted.tolist() == exp_values\r\n\r\n @pytest.mark.parametrize(\"tz\", [None, \"Asia/Tokyo\"])\r\n def test_astype_object_tz(self, tz):\r\n idx = date_range(start=\"2013-01-01\", periods=4, freq=\"M\", name=\"idx\", tz=tz)\r\n expected_list = [\r\n Timestamp(\"2013-01-31\", tz=tz),\r\n Timestamp(\"2013-02-28\", tz=tz),\r\n Timestamp(\"2013-03-31\", tz=tz),\r\n Timestamp(\"2013-04-30\", tz=tz),\r\n ]\r\n expected = Index(expected_list, dtype=object, name=\"idx\")\r\n result = idx.astype(object)\r\n tm.assert_index_equal(result, expected)\r\n assert idx.tolist() == expected_list\r\n\r\n def test_astype_object_with_nat(self):\r\n idx = DatetimeIndex(\r\n [datetime(2013, 1, 1), datetime(2013, 1, 2), NaT, datetime(2013, 1, 4)],\r\n name=\"idx\",\r\n )\r\n expected_list = [\r\n Timestamp(\"2013-01-01\"),\r\n Timestamp(\"2013-01-02\"),\r\n NaT,\r\n Timestamp(\"2013-01-04\"),\r\n ]\r\n expected = Index(expected_list, dtype=object, name=\"idx\")\r\n result = 
idx.astype(object)\r\n tm.assert_index_equal(result, expected)\r\n assert idx.tolist() == expected_list\r\n\r\n @pytest.mark.parametrize(\r\n \"dtype\",\r\n [float, \"timedelta64\", \"timedelta64[ns]\", \"datetime64\", \"datetime64[D]\"],\r\n )\r\n def test_astype_raises(self, dtype):\r\n # GH 13149, GH 13209\r\n idx = DatetimeIndex([\"2016-05-16\", \"NaT\", NaT, np.NaN])\r\n msg = \"Cannot cast DatetimeIndex to dtype\"\r\n with pytest.raises(TypeError, match=msg):\r\n idx.astype(dtype)\r\n\r\n def test_index_convert_to_datetime_array(self):\r\n def _check_rng(rng):\r\n converted = rng.to_pydatetime()\r\n assert isinstance(converted, np.ndarray)\r\n for x, stamp in zip(converted, rng):\r\n assert isinstance(x, datetime)\r\n assert x == stamp.to_pydatetime()\r\n assert x.tzinfo == stamp.tzinfo\r\n\r\n rng = date_range(\"20090415\", \"20090519\")\r\n rng_eastern = date_range(\"20090415\", \"20090519\", tz=\"US/Eastern\")\r\n rng_utc = date_range(\"20090415\", \"20090519\", tz=\"utc\")\r\n\r\n _check_rng(rng)\r\n _check_rng(rng_eastern)\r\n _check_rng(rng_utc)\r\n\r\n def test_index_convert_to_datetime_array_explicit_pytz(self):\r\n def _check_rng(rng):\r\n converted = rng.to_pydatetime()\r\n assert isinstance(converted, np.ndarray)\r\n for x, stamp in zip(converted, rng):\r\n assert isinstance(x, datetime)\r\n assert x == stamp.to_pydatetime()\r\n assert x.tzinfo == stamp.tzinfo\r\n\r\n rng = date_range(\"20090415\", \"20090519\")\r\n rng_eastern = date_range(\"20090415\", \"20090519\", tz=pytz.timezone(\"US/Eastern\"))\r\n rng_utc = date_range(\"20090415\", \"20090519\", tz=pytz.utc)\r\n\r\n _check_rng(rng)\r\n _check_rng(rng_eastern)\r\n _check_rng(rng_utc)\r\n\r\n def test_index_convert_to_datetime_array_dateutil(self):\r\n def _check_rng(rng):\r\n converted = rng.to_pydatetime()\r\n assert isinstance(converted, np.ndarray)\r\n for x, stamp in zip(converted, rng):\r\n assert isinstance(x, datetime)\r\n assert x == stamp.to_pydatetime()\r\n assert x.tzinfo == stamp.tzinfo\r\n\r\n rng = date_range(\"20090415\", \"20090519\")\r\n rng_eastern = date_range(\"20090415\", \"20090519\", tz=\"dateutil/US/Eastern\")\r\n rng_utc = date_range(\"20090415\", \"20090519\", tz=dateutil.tz.tzutc())\r\n\r\n _check_rng(rng)\r\n _check_rng(rng_eastern)\r\n _check_rng(rng_utc)\r\n\r\n @pytest.mark.parametrize(\r\n \"tz, dtype\",\r\n [[\"US/Pacific\", \"datetime64[ns, US/Pacific]\"], [None, \"datetime64[ns]\"]],\r\n )\r\n def test_integer_index_astype_datetime(self, tz, dtype):\r\n # GH 20997, 20964, 24559\r\n val = [Timestamp(\"2018-01-01\", tz=tz).value]\r\n result = Index(val, name=\"idx\").astype(dtype)\r\n expected = DatetimeIndex([\"2018-01-01\"], tz=tz, name=\"idx\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_dti_astype_period(self):\r\n idx = DatetimeIndex([NaT, \"2011-01-01\", \"2011-02-01\"], name=\"idx\")\r\n\r\n res = idx.astype(\"period[M]\")\r\n exp = PeriodIndex([\"NaT\", \"2011-01\", \"2011-02\"], freq=\"M\", name=\"idx\")\r\n tm.assert_index_equal(res, exp)\r\n\r\n res = idx.astype(\"period[3M]\")\r\n exp = PeriodIndex([\"NaT\", \"2011-01\", \"2011-02\"], freq=\"3M\", name=\"idx\")\r\n tm.assert_index_equal(res, exp)\r\n\r\n\r\nclass TestAstype:\r\n @pytest.mark.parametrize(\"tz\", [None, \"US/Central\"])\r\n def test_astype_category(self, tz):\r\n obj = date_range(\"2000\", periods=2, tz=tz, name=\"idx\")\r\n result = obj.astype(\"category\")\r\n expected = pd.CategoricalIndex(\r\n [Timestamp(\"2000-01-01\", tz=tz), Timestamp(\"2000-01-02\", tz=tz)],\r\n name=\"idx\",\r\n 
)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = obj._data.astype(\"category\")\r\n expected = expected.values\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"tz\", [None, \"US/Central\"])\r\n def test_astype_array_fallback(self, tz):\r\n obj = date_range(\"2000\", periods=2, tz=tz, name=\"idx\")\r\n result = obj.astype(bool)\r\n expected = Index(np.array([True, True]), name=\"idx\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = obj._data.astype(bool)\r\n expected = np.array([True, True])\r\n tm.assert_numpy_array_equal(result, expected)\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nimport pandas._testing as tm\r\nfrom pandas.core.arrays.sparse import SparseArray\r\n\r\n\r\nclass TestSparseArrayConcat:\r\n @pytest.mark.parametrize(\"kind\", [\"integer\", \"block\"])\r\n def test_basic(self, kind):\r\n a = SparseArray([1, 0, 0, 2], kind=kind)\r\n b = SparseArray([1, 0, 2, 2], kind=kind)\r\n\r\n result = SparseArray._concat_same_type([a, b])\r\n # Can't make any assertions about the sparse index itself\r\n # since we aren't don't merge sparse blocs across arrays\r\n # in to_concat\r\n expected = np.array([1, 2, 1, 2, 2], dtype=\"int64\")\r\n tm.assert_numpy_array_equal(result.sp_values, expected)\r\n assert result.kind == kind\r\n\r\n @pytest.mark.parametrize(\"kind\", [\"integer\", \"block\"])\r\n def test_uses_first_kind(self, kind):\r\n other = \"integer\" if kind == \"block\" else \"block\"\r\n a = SparseArray([1, 0, 0, 2], kind=kind)\r\n b = SparseArray([1, 0, 2, 2], kind=other)\r\n\r\n result = SparseArray._concat_same_type([a, b])\r\n expected = np.array([1, 2, 1, 2, 2], dtype=\"int64\")\r\n tm.assert_numpy_array_equal(result.sp_values, expected)\r\n assert result.kind == kind\r\n\r\n\r\[email protected](\r\n \"other, expected_dtype\",\r\n [\r\n # compatible dtype -> preserve sparse\r\n (pd.Series([3, 4, 5], dtype=\"int64\"), pd.SparseDtype(\"int64\", 0)),\r\n # (pd.Series([3, 4, 5], dtype=\"Int64\"), pd.SparseDtype(\"int64\", 0)),\r\n # incompatible dtype -> Sparse[common dtype]\r\n (pd.Series([1.5, 2.5, 3.5], dtype=\"float64\"), pd.SparseDtype(\"float64\", 0)),\r\n # incompatible dtype -> Sparse[object] dtype\r\n (pd.Series([\"a\", \"b\", \"c\"], dtype=object), pd.SparseDtype(object, 0)),\r\n # categorical with compatible categories -> dtype of the categories\r\n (pd.Series([3, 4, 5], dtype=\"category\"), np.dtype(\"int64\")),\r\n (pd.Series([1.5, 2.5, 3.5], dtype=\"category\"), np.dtype(\"float64\")),\r\n # categorical with incompatible categories -> object dtype\r\n (pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\"), np.dtype(object)),\r\n ],\r\n)\r\ndef test_concat_with_non_sparse(other, expected_dtype):\r\n # https://github.com/pandas-dev/pandas/issues/34336\r\n s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype(\"int64\", 0))\r\n\r\n result = pd.concat([s_sparse, other], ignore_index=True)\r\n expected = pd.Series(list(s_sparse) + list(other)).astype(expected_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = pd.concat([other, s_sparse], ignore_index=True)\r\n expected = pd.Series(list(other) + list(s_sparse)).astype(expected_dtype)\r\n tm.assert_series_equal(result, expected)\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestConvertDtypes:\r\n @pytest.mark.parametrize(\r\n \"convert_integer, expected\", [(False, np.dtype(\"int32\")), (True, \"Int32\")]\r\n )\r\n def test_convert_dtypes(self, convert_integer, expected, string_storage):\r\n # Specific types are tested in tests/series/test_dtypes.py\r\n # Just check that it works for DataFrame here\r\n df = pd.DataFrame(\r\n {\r\n \"a\": pd.Series([1, 2, 3], dtype=np.dtype(\"int32\")),\r\n \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=np.dtype(\"O\")),\r\n }\r\n )\r\n with pd.option_context(\"string_storage\", string_storage):\r\n result = df.convert_dtypes(True, True, convert_integer, False)\r\n expected = pd.DataFrame(\r\n {\r\n \"a\": pd.Series([1, 2, 3], dtype=expected),\r\n \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=f\"string[{string_storage}]\"),\r\n }\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_convert_empty(self):\r\n # Empty DataFrame can pass convert_dtypes, see GH#40393\r\n empty_df = pd.DataFrame()\r\n tm.assert_frame_equal(empty_df, empty_df.convert_dtypes())\r\n",
"from datetime import datetime\r\nimport re\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas import (\r\n DataFrame,\r\n Index,\r\n MultiIndex,\r\n Series,\r\n _testing as tm,\r\n)\r\n\r\n\r\ndef test_extract_expand_kwarg_wrong_type_raises(any_string_dtype):\r\n # TODO: should this raise TypeError\r\n values = Series([\"fooBAD__barBAD\", np.nan, \"foo\"], dtype=any_string_dtype)\r\n with pytest.raises(ValueError, match=\"expand must be True or False\"):\r\n values.str.extract(\".*(BAD[_]+).*(BAD)\", expand=None)\r\n\r\n\r\ndef test_extract_expand_kwarg(any_string_dtype):\r\n s = Series([\"fooBAD__barBAD\", np.nan, \"foo\"], dtype=any_string_dtype)\r\n expected = DataFrame([\"BAD__\", np.nan, np.nan], dtype=any_string_dtype)\r\n\r\n result = s.str.extract(\".*(BAD[_]+).*\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = s.str.extract(\".*(BAD[_]+).*\", expand=True)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = DataFrame(\r\n [[\"BAD__\", \"BAD\"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n result = s.str.extract(\".*(BAD[_]+).*(BAD)\", expand=False)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_False_mixed_object():\r\n ser = Series(\r\n [\"aBAD_BAD\", np.nan, \"BAD_b_BAD\", True, datetime.today(), \"foo\", None, 1, 2.0]\r\n )\r\n\r\n # two groups\r\n result = ser.str.extract(\".*(BAD[_]+).*(BAD)\", expand=False)\r\n er = [np.nan, np.nan] # empty row\r\n expected = DataFrame([[\"BAD_\", \"BAD\"], er, [\"BAD_\", \"BAD\"], er, er, er, er, er, er])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # single group\r\n result = ser.str.extract(\".*(BAD[_]+).*BAD\", expand=False)\r\n expected = Series(\r\n [\"BAD_\", np.nan, \"BAD_\", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_index_raises():\r\n # GH9980\r\n # Index only works with one regex group since\r\n # multi-group would expand to a frame\r\n idx = Index([\"A1\", \"A2\", \"A3\", \"A4\", \"B5\"])\r\n msg = \"only one regex group is supported with Index\"\r\n with pytest.raises(ValueError, match=msg):\r\n idx.str.extract(\"([AB])([123])\", expand=False)\r\n\r\n\r\ndef test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype):\r\n s_or_idx = index_or_series([\"A1\", \"B2\", \"C3\"], dtype=any_string_dtype)\r\n msg = \"pattern contains no capture groups\"\r\n\r\n # no groups\r\n with pytest.raises(ValueError, match=msg):\r\n s_or_idx.str.extract(\"[ABC][123]\", expand=False)\r\n\r\n # only non-capturing groups\r\n with pytest.raises(ValueError, match=msg):\r\n s_or_idx.str.extract(\"(?:[AB]).*\", expand=False)\r\n\r\n\r\ndef test_extract_expand_single_capture_group(index_or_series, any_string_dtype):\r\n # single group renames series/index properly\r\n s_or_idx = index_or_series([\"A1\", \"A2\"], dtype=any_string_dtype)\r\n result = s_or_idx.str.extract(r\"(?P<uno>A)\\d\", expand=False)\r\n\r\n expected = index_or_series([\"A\", \"A\"], name=\"uno\", dtype=any_string_dtype)\r\n if index_or_series == Series:\r\n tm.assert_series_equal(result, expected)\r\n else:\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_capture_groups(any_string_dtype):\r\n s = Series([\"A1\", \"B2\", \"C3\"], dtype=any_string_dtype)\r\n # one group, no matches\r\n result = s.str.extract(\"(_)\", expand=False)\r\n expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)\r\n tm.assert_series_equal(result, 
expected)\r\n\r\n # two groups, no matches\r\n result = s.str.extract(\"(_)(_)\", expand=False)\r\n expected = DataFrame(\r\n [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one group, some matches\r\n result = s.str.extract(\"([AB])[123]\", expand=False)\r\n expected = Series([\"A\", \"B\", np.nan], dtype=any_string_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # two groups, some matches\r\n result = s.str.extract(\"([AB])([123])\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one named group\r\n result = s.str.extract(\"(?P<letter>[AB])\", expand=False)\r\n expected = Series([\"A\", \"B\", np.nan], name=\"letter\", dtype=any_string_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # two named groups\r\n result = s.str.extract(\"(?P<letter>[AB])(?P<number>[123])\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # mix named and unnamed groups\r\n result = s.str.extract(\"([AB])(?P<number>[123])\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]],\r\n columns=[0, \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one normal group, one non-capturing group\r\n result = s.str.extract(\"([AB])(?:[123])\", expand=False)\r\n expected = Series([\"A\", \"B\", np.nan], dtype=any_string_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # two normal groups, one non-capturing group\r\n s = Series([\"A11\", \"B22\", \"C33\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"([AB])([123])(?:[123])\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one optional group followed by one normal group\r\n s = Series([\"A1\", \"B2\", \"3\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"(?P<letter>[AB])?(?P<number>[123])\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, \"3\"]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one normal group followed by one optional group\r\n s = Series([\"A1\", \"B2\", \"C\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"(?P<letter>[ABC])(?P<number>[123])?\", expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [\"C\", np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_capture_groups_index(index, any_string_dtype):\r\n # https://github.com/pandas-dev/pandas/issues/6348\r\n # not passing index to the extractor\r\n data = [\"A1\", \"B2\", \"C\"]\r\n\r\n if len(index) < len(data):\r\n pytest.skip(\"Index too short\")\r\n\r\n index = index[: len(data)]\r\n s = Series(data, index=index, dtype=any_string_dtype)\r\n\r\n result = s.str.extract(r\"(\\d)\", expand=False)\r\n expected = Series([\"1\", \"2\", np.nan], index=index, dtype=any_string_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = s.str.extract(r\"(?P<letter>\\D)(?P<number>\\d)?\", 
expand=False)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [\"C\", np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n index=index,\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_single_series_name_is_preserved(any_string_dtype):\r\n s = Series([\"a3\", \"b3\", \"c2\"], name=\"bob\", dtype=any_string_dtype)\r\n result = s.str.extract(r\"(?P<sue>[a-z])\", expand=False)\r\n expected = Series([\"a\", \"b\", \"c\"], name=\"sue\", dtype=any_string_dtype)\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_True(any_string_dtype):\r\n # Contains tests like those in test_match and some others.\r\n s = Series([\"fooBAD__barBAD\", np.nan, \"foo\"], dtype=any_string_dtype)\r\n\r\n result = s.str.extract(\".*(BAD[_]+).*(BAD)\", expand=True)\r\n expected = DataFrame(\r\n [[\"BAD__\", \"BAD\"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_True_mixed_object():\r\n er = [np.nan, np.nan] # empty row\r\n mixed = Series(\r\n [\r\n \"aBAD_BAD\",\r\n np.nan,\r\n \"BAD_b_BAD\",\r\n True,\r\n datetime.today(),\r\n \"foo\",\r\n None,\r\n 1,\r\n 2.0,\r\n ]\r\n )\r\n\r\n result = mixed.str.extract(\".*(BAD[_]+).*(BAD)\", expand=True)\r\n expected = DataFrame([[\"BAD_\", \"BAD\"], er, [\"BAD_\", \"BAD\"], er, er, er, er, er, er])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_expand_True_single_capture_group_raises(\r\n index_or_series, any_string_dtype\r\n):\r\n # these should work for both Series and Index\r\n # no groups\r\n s_or_idx = index_or_series([\"A1\", \"B2\", \"C3\"], dtype=any_string_dtype)\r\n msg = \"pattern contains no capture groups\"\r\n with pytest.raises(ValueError, match=msg):\r\n s_or_idx.str.extract(\"[ABC][123]\", expand=True)\r\n\r\n # only non-capturing groups\r\n with pytest.raises(ValueError, match=msg):\r\n s_or_idx.str.extract(\"(?:[AB]).*\", expand=True)\r\n\r\n\r\ndef test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype):\r\n # single group renames series/index properly\r\n s_or_idx = index_or_series([\"A1\", \"A2\"], dtype=any_string_dtype)\r\n result = s_or_idx.str.extract(r\"(?P<uno>A)\\d\", expand=True)\r\n expected_dtype = \"object\" if index_or_series is Index else any_string_dtype\r\n expected = DataFrame({\"uno\": [\"A\", \"A\"]}, dtype=expected_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\"name\", [None, \"series_name\"])\r\ndef test_extract_series(name, any_string_dtype):\r\n # extract should give the same result whether or not the series has a name.\r\n s = Series([\"A1\", \"B2\", \"C3\"], name=name, dtype=any_string_dtype)\r\n\r\n # one group, no matches\r\n result = s.str.extract(\"(_)\", expand=True)\r\n expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # two groups, no matches\r\n result = s.str.extract(\"(_)(_)\", expand=True)\r\n expected = DataFrame(\r\n [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one group, some matches\r\n result = s.str.extract(\"([AB])[123]\", expand=True)\r\n expected = DataFrame([\"A\", \"B\", np.nan], dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # two groups, some matches\r\n result = s.str.extract(\"([AB])([123])\", expand=True)\r\n expected = DataFrame(\r\n 
[[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one named group\r\n result = s.str.extract(\"(?P<letter>[AB])\", expand=True)\r\n expected = DataFrame({\"letter\": [\"A\", \"B\", np.nan]}, dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # two named groups\r\n result = s.str.extract(\"(?P<letter>[AB])(?P<number>[123])\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # mix named and unnamed groups\r\n result = s.str.extract(\"([AB])(?P<number>[123])\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]],\r\n columns=[0, \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one normal group, one non-capturing group\r\n result = s.str.extract(\"([AB])(?:[123])\", expand=True)\r\n expected = DataFrame([\"A\", \"B\", np.nan], dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_optional_groups(any_string_dtype):\r\n\r\n # two normal groups, one non-capturing group\r\n s = Series([\"A11\", \"B22\", \"C33\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"([AB])([123])(?:[123])\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, np.nan]], dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one optional group followed by one normal group\r\n s = Series([\"A1\", \"B2\", \"3\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"(?P<letter>[AB])?(?P<number>[123])\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [np.nan, \"3\"]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one normal group followed by one optional group\r\n s = Series([\"A1\", \"B2\", \"C\"], dtype=any_string_dtype)\r\n result = s.str.extract(\"(?P<letter>[ABC])(?P<number>[123])?\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [\"C\", np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_dataframe_capture_groups_index(index, any_string_dtype):\r\n # GH6348\r\n # not passing index to the extractor\r\n\r\n data = [\"A1\", \"B2\", \"C\"]\r\n\r\n if len(index) < len(data):\r\n pytest.skip(\"Index too short\")\r\n\r\n index = index[: len(data)]\r\n s = Series(data, index=index, dtype=any_string_dtype)\r\n\r\n result = s.str.extract(r\"(\\d)\", expand=True)\r\n expected = DataFrame([\"1\", \"2\", np.nan], index=index, dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = s.str.extract(r\"(?P<letter>\\D)(?P<number>\\d)?\", expand=True)\r\n expected = DataFrame(\r\n [[\"A\", \"1\"], [\"B\", \"2\"], [\"C\", np.nan]],\r\n columns=[\"letter\", \"number\"],\r\n index=index,\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extract_single_group_returns_frame(any_string_dtype):\r\n # GH11386 extract should always return DataFrame, even when\r\n # there is only one group. 
Prior to v0.18.0, extract returned\r\n # Series when there was only one group in the regex.\r\n s = Series([\"a3\", \"b3\", \"c2\"], name=\"series_name\", dtype=any_string_dtype)\r\n result = s.str.extract(r\"(?P<letter>[a-z])\", expand=True)\r\n expected = DataFrame({\"letter\": [\"a\", \"b\", \"c\"]}, dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extractall(any_string_dtype):\r\n data = [\r\n \"[email protected]\",\r\n \"[email protected]\",\r\n \"[email protected]\",\r\n \"[email protected] some text [email protected]\",\r\n \"[email protected] some text [email protected] and [email protected]\",\r\n np.nan,\r\n \"\",\r\n ]\r\n expected_tuples = [\r\n (\"dave\", \"google\", \"com\"),\r\n (\"tdhock5\", \"gmail\", \"com\"),\r\n (\"maudelaperriere\", \"gmail\", \"com\"),\r\n (\"rob\", \"gmail\", \"com\"),\r\n (\"steve\", \"gmail\", \"com\"),\r\n (\"a\", \"b\", \"com\"),\r\n (\"c\", \"d\", \"com\"),\r\n (\"e\", \"f\", \"com\"),\r\n ]\r\n pat = r\"\"\"\r\n (?P<user>[a-z0-9]+)\r\n @\r\n (?P<domain>[a-z]+)\r\n \\.\r\n (?P<tld>[a-z]{2,4})\r\n \"\"\"\r\n expected_columns = [\"user\", \"domain\", \"tld\"]\r\n s = Series(data, dtype=any_string_dtype)\r\n # extractall should return a DataFrame with one row for each match, indexed by the\r\n # subject from which the match came.\r\n expected_index = MultiIndex.from_tuples(\r\n [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)],\r\n names=(None, \"match\"),\r\n )\r\n expected = DataFrame(\r\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\r\n )\r\n result = s.str.extractall(pat, flags=re.VERBOSE)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # The index of the input Series should be used to construct the index of the output\r\n # DataFrame:\r\n mi = MultiIndex.from_tuples(\r\n [\r\n (\"single\", \"Dave\"),\r\n (\"single\", \"Toby\"),\r\n (\"single\", \"Maude\"),\r\n (\"multiple\", \"robAndSteve\"),\r\n (\"multiple\", \"abcdef\"),\r\n (\"none\", \"missing\"),\r\n (\"none\", \"empty\"),\r\n ]\r\n )\r\n s = Series(data, index=mi, dtype=any_string_dtype)\r\n expected_index = MultiIndex.from_tuples(\r\n [\r\n (\"single\", \"Dave\", 0),\r\n (\"single\", \"Toby\", 0),\r\n (\"single\", \"Maude\", 0),\r\n (\"multiple\", \"robAndSteve\", 0),\r\n (\"multiple\", \"robAndSteve\", 1),\r\n (\"multiple\", \"abcdef\", 0),\r\n (\"multiple\", \"abcdef\", 1),\r\n (\"multiple\", \"abcdef\", 2),\r\n ],\r\n names=(None, None, \"match\"),\r\n )\r\n expected = DataFrame(\r\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\r\n )\r\n result = s.str.extractall(pat, flags=re.VERBOSE)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # MultiIndexed subject with names.\r\n s = Series(data, index=mi, dtype=any_string_dtype)\r\n s.index.names = (\"matches\", \"description\")\r\n expected_index.names = (\"matches\", \"description\", \"match\")\r\n expected = DataFrame(\r\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\r\n )\r\n result = s.str.extractall(pat, flags=re.VERBOSE)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"pat,expected_names\",\r\n [\r\n # optional groups.\r\n (\"(?P<letter>[AB])?(?P<number>[123])\", [\"letter\", \"number\"]),\r\n # only one of two groups has a name.\r\n (\"([AB])?(?P<number>[123])\", [0, \"number\"]),\r\n ],\r\n)\r\ndef test_extractall_column_names(pat, expected_names, any_string_dtype):\r\n s = Series([\"\", \"A1\", \"32\"], dtype=any_string_dtype)\r\n\r\n result = 
s.str.extractall(pat)\r\n expected = DataFrame(\r\n [(\"A\", \"1\"), (np.nan, \"3\"), (np.nan, \"2\")],\r\n index=MultiIndex.from_tuples([(1, 0), (2, 0), (2, 1)], names=(None, \"match\")),\r\n columns=expected_names,\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extractall_single_group(any_string_dtype):\r\n s = Series([\"a3\", \"b3\", \"d4c2\"], name=\"series_name\", dtype=any_string_dtype)\r\n expected_index = MultiIndex.from_tuples(\r\n [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, \"match\")\r\n )\r\n\r\n # extractall(one named group) returns DataFrame with one named column.\r\n result = s.str.extractall(r\"(?P<letter>[a-z])\")\r\n expected = DataFrame(\r\n {\"letter\": [\"a\", \"b\", \"d\", \"c\"]}, index=expected_index, dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # extractall(one un-named group) returns DataFrame with one un-named column.\r\n result = s.str.extractall(r\"([a-z])\")\r\n expected = DataFrame(\r\n [\"a\", \"b\", \"d\", \"c\"], index=expected_index, dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extractall_single_group_with_quantifier(any_string_dtype):\r\n # GH#13382\r\n # extractall(one un-named group with quantifier) returns DataFrame with one un-named\r\n # column.\r\n s = Series([\"ab3\", \"abc3\", \"d4cd2\"], name=\"series_name\", dtype=any_string_dtype)\r\n result = s.str.extractall(r\"([a-z]+)\")\r\n expected = DataFrame(\r\n [\"ab\", \"abc\", \"d\", \"cd\"],\r\n index=MultiIndex.from_tuples(\r\n [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, \"match\")\r\n ),\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"data, names\",\r\n [\r\n ([], (None,)),\r\n ([], (\"i1\",)),\r\n ([], (None, \"i2\")),\r\n ([], (\"i1\", \"i2\")),\r\n ([\"a3\", \"b3\", \"d4c2\"], (None,)),\r\n ([\"a3\", \"b3\", \"d4c2\"], (\"i1\", \"i2\")),\r\n ([\"a3\", \"b3\", \"d4c2\"], (None, \"i2\")),\r\n ([\"a3\", \"b3\", \"d4c2\"], (\"i1\", \"i2\")),\r\n ],\r\n)\r\ndef test_extractall_no_matches(data, names, any_string_dtype):\r\n # GH19075 extractall with no matches should return a valid MultiIndex\r\n n = len(data)\r\n if len(names) == 1:\r\n index = Index(range(n), name=names[0])\r\n else:\r\n tuples = (tuple([i] * (n - 1)) for i in range(n))\r\n index = MultiIndex.from_tuples(tuples, names=names)\r\n s = Series(data, name=\"series_name\", index=index, dtype=any_string_dtype)\r\n expected_index = MultiIndex.from_tuples([], names=(names + (\"match\",)))\r\n\r\n # one un-named group.\r\n result = s.str.extractall(\"(z)\")\r\n expected = DataFrame(columns=[0], index=expected_index, dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # two un-named groups.\r\n result = s.str.extractall(\"(z)(z)\")\r\n expected = DataFrame(columns=[0, 1], index=expected_index, dtype=any_string_dtype)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one named group.\r\n result = s.str.extractall(\"(?P<first>z)\")\r\n expected = DataFrame(\r\n columns=[\"first\"], index=expected_index, dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # two named groups.\r\n result = s.str.extractall(\"(?P<first>z)(?P<second>z)\")\r\n expected = DataFrame(\r\n columns=[\"first\", \"second\"], index=expected_index, dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # one named, one un-named.\r\n result = s.str.extractall(\"(z)(?P<second>z)\")\r\n 
expected = DataFrame(\r\n columns=[0, \"second\"], index=expected_index, dtype=any_string_dtype\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extractall_stringindex(any_string_dtype):\r\n s = Series([\"a1a2\", \"b1\", \"c1\"], name=\"xxx\", dtype=any_string_dtype)\r\n result = s.str.extractall(r\"[ab](?P<digit>\\d)\")\r\n expected = DataFrame(\r\n {\"digit\": [\"1\", \"2\", \"1\"]},\r\n index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, \"match\"]),\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # index should return the same result as the default index without name thus\r\n # index.name doesn't affect to the result\r\n if any_string_dtype == \"object\":\r\n for idx in [\r\n Index([\"a1a2\", \"b1\", \"c1\"]),\r\n Index([\"a1a2\", \"b1\", \"c1\"], name=\"xxx\"),\r\n ]:\r\n\r\n result = idx.str.extractall(r\"[ab](?P<digit>\\d)\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n s = Series(\r\n [\"a1a2\", \"b1\", \"c1\"],\r\n name=\"s_name\",\r\n index=Index([\"XX\", \"yy\", \"zz\"], name=\"idx_name\"),\r\n dtype=any_string_dtype,\r\n )\r\n result = s.str.extractall(r\"[ab](?P<digit>\\d)\")\r\n expected = DataFrame(\r\n {\"digit\": [\"1\", \"2\", \"1\"]},\r\n index=MultiIndex.from_tuples(\r\n [(\"XX\", 0), (\"XX\", 1), (\"yy\", 0)], names=[\"idx_name\", \"match\"]\r\n ),\r\n dtype=any_string_dtype,\r\n )\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_extractall_no_capture_groups_raises(any_string_dtype):\r\n # Does not make sense to use extractall with a regex that has no capture groups.\r\n # (it returns DataFrame with one column for each capture group)\r\n s = Series([\"a3\", \"b3\", \"d4c2\"], name=\"series_name\", dtype=any_string_dtype)\r\n with pytest.raises(ValueError, match=\"no capture groups\"):\r\n s.str.extractall(r\"[a-z]\")\r\n\r\n\r\ndef test_extract_index_one_two_groups():\r\n s = Series([\"a3\", \"b3\", \"d4c2\"], index=[\"A3\", \"B3\", \"D4\"], name=\"series_name\")\r\n r = s.index.str.extract(r\"([A-Z])\", expand=True)\r\n e = DataFrame([\"A\", \"B\", \"D\"])\r\n tm.assert_frame_equal(r, e)\r\n\r\n # Prior to v0.18.0, index.str.extract(regex with one group)\r\n # returned Index. With more than one group, extract raised an\r\n # error (GH9980). 
Now extract always returns DataFrame.\r\n r = s.index.str.extract(r\"(?P<letter>[A-Z])(?P<digit>[0-9])\", expand=True)\r\n e_list = [(\"A\", \"3\"), (\"B\", \"3\"), (\"D\", \"4\")]\r\n e = DataFrame(e_list, columns=[\"letter\", \"digit\"])\r\n tm.assert_frame_equal(r, e)\r\n\r\n\r\ndef test_extractall_same_as_extract(any_string_dtype):\r\n s = Series([\"a3\", \"b3\", \"c2\"], name=\"series_name\", dtype=any_string_dtype)\r\n\r\n pattern_two_noname = r\"([a-z])([0-9])\"\r\n extract_two_noname = s.str.extract(pattern_two_noname, expand=True)\r\n has_multi_index = s.str.extractall(pattern_two_noname)\r\n no_multi_index = has_multi_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_two_noname, no_multi_index)\r\n\r\n pattern_two_named = r\"(?P<letter>[a-z])(?P<digit>[0-9])\"\r\n extract_two_named = s.str.extract(pattern_two_named, expand=True)\r\n has_multi_index = s.str.extractall(pattern_two_named)\r\n no_multi_index = has_multi_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_two_named, no_multi_index)\r\n\r\n pattern_one_named = r\"(?P<group_name>[a-z])\"\r\n extract_one_named = s.str.extract(pattern_one_named, expand=True)\r\n has_multi_index = s.str.extractall(pattern_one_named)\r\n no_multi_index = has_multi_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_one_named, no_multi_index)\r\n\r\n pattern_one_noname = r\"([a-z])\"\r\n extract_one_noname = s.str.extract(pattern_one_noname, expand=True)\r\n has_multi_index = s.str.extractall(pattern_one_noname)\r\n no_multi_index = has_multi_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_one_noname, no_multi_index)\r\n\r\n\r\ndef test_extractall_same_as_extract_subject_index(any_string_dtype):\r\n # same as above tests, but s has an MultiIndex.\r\n mi = MultiIndex.from_tuples(\r\n [(\"A\", \"first\"), (\"B\", \"second\"), (\"C\", \"third\")],\r\n names=(\"capital\", \"ordinal\"),\r\n )\r\n s = Series([\"a3\", \"b3\", \"c2\"], index=mi, name=\"series_name\", dtype=any_string_dtype)\r\n\r\n pattern_two_noname = r\"([a-z])([0-9])\"\r\n extract_two_noname = s.str.extract(pattern_two_noname, expand=True)\r\n has_match_index = s.str.extractall(pattern_two_noname)\r\n no_match_index = has_match_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_two_noname, no_match_index)\r\n\r\n pattern_two_named = r\"(?P<letter>[a-z])(?P<digit>[0-9])\"\r\n extract_two_named = s.str.extract(pattern_two_named, expand=True)\r\n has_match_index = s.str.extractall(pattern_two_named)\r\n no_match_index = has_match_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_two_named, no_match_index)\r\n\r\n pattern_one_named = r\"(?P<group_name>[a-z])\"\r\n extract_one_named = s.str.extract(pattern_one_named, expand=True)\r\n has_match_index = s.str.extractall(pattern_one_named)\r\n no_match_index = has_match_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_one_named, no_match_index)\r\n\r\n pattern_one_noname = r\"([a-z])\"\r\n extract_one_noname = s.str.extract(pattern_one_noname, expand=True)\r\n has_match_index = s.str.extractall(pattern_one_noname)\r\n no_match_index = has_match_index.xs(0, level=\"match\")\r\n tm.assert_frame_equal(extract_one_noname, no_match_index)\r\n",
"\"\"\"\r\nThe tests in this package are to ensure the proper resultant dtypes of\r\nset operations.\r\n\"\"\"\r\nfrom datetime import datetime\r\nimport operator\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.dtypes.cast import find_common_type\r\n\r\nfrom pandas import (\r\n CategoricalIndex,\r\n DatetimeIndex,\r\n Float64Index,\r\n Index,\r\n Int64Index,\r\n MultiIndex,\r\n Series,\r\n TimedeltaIndex,\r\n Timestamp,\r\n UInt64Index,\r\n)\r\nimport pandas._testing as tm\r\nfrom pandas.api.types import (\r\n is_datetime64tz_dtype,\r\n is_signed_integer_dtype,\r\n pandas_dtype,\r\n)\r\n\r\nCOMPATIBLE_INCONSISTENT_PAIRS = [\r\n (np.float64, np.int64),\r\n (np.float64, np.uint64),\r\n]\r\n\r\n\r\ndef test_union_same_types(index):\r\n # Union with a non-unique, non-monotonic index raises error\r\n # Only needed for bool index factory\r\n idx1 = index.sort_values()\r\n idx2 = index.sort_values()\r\n assert idx1.union(idx2).dtype == idx1.dtype\r\n\r\n\r\ndef test_union_different_types(index_flat, index_flat2):\r\n # This test only considers combinations of indices\r\n # GH 23525\r\n idx1 = index_flat\r\n idx2 = index_flat2\r\n\r\n common_dtype = find_common_type([idx1.dtype, idx2.dtype])\r\n\r\n any_uint64 = idx1.dtype == np.uint64 or idx2.dtype == np.uint64\r\n idx1_signed = is_signed_integer_dtype(idx1.dtype)\r\n idx2_signed = is_signed_integer_dtype(idx2.dtype)\r\n\r\n # Union with a non-unique, non-monotonic index raises error\r\n # This applies to the boolean index\r\n idx1 = idx1.sort_values()\r\n idx2 = idx2.sort_values()\r\n\r\n res1 = idx1.union(idx2)\r\n res2 = idx2.union(idx1)\r\n\r\n if any_uint64 and (idx1_signed or idx2_signed):\r\n assert res1.dtype == np.dtype(\"O\")\r\n assert res2.dtype == np.dtype(\"O\")\r\n else:\r\n assert res1.dtype == common_dtype\r\n assert res2.dtype == common_dtype\r\n\r\n\r\[email protected](\r\n \"idx_fact1,idx_fact2\",\r\n [\r\n (tm.makeIntIndex, tm.makeRangeIndex),\r\n (tm.makeFloatIndex, tm.makeIntIndex),\r\n (tm.makeFloatIndex, tm.makeRangeIndex),\r\n (tm.makeFloatIndex, tm.makeUIntIndex),\r\n ],\r\n)\r\ndef test_compatible_inconsistent_pairs(idx_fact1, idx_fact2):\r\n # GH 23525\r\n idx1 = idx_fact1(10)\r\n idx2 = idx_fact2(20)\r\n\r\n res1 = idx1.union(idx2)\r\n res2 = idx2.union(idx1)\r\n\r\n assert res1.dtype in (idx1.dtype, idx2.dtype)\r\n assert res2.dtype in (idx1.dtype, idx2.dtype)\r\n\r\n\r\[email protected](\r\n \"left, right, expected\",\r\n [\r\n (\"int64\", \"int64\", \"int64\"),\r\n (\"int64\", \"uint64\", \"object\"),\r\n (\"int64\", \"float64\", \"float64\"),\r\n (\"uint64\", \"float64\", \"float64\"),\r\n (\"uint64\", \"uint64\", \"uint64\"),\r\n (\"float64\", \"float64\", \"float64\"),\r\n (\"datetime64[ns]\", \"int64\", \"object\"),\r\n (\"datetime64[ns]\", \"uint64\", \"object\"),\r\n (\"datetime64[ns]\", \"float64\", \"object\"),\r\n (\"datetime64[ns, CET]\", \"int64\", \"object\"),\r\n (\"datetime64[ns, CET]\", \"uint64\", \"object\"),\r\n (\"datetime64[ns, CET]\", \"float64\", \"object\"),\r\n (\"Period[D]\", \"int64\", \"object\"),\r\n (\"Period[D]\", \"uint64\", \"object\"),\r\n (\"Period[D]\", \"float64\", \"object\"),\r\n ],\r\n)\r\[email protected](\"names\", [(\"foo\", \"foo\", \"foo\"), (\"foo\", \"bar\", None)])\r\ndef test_union_dtypes(left, right, expected, names):\r\n left = pandas_dtype(left)\r\n right = pandas_dtype(right)\r\n a = Index([], dtype=left, name=names[0])\r\n b = Index([], dtype=right, name=names[1])\r\n result = a.union(b)\r\n assert result.dtype == expected\r\n assert 
result.name == names[2]\r\n\r\n # Testing name retention\r\n # TODO: pin down desired dtype; do we want it to be commutative?\r\n result = a.intersection(b)\r\n assert result.name == names[2]\r\n\r\n\r\ndef test_dunder_inplace_setops_deprecated(index):\r\n # GH#37374 these will become logical ops, not setops\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n index |= index\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n index &= index\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n index ^= index\r\n\r\n\r\[email protected](\"values\", [[1, 2, 2, 3], [3, 3]])\r\ndef test_intersection_duplicates(values):\r\n # GH#31326\r\n a = Index(values)\r\n b = Index([3, 3])\r\n result = a.intersection(b)\r\n expected = Index([3])\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\nclass TestSetOps:\r\n # Set operation tests shared by all indexes in the `index` fixture\r\n @pytest.mark.parametrize(\"case\", [0.5, \"xxx\"])\r\n @pytest.mark.parametrize(\r\n \"method\", [\"intersection\", \"union\", \"difference\", \"symmetric_difference\"]\r\n )\r\n def test_set_ops_error_cases(self, case, method, index):\r\n # non-iterable input\r\n msg = \"Input must be Index or array-like\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(index, method)(case)\r\n\r\n def test_intersection_base(self, index):\r\n if isinstance(index, CategoricalIndex):\r\n return\r\n\r\n first = index[:5]\r\n second = index[:3]\r\n intersect = first.intersection(second)\r\n assert tm.equalContents(intersect, second)\r\n\r\n if is_datetime64tz_dtype(index.dtype):\r\n # The second.values below will drop tz, so the rest of this test\r\n # is not applicable.\r\n return\r\n\r\n # GH#10149\r\n cases = [klass(second.values) for klass in [np.array, Series, list]]\r\n for case in cases:\r\n result = first.intersection(case)\r\n assert tm.equalContents(result, second)\r\n\r\n if isinstance(index, MultiIndex):\r\n msg = \"other must be a MultiIndex or a list of tuples\"\r\n with pytest.raises(TypeError, match=msg):\r\n first.intersection([1, 2, 3])\r\n\r\n def test_union_base(self, index):\r\n first = index[3:]\r\n second = index[:5]\r\n everything = index\r\n union = first.union(second)\r\n assert tm.equalContents(union, everything)\r\n\r\n if is_datetime64tz_dtype(index.dtype):\r\n # The second.values below will drop tz, so the rest of this test\r\n # is not applicable.\r\n return\r\n\r\n # GH#10149\r\n cases = [klass(second.values) for klass in [np.array, Series, list]]\r\n for case in cases:\r\n if not isinstance(index, CategoricalIndex):\r\n result = first.union(case)\r\n assert tm.equalContents(result, everything), (\r\n result,\r\n everything,\r\n type(case),\r\n )\r\n\r\n if isinstance(index, MultiIndex):\r\n msg = \"other must be a MultiIndex or a list of tuples\"\r\n with pytest.raises(TypeError, match=msg):\r\n first.union([1, 2, 3])\r\n\r\n def test_difference_base(self, sort, index):\r\n first = index[2:]\r\n second = index[:4]\r\n if isinstance(index, CategoricalIndex) or index.is_boolean():\r\n answer = []\r\n else:\r\n answer = index[4:]\r\n result = first.difference(second, sort)\r\n assert tm.equalContents(result, answer)\r\n\r\n # GH#10149\r\n cases = [klass(second.values) for klass in [np.array, Series, list]]\r\n for case in cases:\r\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\r\n assert type(result) == type(answer)\r\n tm.assert_numpy_array_equal(\r\n result.sort_values().asi8, answer.sort_values().asi8\r\n )\r\n else:\r\n result = first.difference(case, sort)\r\n 
assert tm.equalContents(result, answer)\r\n\r\n if isinstance(index, MultiIndex):\r\n msg = \"other must be a MultiIndex or a list of tuples\"\r\n with pytest.raises(TypeError, match=msg):\r\n first.difference([1, 2, 3], sort)\r\n\r\n def test_symmetric_difference(self, index):\r\n if isinstance(index, CategoricalIndex):\r\n return\r\n if len(index) < 2:\r\n return\r\n if index[0] in index[1:] or index[-1] in index[:-1]:\r\n # index fixture has e.g. an index of bools that does not satisfy this,\r\n # another with [0, 0, 1, 1, 2, 2]\r\n return\r\n\r\n first = index[1:]\r\n second = index[:-1]\r\n answer = index[[0, -1]]\r\n result = first.symmetric_difference(second)\r\n assert tm.equalContents(result, answer)\r\n\r\n # GH#10149\r\n cases = [klass(second.values) for klass in [np.array, Series, list]]\r\n for case in cases:\r\n result = first.symmetric_difference(case)\r\n\r\n if is_datetime64tz_dtype(first):\r\n # second.values casts to tznaive\r\n expected = first.union(case)\r\n tm.assert_index_equal(result, expected)\r\n continue\r\n\r\n assert tm.equalContents(result, answer)\r\n\r\n if isinstance(index, MultiIndex):\r\n msg = \"other must be a MultiIndex or a list of tuples\"\r\n with pytest.raises(TypeError, match=msg):\r\n first.symmetric_difference([1, 2, 3])\r\n\r\n @pytest.mark.parametrize(\r\n \"fname, sname, expected_name\",\r\n [\r\n (\"A\", \"A\", \"A\"),\r\n (\"A\", \"B\", None),\r\n (\"A\", None, None),\r\n (None, \"B\", None),\r\n (None, None, None),\r\n ],\r\n )\r\n def test_corner_union(self, index_flat, fname, sname, expected_name):\r\n # GH#9943, GH#9862\r\n # Test unions with various name combinations\r\n # Do not test MultiIndex or repeats\r\n index = index_flat\r\n if not index.is_unique:\r\n pytest.skip(\"Not for MultiIndex or repeated indices\")\r\n\r\n # Test copy.union(copy)\r\n first = index.copy().set_names(fname)\r\n second = index.copy().set_names(sname)\r\n union = first.union(second)\r\n expected = index.copy().set_names(expected_name)\r\n tm.assert_index_equal(union, expected)\r\n\r\n # Test copy.union(empty)\r\n first = index.copy().set_names(fname)\r\n second = index.drop(index).set_names(sname)\r\n union = first.union(second)\r\n expected = index.copy().set_names(expected_name)\r\n tm.assert_index_equal(union, expected)\r\n\r\n # Test empty.union(copy)\r\n first = index.drop(index).set_names(fname)\r\n second = index.copy().set_names(sname)\r\n union = first.union(second)\r\n expected = index.copy().set_names(expected_name)\r\n tm.assert_index_equal(union, expected)\r\n\r\n # Test empty.union(empty)\r\n first = index.drop(index).set_names(fname)\r\n second = index.drop(index).set_names(sname)\r\n union = first.union(second)\r\n expected = index.drop(index).set_names(expected_name)\r\n tm.assert_index_equal(union, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"fname, sname, expected_name\",\r\n [\r\n (\"A\", \"A\", \"A\"),\r\n (\"A\", \"B\", None),\r\n (\"A\", None, None),\r\n (None, \"B\", None),\r\n (None, None, None),\r\n ],\r\n )\r\n def test_union_unequal(self, index_flat, fname, sname, expected_name):\r\n index = index_flat\r\n if not index.is_unique:\r\n pytest.skip(\"Not for MultiIndex or repeated indices\")\r\n\r\n # test copy.union(subset) - need sort for unicode and string\r\n first = index.copy().set_names(fname)\r\n second = index[1:].set_names(sname)\r\n union = first.union(second).sort_values()\r\n expected = index.set_names(expected_name).sort_values()\r\n tm.assert_index_equal(union, expected)\r\n\r\n @pytest.mark.parametrize(\r\n 
\"fname, sname, expected_name\",\r\n [\r\n (\"A\", \"A\", \"A\"),\r\n (\"A\", \"B\", None),\r\n (\"A\", None, None),\r\n (None, \"B\", None),\r\n (None, None, None),\r\n ],\r\n )\r\n def test_corner_intersect(self, index_flat, fname, sname, expected_name):\r\n # GH#35847\r\n # Test intersections with various name combinations\r\n index = index_flat\r\n if not index.is_unique:\r\n pytest.skip(\"Not for MultiIndex or repeated indices\")\r\n\r\n # Test copy.intersection(copy)\r\n first = index.copy().set_names(fname)\r\n second = index.copy().set_names(sname)\r\n intersect = first.intersection(second)\r\n expected = index.copy().set_names(expected_name)\r\n tm.assert_index_equal(intersect, expected)\r\n\r\n # Test copy.intersection(empty)\r\n first = index.copy().set_names(fname)\r\n second = index.drop(index).set_names(sname)\r\n intersect = first.intersection(second)\r\n expected = index.drop(index).set_names(expected_name)\r\n tm.assert_index_equal(intersect, expected)\r\n\r\n # Test empty.intersection(copy)\r\n first = index.drop(index).set_names(fname)\r\n second = index.copy().set_names(sname)\r\n intersect = first.intersection(second)\r\n expected = index.drop(index).set_names(expected_name)\r\n tm.assert_index_equal(intersect, expected)\r\n\r\n # Test empty.intersection(empty)\r\n first = index.drop(index).set_names(fname)\r\n second = index.drop(index).set_names(sname)\r\n intersect = first.intersection(second)\r\n expected = index.drop(index).set_names(expected_name)\r\n tm.assert_index_equal(intersect, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"fname, sname, expected_name\",\r\n [\r\n (\"A\", \"A\", \"A\"),\r\n (\"A\", \"B\", None),\r\n (\"A\", None, None),\r\n (None, \"B\", None),\r\n (None, None, None),\r\n ],\r\n )\r\n def test_intersect_unequal(self, index_flat, fname, sname, expected_name):\r\n index = index_flat\r\n if not index.is_unique:\r\n pytest.skip(\"Not for MultiIndex or repeated indices\")\r\n\r\n # test copy.intersection(subset) - need sort for unicode and string\r\n first = index.copy().set_names(fname)\r\n second = index[1:].set_names(sname)\r\n intersect = first.intersection(second).sort_values()\r\n expected = index[1:].set_names(expected_name).sort_values()\r\n tm.assert_index_equal(intersect, expected)\r\n\r\n def test_intersection_name_retention_with_nameless(self, index):\r\n if isinstance(index, MultiIndex):\r\n index = index.rename(list(range(index.nlevels)))\r\n else:\r\n index = index.rename(\"foo\")\r\n\r\n other = np.asarray(index)\r\n\r\n result = index.intersection(other)\r\n assert result.name == index.name\r\n\r\n # empty other, same dtype\r\n result = index.intersection(other[:0])\r\n assert result.name == index.name\r\n\r\n # empty `self`\r\n result = index[:0].intersection(other)\r\n assert result.name == index.name\r\n\r\n def test_difference_preserves_type_empty(self, index, sort):\r\n # GH#20040\r\n # If taking difference of a set and itself, it\r\n # needs to preserve the type of the index\r\n if not index.is_unique:\r\n return\r\n result = index.difference(index, sort=sort)\r\n expected = index[:0]\r\n tm.assert_index_equal(result, expected, exact=True)\r\n\r\n def test_difference_name_retention_equals(self, index, sort, names):\r\n if isinstance(index, MultiIndex):\r\n names = [[x] * index.nlevels for x in names]\r\n index = index.rename(names[0])\r\n other = index.rename(names[1])\r\n\r\n assert index.equals(other)\r\n\r\n result = index.difference(other)\r\n expected = index[:0].rename(names[2])\r\n tm.assert_index_equal(result, 
expected)\r\n\r\n def test_intersection_difference_match_empty(self, index, sort):\r\n # GH#20040\r\n # Test that the intersection of an index with an\r\n # empty index produces the same index as the difference\r\n # of an index with itself. Test for all types\r\n if not index.is_unique:\r\n return\r\n inter = index.intersection(index[:0])\r\n diff = index.difference(index, sort=sort)\r\n tm.assert_index_equal(inter, diff, exact=True)\r\n\r\n\r\[email protected](\r\n \"method\", [\"intersection\", \"union\", \"difference\", \"symmetric_difference\"]\r\n)\r\ndef test_setop_with_categorical(index, sort, method):\r\n if isinstance(index, MultiIndex):\r\n # tested separately in tests.indexes.multi.test_setops\r\n return\r\n\r\n other = index.astype(\"category\")\r\n\r\n result = getattr(index, method)(other, sort=sort)\r\n expected = getattr(index, method)(index, sort=sort)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = getattr(index, method)(other[:5], sort=sort)\r\n expected = getattr(index, method)(index[:5], sort=sort)\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\ndef test_intersection_duplicates_all_indexes(index):\r\n # GH#38743\r\n if index.empty:\r\n # No duplicates in empty indexes\r\n return\r\n\r\n def check_intersection_commutative(left, right):\r\n assert left.intersection(right).equals(right.intersection(left))\r\n\r\n idx = index\r\n idx_non_unique = idx[[0, 0, 1, 2]]\r\n\r\n check_intersection_commutative(idx, idx_non_unique)\r\n assert idx.intersection(idx_non_unique).is_unique\r\n\r\n\r\[email protected](\r\n \"cls\",\r\n [\r\n Int64Index,\r\n Float64Index,\r\n DatetimeIndex,\r\n CategoricalIndex,\r\n lambda x: CategoricalIndex(x, categories=set(x)),\r\n TimedeltaIndex,\r\n lambda x: Index(x, dtype=object),\r\n UInt64Index,\r\n ],\r\n)\r\ndef test_union_duplicate_index_subsets_of_each_other(cls):\r\n # GH#31326\r\n a = cls([1, 2, 2, 3])\r\n b = cls([3, 3, 4])\r\n expected = cls([1, 2, 2, 3, 3, 4])\r\n if isinstance(a, CategoricalIndex):\r\n expected = Index([1, 2, 2, 3, 3, 4])\r\n result = a.union(b)\r\n tm.assert_index_equal(result, expected)\r\n result = a.union(b, sort=False)\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"cls\",\r\n [\r\n Int64Index,\r\n Float64Index,\r\n DatetimeIndex,\r\n CategoricalIndex,\r\n TimedeltaIndex,\r\n lambda x: Index(x, dtype=object),\r\n ],\r\n)\r\ndef test_union_with_duplicate_index_and_non_monotonic(cls):\r\n # GH#36289\r\n a = cls([1, 0, 0])\r\n b = cls([0, 1])\r\n expected = cls([0, 0, 1])\r\n\r\n result = a.union(b)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = b.union(a)\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\ndef test_union_duplicate_index_different_dtypes():\r\n # GH#36289\r\n a = Index([1, 2, 2, 3])\r\n b = Index([\"1\", \"0\", \"0\"])\r\n expected = Index([1, 2, 2, 3, \"1\", \"0\", \"0\"])\r\n result = a.union(b, sort=False)\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\ndef test_union_same_value_duplicated_in_both():\r\n # GH#36289\r\n a = Index([0, 0, 1])\r\n b = Index([0, 0, 1, 2])\r\n result = a.union(b)\r\n expected = Index([0, 0, 1, 2])\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\[email protected](\"dup\", [1, np.nan])\r\ndef test_union_nan_in_both(dup):\r\n # GH#36289\r\n a = Index([np.nan, 1, 2, 2])\r\n b = Index([np.nan, dup, 1, 2])\r\n result = a.union(b, sort=False)\r\n expected = Index([np.nan, dup, 1.0, 2.0, 2.0])\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"cls\",\r\n [\r\n 
Int64Index,\r\n Float64Index,\r\n DatetimeIndex,\r\n TimedeltaIndex,\r\n lambda x: Index(x, dtype=object),\r\n ],\r\n)\r\ndef test_union_with_duplicate_index_not_subset_and_non_monotonic(cls):\r\n # GH#36289\r\n a = cls([1, 0, 2])\r\n b = cls([0, 0, 1])\r\n expected = cls([0, 0, 1, 2])\r\n\r\n result = a.union(b)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = b.union(a)\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\nclass TestSetOpsUnsorted:\r\n # These may eventually belong in a dtype-specific test_setops, or\r\n # parametrized over a more general fixture\r\n def test_intersect_str_dates(self):\r\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\r\n\r\n index1 = Index(dt_dates, dtype=object)\r\n index2 = Index([\"aa\"], dtype=object)\r\n result = index2.intersection(index1)\r\n\r\n expected = Index([], dtype=object)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_intersection(self, index, sort):\r\n first = index[:20]\r\n second = index[:10]\r\n intersect = first.intersection(second, sort=sort)\r\n if sort is None:\r\n tm.assert_index_equal(intersect, second.sort_values())\r\n assert tm.equalContents(intersect, second)\r\n\r\n # Corner cases\r\n inter = first.intersection(first, sort=sort)\r\n assert inter is first\r\n\r\n @pytest.mark.parametrize(\r\n \"index2,keeps_name\",\r\n [\r\n (Index([3, 4, 5, 6, 7], name=\"index\"), True), # preserve same name\r\n (Index([3, 4, 5, 6, 7], name=\"other\"), False), # drop diff names\r\n (Index([3, 4, 5, 6, 7]), False),\r\n ],\r\n )\r\n def test_intersection_name_preservation(self, index2, keeps_name, sort):\r\n index1 = Index([1, 2, 3, 4, 5], name=\"index\")\r\n expected = Index([3, 4, 5])\r\n result = index1.intersection(index2, sort)\r\n\r\n if keeps_name:\r\n expected.name = \"index\"\r\n\r\n assert result.name == expected.name\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n @pytest.mark.parametrize(\r\n \"first_name,second_name,expected_name\",\r\n [(\"A\", \"A\", \"A\"), (\"A\", \"B\", None), (None, \"B\", None)],\r\n )\r\n def test_intersection_name_preservation2(\r\n self, index, first_name, second_name, expected_name, sort\r\n ):\r\n first = index[5:20]\r\n second = index[:10]\r\n first.name = first_name\r\n second.name = second_name\r\n intersect = first.intersection(second, sort=sort)\r\n assert intersect.name == expected_name\r\n\r\n def test_chained_union(self, sort):\r\n # Chained unions handles names correctly\r\n i1 = Index([1, 2], name=\"i1\")\r\n i2 = Index([5, 6], name=\"i2\")\r\n i3 = Index([3, 4], name=\"i3\")\r\n union = i1.union(i2.union(i3, sort=sort), sort=sort)\r\n expected = i1.union(i2, sort=sort).union(i3, sort=sort)\r\n tm.assert_index_equal(union, expected)\r\n\r\n j1 = Index([1, 2], name=\"j1\")\r\n j2 = Index([], name=\"j2\")\r\n j3 = Index([], name=\"j3\")\r\n union = j1.union(j2.union(j3, sort=sort), sort=sort)\r\n expected = j1.union(j2, sort=sort).union(j3, sort=sort)\r\n tm.assert_index_equal(union, expected)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_union(self, index, sort):\r\n first = index[5:20]\r\n second = index[:10]\r\n everything = index[:20]\r\n\r\n union = first.union(second, sort=sort)\r\n if sort is None:\r\n tm.assert_index_equal(union, everything.sort_values())\r\n assert tm.equalContents(union, everything)\r\n\r\n @pytest.mark.parametrize(\"klass\", [np.array, Series, 
list])\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_union_from_iterables(self, index, klass, sort):\r\n # GH#10149\r\n first = index[5:20]\r\n second = index[:10]\r\n everything = index[:20]\r\n\r\n case = klass(second.values)\r\n result = first.union(case, sort=sort)\r\n if sort is None:\r\n tm.assert_index_equal(result, everything.sort_values())\r\n assert tm.equalContents(result, everything)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_union_identity(self, index, sort):\r\n first = index[5:20]\r\n\r\n union = first.union(first, sort=sort)\r\n # i.e. identity is not preserved when sort is True\r\n assert (union is first) is (not sort)\r\n\r\n # This should no longer be the same object, since [] is not consistent,\r\n # both objects will be recast to dtype('O')\r\n union = first.union([], sort=sort)\r\n assert (union is first) is (not sort)\r\n\r\n union = Index([]).union(first, sort=sort)\r\n assert (union is first) is (not sort)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n @pytest.mark.parametrize(\"second_name,expected\", [(None, None), (\"name\", \"name\")])\r\n def test_difference_name_preservation(self, index, second_name, expected, sort):\r\n first = index[5:20]\r\n second = index[:10]\r\n answer = index[10:20]\r\n\r\n first.name = \"name\"\r\n second.name = second_name\r\n result = first.difference(second, sort=sort)\r\n\r\n assert tm.equalContents(result, answer)\r\n\r\n if expected is None:\r\n assert result.name is None\r\n else:\r\n assert result.name == expected\r\n\r\n def test_difference_empty_arg(self, index, sort):\r\n first = index[5:20]\r\n first.name = \"name\"\r\n result = first.difference([], sort)\r\n\r\n tm.assert_index_equal(result, first)\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_difference_identity(self, index, sort):\r\n first = index[5:20]\r\n first.name = \"name\"\r\n result = first.difference(first, sort)\r\n\r\n assert len(result) == 0\r\n assert result.name == first.name\r\n\r\n @pytest.mark.parametrize(\"index\", [\"string\"], indirect=True)\r\n def test_difference_sort(self, index, sort):\r\n first = index[5:20]\r\n second = index[:10]\r\n\r\n result = first.difference(second, sort)\r\n expected = index[10:20]\r\n\r\n if sort is None:\r\n expected = expected.sort_values()\r\n\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"opname\", [\"difference\", \"symmetric_difference\"])\r\n def test_difference_incomparable(self, opname):\r\n a = Index([3, Timestamp(\"2000\"), 1])\r\n b = Index([2, Timestamp(\"1999\"), 1])\r\n op = operator.methodcaller(opname, b)\r\n\r\n with tm.assert_produces_warning(RuntimeWarning):\r\n # sort=None, the default\r\n result = op(a)\r\n expected = Index([3, Timestamp(\"2000\"), 2, Timestamp(\"1999\")])\r\n if opname == \"difference\":\r\n expected = expected[:2]\r\n tm.assert_index_equal(result, expected)\r\n\r\n # sort=False\r\n op = operator.methodcaller(opname, b, sort=False)\r\n result = op(a)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.xfail(reason=\"Not implemented\")\r\n @pytest.mark.parametrize(\"opname\", [\"difference\", \"symmetric_difference\"])\r\n def test_difference_incomparable_true(self, opname):\r\n # TODO: decide on True behaviour\r\n # # sort=True, raises\r\n a = Index([3, Timestamp(\"2000\"), 1])\r\n b = Index([2, Timestamp(\"1999\"), 1])\r\n op = operator.methodcaller(opname, b, sort=True)\r\n\r\n with 
pytest.raises(TypeError, match=\"Cannot compare\"):\r\n op(a)\r\n\r\n def test_symmetric_difference_mi(self, sort):\r\n index1 = MultiIndex.from_tuples(zip([\"foo\", \"bar\", \"baz\"], [1, 2, 3]))\r\n index2 = MultiIndex.from_tuples([(\"foo\", 1), (\"bar\", 3)])\r\n result = index1.symmetric_difference(index2, sort=sort)\r\n expected = MultiIndex.from_tuples([(\"bar\", 2), (\"baz\", 3), (\"bar\", 3)])\r\n if sort is None:\r\n expected = expected.sort_values()\r\n tm.assert_index_equal(result, expected)\r\n assert tm.equalContents(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"index2,expected\",\r\n [\r\n (Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])),\r\n (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])),\r\n ],\r\n )\r\n def test_symmetric_difference_missing(self, index2, expected, sort):\r\n # GH#13514 change: {nan} - {nan} == {}\r\n # (GH#6444, sorting of nans, is no longer an issue)\r\n index1 = Index([1, np.nan, 2, 3])\r\n\r\n result = index1.symmetric_difference(index2, sort=sort)\r\n if sort is None:\r\n expected = expected.sort_values()\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_symmetric_difference_non_index(self, sort):\r\n index1 = Index([1, 2, 3, 4], name=\"index1\")\r\n index2 = np.array([2, 3, 4, 5])\r\n expected = Index([1, 5])\r\n result = index1.symmetric_difference(index2, sort=sort)\r\n assert tm.equalContents(result, expected)\r\n assert result.name == \"index1\"\r\n\r\n result = index1.symmetric_difference(index2, result_name=\"new_name\", sort=sort)\r\n assert tm.equalContents(result, expected)\r\n assert result.name == \"new_name\"\r\n",
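The set-operation tests above revolve around the `sort` keyword shared by `Index.union`, `intersection`, `difference` and `symmetric_difference`. A minimal, hedged sketch of that behaviour, assuming a recent pandas where `sort=None` means "sort the result when possible" and `sort=False` means "keep order of appearance"; the values and the name "idx" are illustrative only:

import pandas as pd

a = pd.Index([3, 1, 2], name="idx")
b = pd.Index([2, 4], name="idx")

print(a.union(b))                 # sorted result under the default sort=None: [1, 2, 3, 4]
print(a.union(b, sort=False))     # order of first appearance: [3, 1, 2, 4]
print(a.intersection(b))          # [2]
print(a.difference(b))            # [1, 3], sorted under the default
print(a.symmetric_difference(b))  # [1, 3, 4]
# The shared name "idx" is preserved; differing names would be dropped,
# which is what the name-preservation tests above assert.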
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas import (\r\n Categorical,\r\n DataFrame,\r\n Series,\r\n _testing as tm,\r\n concat,\r\n read_hdf,\r\n)\r\nfrom pandas.tests.io.pytables.common import (\r\n _maybe_remove,\r\n ensure_clean_path,\r\n ensure_clean_store,\r\n)\r\n\r\npytestmark = [\r\n pytest.mark.single,\r\n # pytables https://github.com/PyTables/PyTables/issues/822\r\n pytest.mark.filterwarnings(\r\n \"ignore:a closed node found in the registry:UserWarning\"\r\n ),\r\n]\r\n\r\n\r\ndef test_categorical(setup_path):\r\n\r\n with ensure_clean_store(setup_path) as store:\r\n\r\n # Basic\r\n _maybe_remove(store, \"s\")\r\n s = Series(\r\n Categorical(\r\n [\"a\", \"b\", \"b\", \"a\", \"a\", \"c\"],\r\n categories=[\"a\", \"b\", \"c\", \"d\"],\r\n ordered=False,\r\n )\r\n )\r\n store.append(\"s\", s, format=\"table\")\r\n result = store.select(\"s\")\r\n tm.assert_series_equal(s, result)\r\n\r\n _maybe_remove(store, \"s_ordered\")\r\n s = Series(\r\n Categorical(\r\n [\"a\", \"b\", \"b\", \"a\", \"a\", \"c\"],\r\n categories=[\"a\", \"b\", \"c\", \"d\"],\r\n ordered=True,\r\n )\r\n )\r\n store.append(\"s_ordered\", s, format=\"table\")\r\n result = store.select(\"s_ordered\")\r\n tm.assert_series_equal(s, result)\r\n\r\n _maybe_remove(store, \"df\")\r\n df = DataFrame({\"s\": s, \"vals\": [1, 2, 3, 4, 5, 6]})\r\n store.append(\"df\", df, format=\"table\")\r\n result = store.select(\"df\")\r\n tm.assert_frame_equal(result, df)\r\n\r\n # Dtypes\r\n _maybe_remove(store, \"si\")\r\n s = Series([1, 1, 2, 2, 3, 4, 5]).astype(\"category\")\r\n store.append(\"si\", s)\r\n result = store.select(\"si\")\r\n tm.assert_series_equal(result, s)\r\n\r\n _maybe_remove(store, \"si2\")\r\n s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype(\"category\")\r\n store.append(\"si2\", s)\r\n result = store.select(\"si2\")\r\n tm.assert_series_equal(result, s)\r\n\r\n # Multiple\r\n _maybe_remove(store, \"df2\")\r\n df2 = df.copy()\r\n df2[\"s2\"] = Series(list(\"abcdefg\")).astype(\"category\")\r\n store.append(\"df2\", df2)\r\n result = store.select(\"df2\")\r\n tm.assert_frame_equal(result, df2)\r\n\r\n # Make sure the metadata is OK\r\n info = store.info()\r\n assert \"/df2 \" in info\r\n # assert '/df2/meta/values_block_0/meta' in info\r\n assert \"/df2/meta/values_block_1/meta\" in info\r\n\r\n # unordered\r\n _maybe_remove(store, \"s2\")\r\n s = Series(\r\n Categorical(\r\n [\"a\", \"b\", \"b\", \"a\", \"a\", \"c\"],\r\n categories=[\"a\", \"b\", \"c\", \"d\"],\r\n ordered=False,\r\n )\r\n )\r\n store.append(\"s2\", s, format=\"table\")\r\n result = store.select(\"s2\")\r\n tm.assert_series_equal(result, s)\r\n\r\n # Query\r\n _maybe_remove(store, \"df3\")\r\n store.append(\"df3\", df, data_columns=[\"s\"])\r\n expected = df[df.s.isin([\"b\", \"c\"])]\r\n result = store.select(\"df3\", where=['s in [\"b\",\"c\"]'])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = df[df.s.isin([\"b\", \"c\"])]\r\n result = store.select(\"df3\", where=['s = [\"b\",\"c\"]'])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = df[df.s.isin([\"d\"])]\r\n result = store.select(\"df3\", where=['s in [\"d\"]'])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n expected = df[df.s.isin([\"f\"])]\r\n result = store.select(\"df3\", where=['s in [\"f\"]'])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Appending with same categories is ok\r\n store.append(\"df3\", df)\r\n\r\n df = concat([df, df])\r\n expected = df[df.s.isin([\"b\", \"c\"])]\r\n result = store.select(\"df3\", where=['s in 
[\"b\",\"c\"]'])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Appending must have the same categories\r\n df3 = df.copy()\r\n df3[\"s\"] = df3[\"s\"].cat.remove_unused_categories()\r\n\r\n msg = \"cannot append a categorical with different categories to the existing\"\r\n with pytest.raises(ValueError, match=msg):\r\n store.append(\"df3\", df3)\r\n\r\n # Remove, and make sure meta data is removed (its a recursive\r\n # removal so should be).\r\n result = store.select(\"df3/meta/s/meta\")\r\n assert result is not None\r\n store.remove(\"df3\")\r\n\r\n with pytest.raises(\r\n KeyError, match=\"'No object named df3/meta/s/meta in the file'\"\r\n ):\r\n store.select(\"df3/meta/s/meta\")\r\n\r\n\r\ndef test_categorical_conversion(setup_path):\r\n\r\n # GH13322\r\n # Check that read_hdf with categorical columns doesn't return rows if\r\n # where criteria isn't met.\r\n obsids = [\"ESP_012345_6789\", \"ESP_987654_3210\"]\r\n imgids = [\"APF00006np\", \"APF0001imm\"]\r\n data = [4.3, 9.8]\r\n\r\n # Test without categories\r\n df = DataFrame({\"obsids\": obsids, \"imgids\": imgids, \"data\": data})\r\n\r\n # We are expecting an empty DataFrame matching types of df\r\n expected = df.iloc[[], :]\r\n with ensure_clean_path(setup_path) as path:\r\n df.to_hdf(path, \"df\", format=\"table\", data_columns=True)\r\n result = read_hdf(path, \"df\", where=\"obsids=B\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n # Test with categories\r\n df.obsids = df.obsids.astype(\"category\")\r\n df.imgids = df.imgids.astype(\"category\")\r\n\r\n # We are expecting an empty DataFrame matching types of df\r\n expected = df.iloc[[], :]\r\n with ensure_clean_path(setup_path) as path:\r\n df.to_hdf(path, \"df\", format=\"table\", data_columns=True)\r\n result = read_hdf(path, \"df\", where=\"obsids=B\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\ndef test_categorical_nan_only_columns(setup_path):\r\n # GH18413\r\n # Check that read_hdf with categorical columns with NaN-only values can\r\n # be read back.\r\n df = DataFrame(\r\n {\r\n \"a\": [\"a\", \"b\", \"c\", np.nan],\r\n \"b\": [np.nan, np.nan, np.nan, np.nan],\r\n \"c\": [1, 2, 3, 4],\r\n \"d\": Series([None] * 4, dtype=object),\r\n }\r\n )\r\n df[\"a\"] = df.a.astype(\"category\")\r\n df[\"b\"] = df.b.astype(\"category\")\r\n df[\"d\"] = df.b.astype(\"category\")\r\n expected = df\r\n with ensure_clean_path(setup_path) as path:\r\n df.to_hdf(path, \"df\", format=\"table\", data_columns=True)\r\n result = read_hdf(path, \"df\")\r\n tm.assert_frame_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"where, df, expected\",\r\n [\r\n ('col==\"q\"', DataFrame({\"col\": [\"a\", \"b\", \"s\"]}), DataFrame({\"col\": []})),\r\n ('col==\"a\"', DataFrame({\"col\": [\"a\", \"b\", \"s\"]}), DataFrame({\"col\": [\"a\"]})),\r\n ],\r\n)\r\ndef test_convert_value(setup_path, where: str, df: DataFrame, expected: DataFrame):\r\n # GH39420\r\n # Check that read_hdf with categorical columns can filter by where condition.\r\n df.col = df.col.astype(\"category\")\r\n max_widths = {\"col\": 1}\r\n categorical_values = sorted(df.col.unique())\r\n expected.col = expected.col.astype(\"category\")\r\n expected.col = expected.col.cat.set_categories(categorical_values)\r\n\r\n with ensure_clean_path(setup_path) as path:\r\n df.to_hdf(path, \"df\", format=\"table\", min_itemsize=max_widths)\r\n result = read_hdf(path, where=where)\r\n tm.assert_frame_equal(result, expected)\r\n",
"\"\"\"\r\nThis file contains a minimal set of tests for compliance with the extension\r\narray interface test suite, and should contain no other tests.\r\nThe test suite for the full functionality of the array is located in\r\n`pandas/tests/arrays/`.\r\n\r\nThe tests in this file are inherited from the BaseExtensionTests, and only\r\nminimal tweaks should be applied to get the tests passing (by overwriting a\r\nparent method).\r\n\r\nAdditional tests should either be added to one of the BaseExtensionTests\r\nclasses (if they are relevant for the extension interface for all dtypes), or\r\nbe added to the array-specific tests in `pandas/tests/arrays/`.\r\n\r\n\"\"\"\r\nimport string\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas.core.arrays.string_ import StringDtype\r\nfrom pandas.tests.extension import base\r\n\r\n\r\ndef split_array(arr):\r\n if arr.dtype.storage != \"pyarrow\":\r\n pytest.skip(\"chunked array n/a\")\r\n\r\n def _split_array(arr):\r\n import pyarrow as pa\r\n\r\n arrow_array = arr._data\r\n split = len(arrow_array) // 2\r\n arrow_array = pa.chunked_array(\r\n [*arrow_array[:split].chunks, *arrow_array[split:].chunks]\r\n )\r\n assert arrow_array.num_chunks == 2\r\n return type(arr)(arrow_array)\r\n\r\n return _split_array(arr)\r\n\r\n\r\[email protected](params=[True, False])\r\ndef chunked(request):\r\n return request.param\r\n\r\n\r\[email protected]\r\ndef dtype(string_storage):\r\n return StringDtype(storage=string_storage)\r\n\r\n\r\[email protected]\r\ndef data(dtype, chunked):\r\n strings = np.random.choice(list(string.ascii_letters), size=100)\r\n while strings[0] == strings[1]:\r\n strings = np.random.choice(list(string.ascii_letters), size=100)\r\n\r\n arr = dtype.construct_array_type()._from_sequence(strings)\r\n return split_array(arr) if chunked else arr\r\n\r\n\r\[email protected]\r\ndef data_missing(dtype, chunked):\r\n \"\"\"Length 2 array with [NA, Valid]\"\"\"\r\n arr = dtype.construct_array_type()._from_sequence([pd.NA, \"A\"])\r\n return split_array(arr) if chunked else arr\r\n\r\n\r\[email protected]\r\ndef data_for_sorting(dtype, chunked):\r\n arr = dtype.construct_array_type()._from_sequence([\"B\", \"C\", \"A\"])\r\n return split_array(arr) if chunked else arr\r\n\r\n\r\[email protected]\r\ndef data_missing_for_sorting(dtype, chunked):\r\n arr = dtype.construct_array_type()._from_sequence([\"B\", pd.NA, \"A\"])\r\n return split_array(arr) if chunked else arr\r\n\r\n\r\[email protected]\r\ndef na_value():\r\n return pd.NA\r\n\r\n\r\[email protected]\r\ndef data_for_grouping(dtype, chunked):\r\n arr = dtype.construct_array_type()._from_sequence(\r\n [\"B\", \"B\", pd.NA, pd.NA, \"A\", \"A\", \"B\", \"C\"]\r\n )\r\n return split_array(arr) if chunked else arr\r\n\r\n\r\nclass TestDtype(base.BaseDtypeTests):\r\n def test_eq_with_str(self, dtype):\r\n assert dtype == f\"string[{dtype.storage}]\"\r\n super().test_eq_with_str(dtype)\r\n\r\n\r\nclass TestInterface(base.BaseInterfaceTests):\r\n def test_view(self, data, request):\r\n if data.dtype.storage == \"pyarrow\":\r\n mark = pytest.mark.xfail(reason=\"not implemented\")\r\n request.node.add_marker(mark)\r\n super().test_view(data)\r\n\r\n\r\nclass TestConstructors(base.BaseConstructorsTests):\r\n def test_from_dtype(self, data):\r\n # base test uses string representation of dtype\r\n pass\r\n\r\n\r\nclass TestReshaping(base.BaseReshapingTests):\r\n def test_transpose(self, data, request):\r\n if data.dtype.storage == \"pyarrow\":\r\n mark = 
pytest.mark.xfail(reason=\"not implemented\")\r\n request.node.add_marker(mark)\r\n super().test_transpose(data)\r\n\r\n\r\nclass TestGetitem(base.BaseGetitemTests):\r\n pass\r\n\r\n\r\nclass TestSetitem(base.BaseSetitemTests):\r\n def test_setitem_preserves_views(self, data, request):\r\n if data.dtype.storage == \"pyarrow\":\r\n mark = pytest.mark.xfail(reason=\"not implemented\")\r\n request.node.add_marker(mark)\r\n super().test_setitem_preserves_views(data)\r\n\r\n\r\nclass TestMissing(base.BaseMissingTests):\r\n pass\r\n\r\n\r\nclass TestNoReduce(base.BaseNoReduceTests):\r\n @pytest.mark.parametrize(\"skipna\", [True, False])\r\n def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):\r\n op_name = all_numeric_reductions\r\n\r\n if op_name in [\"min\", \"max\"]:\r\n return None\r\n\r\n s = pd.Series(data)\r\n with pytest.raises(TypeError):\r\n getattr(s, op_name)(skipna=skipna)\r\n\r\n\r\nclass TestMethods(base.BaseMethodsTests):\r\n @pytest.mark.skip(reason=\"returns nullable\")\r\n def test_value_counts(self, all_data, dropna):\r\n return super().test_value_counts(all_data, dropna)\r\n\r\n @pytest.mark.skip(reason=\"returns nullable\")\r\n def test_value_counts_with_normalize(self, data):\r\n pass\r\n\r\n\r\nclass TestCasting(base.BaseCastingTests):\r\n pass\r\n\r\n\r\nclass TestComparisonOps(base.BaseComparisonOpsTests):\r\n def _compare_other(self, s, data, op_name, other):\r\n result = getattr(s, op_name)(other)\r\n expected = getattr(s.astype(object), op_name)(other).astype(\"boolean\")\r\n self.assert_series_equal(result, expected)\r\n\r\n def test_compare_scalar(self, data, all_compare_operators):\r\n op_name = all_compare_operators\r\n s = pd.Series(data)\r\n self._compare_other(s, data, op_name, \"abc\")\r\n\r\n\r\nclass TestParsing(base.BaseParsingTests):\r\n pass\r\n\r\n\r\nclass TestPrinting(base.BasePrintingTests):\r\n pass\r\n\r\n\r\nclass TestGroupBy(base.BaseGroupbyTests):\r\n pass\r\n",
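The extension-array suite above is parametrized over the two `StringDtype` storage back-ends, and `split_array` additionally re-chunks the pyarrow-backed variant. A minimal, hedged sketch of constructing both variants, assuming pyarrow is installed for the second one:

import pandas as pd

py_backed = pd.array(["a", "b", pd.NA], dtype="string[python]")
print(py_backed.dtype.storage)   # "python"

pa_backed = pd.array(["a", "b", pd.NA], dtype="string[pyarrow]")
print(pa_backed.dtype.storage)   # "pyarrow"
# The pyarrow-backed array wraps a pyarrow ChunkedArray; it is exposed through
# the private `_data` attribute that split_array() above re-chunks into two pieces.
print(pa_backed._data.num_chunks)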
"\"\"\"Spectral Embedding.\"\"\"\r\n\r\n# Author: Gael Varoquaux <[email protected]>\r\n# Wei LI <[email protected]>\r\n# License: BSD 3 clause\r\n\r\n\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom scipy.linalg import eigh\r\nfrom scipy.sparse.linalg import eigsh\r\nfrom scipy.sparse.csgraph import connected_components\r\nfrom scipy.sparse.csgraph import laplacian as csgraph_laplacian\r\n\r\nfrom ..base import BaseEstimator\r\nfrom ..utils import (\r\n check_array,\r\n check_random_state,\r\n check_symmetric,\r\n)\r\nfrom ..utils._arpack import _init_arpack_v0\r\nfrom ..utils.extmath import _deterministic_vector_sign_flip\r\nfrom ..utils.fixes import lobpcg\r\nfrom ..metrics.pairwise import rbf_kernel\r\nfrom ..neighbors import kneighbors_graph, NearestNeighbors\r\nfrom ..utils.deprecation import deprecated\r\n\r\n\r\ndef _graph_connected_component(graph, node_id):\r\n \"\"\"Find the largest graph connected components that contains one\r\n given node.\r\n\r\n Parameters\r\n ----------\r\n graph : array-like of shape (n_samples, n_samples)\r\n Adjacency matrix of the graph, non-zero weight means an edge\r\n between the nodes.\r\n\r\n node_id : int\r\n The index of the query node of the graph.\r\n\r\n Returns\r\n -------\r\n connected_components_matrix : array-like of shape (n_samples,)\r\n An array of bool value indicating the indexes of the nodes\r\n belonging to the largest connected components of the given query\r\n node.\r\n \"\"\"\r\n n_node = graph.shape[0]\r\n if sparse.issparse(graph):\r\n # speed up row-wise access to boolean connection mask\r\n graph = graph.tocsr()\r\n connected_nodes = np.zeros(n_node, dtype=bool)\r\n nodes_to_explore = np.zeros(n_node, dtype=bool)\r\n nodes_to_explore[node_id] = True\r\n for _ in range(n_node):\r\n last_num_component = connected_nodes.sum()\r\n np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)\r\n if last_num_component >= connected_nodes.sum():\r\n break\r\n indices = np.where(nodes_to_explore)[0]\r\n nodes_to_explore.fill(False)\r\n for i in indices:\r\n if sparse.issparse(graph):\r\n neighbors = graph[i].toarray().ravel()\r\n else:\r\n neighbors = graph[i]\r\n np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)\r\n return connected_nodes\r\n\r\n\r\ndef _graph_is_connected(graph):\r\n \"\"\"Return whether the graph is connected (True) or Not (False).\r\n\r\n Parameters\r\n ----------\r\n graph : {array-like, sparse matrix} of shape (n_samples, n_samples)\r\n Adjacency matrix of the graph, non-zero weight means an edge\r\n between the nodes.\r\n\r\n Returns\r\n -------\r\n is_connected : bool\r\n True means the graph is fully connected and False means not.\r\n \"\"\"\r\n if sparse.isspmatrix(graph):\r\n # sparse graph, find all the connected components\r\n n_connected_components, _ = connected_components(graph)\r\n return n_connected_components == 1\r\n else:\r\n # dense graph, find all connected components start from node 0\r\n return _graph_connected_component(graph, 0).sum() == graph.shape[0]\r\n\r\n\r\ndef _set_diag(laplacian, value, norm_laplacian):\r\n \"\"\"Set the diagonal of the laplacian matrix and convert it to a\r\n sparse format well suited for eigenvalue decomposition.\r\n\r\n Parameters\r\n ----------\r\n laplacian : {ndarray, sparse matrix}\r\n The graph laplacian.\r\n\r\n value : float\r\n The value of the diagonal.\r\n\r\n norm_laplacian : bool\r\n Whether the value of the diagonal should be changed or not.\r\n\r\n Returns\r\n -------\r\n laplacian : {array, 
sparse matrix}\r\n An array of matrix in a form that is well suited to fast\r\n eigenvalue decomposition, depending on the band width of the\r\n matrix.\r\n \"\"\"\r\n n_nodes = laplacian.shape[0]\r\n # We need all entries in the diagonal to values\r\n if not sparse.isspmatrix(laplacian):\r\n if norm_laplacian:\r\n laplacian.flat[:: n_nodes + 1] = value\r\n else:\r\n laplacian = laplacian.tocoo()\r\n if norm_laplacian:\r\n diag_idx = laplacian.row == laplacian.col\r\n laplacian.data[diag_idx] = value\r\n # If the matrix has a small number of diagonals (as in the\r\n # case of structured matrices coming from images), the\r\n # dia format might be best suited for matvec products:\r\n n_diags = np.unique(laplacian.row - laplacian.col).size\r\n if n_diags <= 7:\r\n # 3 or less outer diagonals on each side\r\n laplacian = laplacian.todia()\r\n else:\r\n # csr has the fastest matvec and is thus best suited to\r\n # arpack\r\n laplacian = laplacian.tocsr()\r\n return laplacian\r\n\r\n\r\ndef spectral_embedding(\r\n adjacency,\r\n *,\r\n n_components=8,\r\n eigen_solver=None,\r\n random_state=None,\r\n eigen_tol=0.0,\r\n norm_laplacian=True,\r\n drop_first=True,\r\n):\r\n \"\"\"Project the sample on the first eigenvectors of the graph Laplacian.\r\n\r\n The adjacency matrix is used to compute a normalized graph Laplacian\r\n whose spectrum (especially the eigenvectors associated to the\r\n smallest eigenvalues) has an interpretation in terms of minimal\r\n number of cuts necessary to split the graph into comparably sized\r\n components.\r\n\r\n This embedding can also 'work' even if the ``adjacency`` variable is\r\n not strictly the adjacency matrix of a graph but more generally\r\n an affinity or similarity matrix between samples (for instance the\r\n heat kernel of a euclidean distance matrix or a k-NN matrix).\r\n\r\n However care must taken to always make the affinity matrix symmetric\r\n so that the eigenvector decomposition works as expected.\r\n\r\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\r\n\r\n Read more in the :ref:`User Guide <spectral_embedding>`.\r\n\r\n Parameters\r\n ----------\r\n adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)\r\n The adjacency matrix of the graph to embed.\r\n\r\n n_components : int, default=8\r\n The dimension of the projection subspace.\r\n\r\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\r\n The eigenvalue decomposition strategy to use. AMG requires pyamg\r\n to be installed. It can be faster on very large, sparse problems,\r\n but may also lead to instabilities. If None, then ``'arpack'`` is\r\n used.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n A pseudo random number generator used for the initialization\r\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\r\n 'amg'`, and for the K-Means initialization. Use an int to make\r\n the results deterministic across calls (See\r\n :term:`Glossary <random_state>`).\r\n\r\n .. note::\r\n When using `eigen_solver == 'amg'`,\r\n it is necessary to also fix the global numpy seed with\r\n `np.random.seed(int)` to get deterministic results. 
See\r\n https://github.com/pyamg/pyamg/issues/139 for further\r\n information.\r\n\r\n eigen_tol : float, default=0.0\r\n Stopping criterion for eigendecomposition of the Laplacian matrix\r\n when using arpack eigen_solver.\r\n\r\n norm_laplacian : bool, default=True\r\n If True, then compute symmetric normalized Laplacian.\r\n\r\n drop_first : bool, default=True\r\n Whether to drop the first eigenvector. For spectral embedding, this\r\n should be True as the first eigenvector should be constant vector for\r\n connected graph, but for spectral clustering, this should be kept as\r\n False to retain the first eigenvector.\r\n\r\n Returns\r\n -------\r\n embedding : ndarray of shape (n_samples, n_components)\r\n The reduced samples.\r\n\r\n Notes\r\n -----\r\n Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph\r\n has one connected component. If there graph has many components, the first\r\n few eigenvectors will simply uncover the connected components of the graph.\r\n\r\n References\r\n ----------\r\n * https://en.wikipedia.org/wiki/LOBPCG\r\n\r\n * Toward the Optimal Preconditioned Eigensolver: Locally Optimal\r\n Block Preconditioned Conjugate Gradient Method\r\n Andrew V. Knyazev\r\n https://doi.org/10.1137%2FS1064827500366124\r\n \"\"\"\r\n adjacency = check_symmetric(adjacency)\r\n\r\n try:\r\n from pyamg import smoothed_aggregation_solver\r\n except ImportError as e:\r\n if eigen_solver == \"amg\":\r\n raise ValueError(\r\n \"The eigen_solver was set to 'amg', but pyamg is not available.\"\r\n ) from e\r\n\r\n if eigen_solver is None:\r\n eigen_solver = \"arpack\"\r\n elif eigen_solver not in (\"arpack\", \"lobpcg\", \"amg\"):\r\n raise ValueError(\r\n \"Unknown value for eigen_solver: '%s'.\"\r\n \"Should be 'amg', 'arpack', or 'lobpcg'\" % eigen_solver\r\n )\r\n\r\n random_state = check_random_state(random_state)\r\n\r\n n_nodes = adjacency.shape[0]\r\n # Whether to drop the first eigenvector\r\n if drop_first:\r\n n_components = n_components + 1\r\n\r\n if not _graph_is_connected(adjacency):\r\n warnings.warn(\r\n \"Graph is not fully connected, spectral embedding may not work as expected.\"\r\n )\r\n\r\n laplacian, dd = csgraph_laplacian(\r\n adjacency, normed=norm_laplacian, return_diag=True\r\n )\r\n if (\r\n eigen_solver == \"arpack\"\r\n or eigen_solver != \"lobpcg\"\r\n and (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)\r\n ):\r\n # lobpcg used with eigen_solver='amg' has bugs for low number of nodes\r\n # for details see the source code in scipy:\r\n # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen\r\n # /lobpcg/lobpcg.py#L237\r\n # or matlab:\r\n # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m\r\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\r\n\r\n # Here we'll use shift-invert mode for fast eigenvalues\r\n # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html\r\n # for a short explanation of what this means)\r\n # Because the normalized Laplacian has eigenvalues between 0 and 2,\r\n # I - L has eigenvalues between -1 and 1. ARPACK is most efficient\r\n # when finding eigenvalues of largest magnitude (keyword which='LM')\r\n # and when these eigenvalues are very large compared to the rest.\r\n # For very large, very sparse graphs, I - L can have many, many\r\n # eigenvalues very near 1.0. This leads to slow convergence. So\r\n # instead, we'll use ARPACK's shift-invert mode, asking for the\r\n # eigenvalues near 1.0. 
This effectively spreads-out the spectrum\r\n # near 1.0 and leads to much faster convergence: potentially an\r\n # orders-of-magnitude speedup over simply using keyword which='LA'\r\n # in standard mode.\r\n try:\r\n # We are computing the opposite of the laplacian inplace so as\r\n # to spare a memory allocation of a possibly very large array\r\n laplacian *= -1\r\n v0 = _init_arpack_v0(laplacian.shape[0], random_state)\r\n _, diffusion_map = eigsh(\r\n laplacian, k=n_components, sigma=1.0, which=\"LM\", tol=eigen_tol, v0=v0\r\n )\r\n embedding = diffusion_map.T[n_components::-1]\r\n if norm_laplacian:\r\n # recover u = D^-1/2 x from the eigenvector output x\r\n embedding = embedding / dd\r\n except RuntimeError:\r\n # When submatrices are exactly singular, an LU decomposition\r\n # in arpack fails. We fallback to lobpcg\r\n eigen_solver = \"lobpcg\"\r\n # Revert the laplacian to its opposite to have lobpcg work\r\n laplacian *= -1\r\n\r\n elif eigen_solver == \"amg\":\r\n # Use AMG to get a preconditioner and speed up the eigenvalue\r\n # problem.\r\n if not sparse.issparse(laplacian):\r\n warnings.warn(\"AMG works better for sparse matrices\")\r\n # lobpcg needs double precision floats\r\n laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)\r\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\r\n\r\n # The Laplacian matrix is always singular, having at least one zero\r\n # eigenvalue, corresponding to the trivial eigenvector, which is a\r\n # constant. Using a singular matrix for preconditioning may result in\r\n # random failures in LOBPCG and is not supported by the existing\r\n # theory:\r\n # see https://doi.org/10.1007/s10208-015-9297-1\r\n # Shift the Laplacian so its diagononal is not all ones. The shift\r\n # does change the eigenpairs however, so we'll feed the shifted\r\n # matrix to the solver and afterward set it back to the original.\r\n diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])\r\n laplacian += diag_shift\r\n ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse=\"csr\"))\r\n laplacian -= diag_shift\r\n\r\n M = ml.aspreconditioner()\r\n # Create initial approximation X to eigenvectors\r\n X = random_state.rand(laplacian.shape[0], n_components + 1)\r\n X[:, 0] = dd.ravel()\r\n _, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.0e-5, largest=False)\r\n embedding = diffusion_map.T\r\n if norm_laplacian:\r\n # recover u = D^-1/2 x from the eigenvector output x\r\n embedding = embedding / dd\r\n if embedding.shape[0] == 1:\r\n raise ValueError\r\n\r\n if eigen_solver == \"lobpcg\":\r\n # lobpcg needs double precision floats\r\n laplacian = check_array(laplacian, dtype=np.float64, accept_sparse=True)\r\n if n_nodes < 5 * n_components + 1:\r\n # see note above under arpack why lobpcg has problems with small\r\n # number of nodes\r\n # lobpcg will fallback to eigh, so we short circuit it\r\n if sparse.isspmatrix(laplacian):\r\n laplacian = laplacian.toarray()\r\n _, diffusion_map = eigh(laplacian, check_finite=False)\r\n embedding = diffusion_map.T[:n_components]\r\n if norm_laplacian:\r\n # recover u = D^-1/2 x from the eigenvector output x\r\n embedding = embedding / dd\r\n else:\r\n laplacian = _set_diag(laplacian, 1, norm_laplacian)\r\n # We increase the number of eigenvectors requested, as lobpcg\r\n # doesn't behave well in low dimension and create initial\r\n # approximation X to eigenvectors\r\n X = random_state.rand(laplacian.shape[0], n_components + 1)\r\n X[:, 0] = dd.ravel()\r\n _, diffusion_map = lobpcg(\r\n laplacian, 
X, tol=1e-5, largest=False, maxiter=2000\r\n )\r\n embedding = diffusion_map.T[:n_components]\r\n if norm_laplacian:\r\n # recover u = D^-1/2 x from the eigenvector output x\r\n embedding = embedding / dd\r\n if embedding.shape[0] == 1:\r\n raise ValueError\r\n\r\n embedding = _deterministic_vector_sign_flip(embedding)\r\n if drop_first:\r\n return embedding[1:n_components].T\r\n else:\r\n return embedding[:n_components].T\r\n\r\n\r\nclass SpectralEmbedding(BaseEstimator):\r\n \"\"\"Spectral embedding for non-linear dimensionality reduction.\r\n\r\n Forms an affinity matrix given by the specified function and\r\n applies spectral decomposition to the corresponding graph laplacian.\r\n The resulting transformation is given by the value of the\r\n eigenvectors for each data point.\r\n\r\n Note : Laplacian Eigenmaps is the actual algorithm implemented here.\r\n\r\n Read more in the :ref:`User Guide <spectral_embedding>`.\r\n\r\n Parameters\r\n ----------\r\n n_components : int, default=2\r\n The dimension of the projected subspace.\r\n\r\n affinity : {'nearest_neighbors', 'rbf', 'precomputed', \\\r\n 'precomputed_nearest_neighbors'} or callable, \\\r\n default='nearest_neighbors'\r\n How to construct the affinity matrix.\r\n - 'nearest_neighbors' : construct the affinity matrix by computing a\r\n graph of nearest neighbors.\r\n - 'rbf' : construct the affinity matrix by computing a radial basis\r\n function (RBF) kernel.\r\n - 'precomputed' : interpret ``X`` as a precomputed affinity matrix.\r\n - 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph\r\n of precomputed nearest neighbors, and constructs the affinity matrix\r\n by selecting the ``n_neighbors`` nearest neighbors.\r\n - callable : use passed in function as affinity\r\n the function takes in data matrix (n_samples, n_features)\r\n and return affinity matrix (n_samples, n_samples).\r\n\r\n gamma : float, default=None\r\n Kernel coefficient for rbf kernel. If None, gamma will be set to\r\n 1/n_features.\r\n\r\n random_state : int, RandomState instance or None, default=None\r\n A pseudo random number generator used for the initialization\r\n of the lobpcg eigen vectors decomposition when `eigen_solver ==\r\n 'amg'`, and for the K-Means initialization. Use an int to make\r\n the results deterministic across calls (See\r\n :term:`Glossary <random_state>`).\r\n\r\n .. note::\r\n When using `eigen_solver == 'amg'`,\r\n it is necessary to also fix the global numpy seed with\r\n `np.random.seed(int)` to get deterministic results. See\r\n https://github.com/pyamg/pyamg/issues/139 for further\r\n information.\r\n\r\n eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None\r\n The eigenvalue decomposition strategy to use. AMG requires pyamg\r\n to be installed. It can be faster on very large, sparse problems.\r\n If None, then ``'arpack'`` is used.\r\n\r\n n_neighbors : int, default=None\r\n Number of nearest neighbors for nearest_neighbors graph building.\r\n If None, n_neighbors will be set to max(n_samples/10, 1).\r\n\r\n n_jobs : int, default=None\r\n The number of parallel jobs to run.\r\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\r\n ``-1`` means using all processors. 
See :term:`Glossary <n_jobs>`\r\n for more details.\r\n\r\n Attributes\r\n ----------\r\n embedding_ : ndarray of shape (n_samples, n_components)\r\n Spectral embedding of the training matrix.\r\n\r\n affinity_matrix_ : ndarray of shape (n_samples, n_samples)\r\n Affinity_matrix constructed from samples or precomputed.\r\n\r\n n_features_in_ : int\r\n Number of features seen during :term:`fit`.\r\n\r\n .. versionadded:: 0.24\r\n\r\n feature_names_in_ : ndarray of shape (`n_features_in_`,)\r\n Names of features seen during :term:`fit`. Defined only when `X`\r\n has feature names that are all strings.\r\n\r\n .. versionadded:: 1.0\r\n\r\n n_neighbors_ : int\r\n Number of nearest neighbors effectively used.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.datasets import load_digits\r\n >>> from sklearn.manifold import SpectralEmbedding\r\n >>> X, _ = load_digits(return_X_y=True)\r\n >>> X.shape\r\n (1797, 64)\r\n >>> embedding = SpectralEmbedding(n_components=2)\r\n >>> X_transformed = embedding.fit_transform(X[:100])\r\n >>> X_transformed.shape\r\n (100, 2)\r\n\r\n References\r\n ----------\r\n\r\n - A Tutorial on Spectral Clustering, 2007\r\n Ulrike von Luxburg\r\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323\r\n\r\n - On Spectral Clustering: Analysis and an algorithm, 2001\r\n Andrew Y. Ng, Michael I. Jordan, Yair Weiss\r\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100\r\n\r\n - Normalized cuts and image segmentation, 2000\r\n Jianbo Shi, Jitendra Malik\r\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n n_components=2,\r\n *,\r\n affinity=\"nearest_neighbors\",\r\n gamma=None,\r\n random_state=None,\r\n eigen_solver=None,\r\n n_neighbors=None,\r\n n_jobs=None,\r\n ):\r\n self.n_components = n_components\r\n self.affinity = affinity\r\n self.gamma = gamma\r\n self.random_state = random_state\r\n self.eigen_solver = eigen_solver\r\n self.n_neighbors = n_neighbors\r\n self.n_jobs = n_jobs\r\n\r\n def _more_tags(self):\r\n return {\r\n \"pairwise\": self.affinity\r\n in [\"precomputed\", \"precomputed_nearest_neighbors\"]\r\n }\r\n\r\n # TODO: Remove in 1.1\r\n # mypy error: Decorated property not supported\r\n @deprecated( # type: ignore\r\n \"Attribute `_pairwise` was deprecated in \"\r\n \"version 0.24 and will be removed in 1.1 (renaming of 0.26).\"\r\n )\r\n @property\r\n def _pairwise(self):\r\n return self.affinity in [\"precomputed\", \"precomputed_nearest_neighbors\"]\r\n\r\n def _get_affinity_matrix(self, X, Y=None):\r\n \"\"\"Calculate the affinity matrix from data\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Training vector, where `n_samples` is the number of samples\r\n and `n_features` is the number of features.\r\n\r\n If affinity is \"precomputed\"\r\n X : array-like of shape (n_samples, n_samples),\r\n Interpret X as precomputed adjacency graph computed from\r\n samples.\r\n\r\n Y: Ignored\r\n\r\n Returns\r\n -------\r\n affinity_matrix of shape (n_samples, n_samples)\r\n \"\"\"\r\n if self.affinity == \"precomputed\":\r\n self.affinity_matrix_ = X\r\n return self.affinity_matrix_\r\n if self.affinity == \"precomputed_nearest_neighbors\":\r\n estimator = NearestNeighbors(\r\n n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric=\"precomputed\"\r\n ).fit(X)\r\n connectivity = estimator.kneighbors_graph(X=X, mode=\"connectivity\")\r\n self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)\r\n return self.affinity_matrix_\r\n 
if self.affinity == \"nearest_neighbors\":\r\n if sparse.issparse(X):\r\n warnings.warn(\r\n \"Nearest neighbors affinity currently does \"\r\n \"not support sparse input, falling back to \"\r\n \"rbf affinity\"\r\n )\r\n self.affinity = \"rbf\"\r\n else:\r\n self.n_neighbors_ = (\r\n self.n_neighbors\r\n if self.n_neighbors is not None\r\n else max(int(X.shape[0] / 10), 1)\r\n )\r\n self.affinity_matrix_ = kneighbors_graph(\r\n X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs\r\n )\r\n # currently only symmetric affinity_matrix supported\r\n self.affinity_matrix_ = 0.5 * (\r\n self.affinity_matrix_ + self.affinity_matrix_.T\r\n )\r\n return self.affinity_matrix_\r\n if self.affinity == \"rbf\":\r\n self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]\r\n self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)\r\n return self.affinity_matrix_\r\n self.affinity_matrix_ = self.affinity(X)\r\n return self.affinity_matrix_\r\n\r\n def fit(self, X, y=None):\r\n \"\"\"Fit the model from data in X.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training vector, where `n_samples` is the number of samples\r\n and `n_features` is the number of features.\r\n\r\n If affinity is \"precomputed\"\r\n X : {array-like, sparse matrix}, shape (n_samples, n_samples),\r\n Interpret X as precomputed adjacency graph computed from\r\n samples.\r\n\r\n y : Ignored\r\n\r\n Returns\r\n -------\r\n self : object\r\n Returns the instance itself.\r\n \"\"\"\r\n\r\n X = self._validate_data(\r\n X, accept_sparse=\"csr\", ensure_min_samples=2, estimator=self\r\n )\r\n\r\n random_state = check_random_state(self.random_state)\r\n if isinstance(self.affinity, str):\r\n if self.affinity not in {\r\n \"nearest_neighbors\",\r\n \"rbf\",\r\n \"precomputed\",\r\n \"precomputed_nearest_neighbors\",\r\n }:\r\n raise ValueError(\r\n \"%s is not a valid affinity. Expected \"\r\n \"'precomputed', 'rbf', 'nearest_neighbors' \"\r\n \"or a callable.\"\r\n % self.affinity\r\n )\r\n elif not callable(self.affinity):\r\n raise ValueError(\r\n \"'affinity' is expected to be an affinity name or a callable. Got: %s\"\r\n % self.affinity\r\n )\r\n\r\n affinity_matrix = self._get_affinity_matrix(X)\r\n self.embedding_ = spectral_embedding(\r\n affinity_matrix,\r\n n_components=self.n_components,\r\n eigen_solver=self.eigen_solver,\r\n random_state=random_state,\r\n )\r\n return self\r\n\r\n def fit_transform(self, X, y=None):\r\n \"\"\"Fit the model from data in X and transform X.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n Training vector, where `n_samples` is the number of samples\r\n and `n_features` is the number of features.\r\n\r\n If affinity is \"precomputed\"\r\n X : {array-like, sparse matrix} of shape (n_samples, n_samples),\r\n Interpret X as precomputed adjacency graph computed from\r\n samples.\r\n\r\n y : Ignored\r\n\r\n Returns\r\n -------\r\n X_new : array-like of shape (n_samples, n_components)\r\n \"\"\"\r\n self.fit(X)\r\n return self.embedding_\r\n",
"from itertools import product\r\n\r\nimport pytest\r\nimport re\r\nimport numpy as np\r\nfrom scipy.sparse import (\r\n bsr_matrix,\r\n coo_matrix,\r\n csc_matrix,\r\n csr_matrix,\r\n dok_matrix,\r\n lil_matrix,\r\n issparse,\r\n)\r\n\r\nfrom sklearn import metrics\r\nfrom sklearn import neighbors, datasets\r\nfrom sklearn.base import clone\r\nfrom sklearn.exceptions import DataConversionWarning\r\nfrom sklearn.exceptions import EfficiencyWarning\r\nfrom sklearn.exceptions import NotFittedError\r\nfrom sklearn.metrics.pairwise import pairwise_distances\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import VALID_METRICS_SPARSE, VALID_METRICS\r\nfrom sklearn.neighbors._base import _is_sorted_by_data, _check_precomputed\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.utils._testing import assert_array_almost_equal\r\nfrom sklearn.utils._testing import assert_array_equal\r\nfrom sklearn.utils._testing import ignore_warnings\r\nfrom sklearn.utils.validation import check_random_state\r\nfrom sklearn.utils.fixes import sp_version, parse_version\r\n\r\nimport joblib\r\n\r\nrng = np.random.RandomState(0)\r\n# load and shuffle iris dataset\r\niris = datasets.load_iris()\r\nperm = rng.permutation(iris.target.size)\r\niris.data = iris.data[perm]\r\niris.target = iris.target[perm]\r\n\r\n# load and shuffle digits\r\ndigits = datasets.load_digits()\r\nperm = rng.permutation(digits.target.size)\r\ndigits.data = digits.data[perm]\r\ndigits.target = digits.target[perm]\r\n\r\nSPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix)\r\nSPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)\r\n\r\nALGORITHMS = (\"ball_tree\", \"brute\", \"kd_tree\", \"auto\")\r\nP = (1, 2, 3, 4, np.inf)\r\nJOBLIB_BACKENDS = list(joblib.parallel.BACKENDS.keys())\r\n\r\n# Filter deprecation warnings.\r\nneighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)\r\nneighbors.radius_neighbors_graph = ignore_warnings(neighbors.radius_neighbors_graph)\r\n\r\n\r\ndef _weight_func(dist):\r\n \"\"\"Weight function to replace lambda d: d ** -2.\r\n The lambda function is not valid because:\r\n if d==0 then 0^-2 is not valid.\"\"\"\r\n\r\n # Dist could be multidimensional, flatten it so all values\r\n # can be looped\r\n with np.errstate(divide=\"ignore\"):\r\n retval = 1.0 / dist\r\n return retval ** 2\r\n\r\n\r\ndef test_unsupervised_kneighbors(\r\n n_samples=20, n_features=5, n_query_pts=2, n_neighbors=5\r\n):\r\n # Test unsupervised neighbors methods\r\n X = rng.rand(n_samples, n_features)\r\n\r\n test = rng.rand(n_query_pts, n_features)\r\n\r\n for p in P:\r\n results_nodist = []\r\n results = []\r\n\r\n for algorithm in ALGORITHMS:\r\n neigh = neighbors.NearestNeighbors(\r\n n_neighbors=n_neighbors, algorithm=algorithm, p=p\r\n )\r\n neigh.fit(X)\r\n\r\n results_nodist.append(neigh.kneighbors(test, return_distance=False))\r\n results.append(neigh.kneighbors(test, return_distance=True))\r\n\r\n for i in range(len(results) - 1):\r\n assert_array_almost_equal(results_nodist[i], results[i][1])\r\n assert_array_almost_equal(results[i][0], results[i + 1][0])\r\n assert_array_almost_equal(results[i][1], results[i + 1][1])\r\n\r\n\r\[email protected](\r\n \"NearestNeighbors\",\r\n [\r\n neighbors.KNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n neighbors.NearestNeighbors,\r\n ],\r\n)\r\ndef test_unsupervised_inputs(NearestNeighbors):\r\n # Test unsupervised inputs for neighbors 
estimators\r\n\r\n X = rng.random_sample((10, 3))\r\n y = rng.randint(3, size=10)\r\n nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)\r\n nbrs_fid.fit(X)\r\n\r\n dist1, ind1 = nbrs_fid.kneighbors(X)\r\n\r\n nbrs = NearestNeighbors(n_neighbors=1)\r\n\r\n for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):\r\n nbrs.fit(data, y)\r\n\r\n dist2, ind2 = nbrs.kneighbors(X)\r\n\r\n assert_array_almost_equal(dist1, dist2)\r\n assert_array_almost_equal(ind1, ind2)\r\n\r\n\r\ndef test_n_neighbors_datatype():\r\n # Test to check whether n_neighbors is integer\r\n X = [[1, 1], [1, 1], [1, 1]]\r\n expected_msg = \"n_neighbors does not take .*float.* value, enter integer value\"\r\n msg = \"Expected n_neighbors > 0. Got -3\"\r\n\r\n neighbors_ = neighbors.NearestNeighbors(n_neighbors=3.0)\r\n with pytest.raises(TypeError, match=expected_msg):\r\n neighbors_.fit(X)\r\n with pytest.raises(ValueError, match=msg):\r\n neighbors_.kneighbors(X=X, n_neighbors=-3)\r\n with pytest.raises(TypeError, match=expected_msg):\r\n neighbors_.kneighbors(X=X, n_neighbors=3.0)\r\n\r\n\r\ndef test_not_fitted_error_gets_raised():\r\n X = [[1]]\r\n neighbors_ = neighbors.NearestNeighbors()\r\n with pytest.raises(NotFittedError):\r\n neighbors_.kneighbors_graph(X)\r\n with pytest.raises(NotFittedError):\r\n neighbors_.radius_neighbors_graph(X)\r\n\r\n\r\n@ignore_warnings(category=EfficiencyWarning)\r\ndef check_precomputed(make_train_test, estimators):\r\n \"\"\"Tests unsupervised NearestNeighbors with a distance matrix.\"\"\"\r\n # Note: smaller samples may result in spurious test success\r\n rng = np.random.RandomState(42)\r\n X = rng.random_sample((10, 4))\r\n Y = rng.random_sample((3, 4))\r\n DXX, DYX = make_train_test(X, Y)\r\n for method in [\r\n \"kneighbors\",\r\n ]:\r\n # TODO: also test radius_neighbors, but requires different assertion\r\n\r\n # As a feature matrix (n_samples by n_features)\r\n nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)\r\n nbrs_X.fit(X)\r\n dist_X, ind_X = getattr(nbrs_X, method)(Y)\r\n\r\n # As a dense distance matrix (n_samples by n_samples)\r\n nbrs_D = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"brute\", metric=\"precomputed\"\r\n )\r\n nbrs_D.fit(DXX)\r\n dist_D, ind_D = getattr(nbrs_D, method)(DYX)\r\n assert_array_almost_equal(dist_X, dist_D)\r\n assert_array_almost_equal(ind_X, ind_D)\r\n\r\n # Check auto works too\r\n nbrs_D = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"auto\", metric=\"precomputed\"\r\n )\r\n nbrs_D.fit(DXX)\r\n dist_D, ind_D = getattr(nbrs_D, method)(DYX)\r\n assert_array_almost_equal(dist_X, dist_D)\r\n assert_array_almost_equal(ind_X, ind_D)\r\n\r\n # Check X=None in prediction\r\n dist_X, ind_X = getattr(nbrs_X, method)(None)\r\n dist_D, ind_D = getattr(nbrs_D, method)(None)\r\n assert_array_almost_equal(dist_X, dist_D)\r\n assert_array_almost_equal(ind_X, ind_D)\r\n\r\n # Must raise a ValueError if the matrix is not of correct shape\r\n with pytest.raises(ValueError):\r\n getattr(nbrs_D, method)(X)\r\n\r\n target = np.arange(X.shape[0])\r\n for Est in estimators:\r\n est = Est(metric=\"euclidean\")\r\n est.radius = est.n_neighbors = 1\r\n pred_X = est.fit(X, target).predict(Y)\r\n est.metric = \"precomputed\"\r\n pred_D = est.fit(DXX, target).predict(DYX)\r\n assert_array_almost_equal(pred_X, pred_D)\r\n\r\n\r\ndef test_precomputed_dense():\r\n def make_train_test(X_train, X_test):\r\n return (\r\n metrics.pairwise_distances(X_train),\r\n metrics.pairwise_distances(X_test, X_train),\r\n )\r\n\r\n estimators = 
[\r\n neighbors.KNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n neighbors.RadiusNeighborsClassifier,\r\n neighbors.RadiusNeighborsRegressor,\r\n ]\r\n check_precomputed(make_train_test, estimators)\r\n\r\n\r\[email protected](\"fmt\", [\"csr\", \"lil\"])\r\ndef test_precomputed_sparse_knn(fmt):\r\n def make_train_test(X_train, X_test):\r\n nn = neighbors.NearestNeighbors(n_neighbors=3 + 1).fit(X_train)\r\n return (\r\n nn.kneighbors_graph(X_train, mode=\"distance\").asformat(fmt),\r\n nn.kneighbors_graph(X_test, mode=\"distance\").asformat(fmt),\r\n )\r\n\r\n # We do not test RadiusNeighborsClassifier and RadiusNeighborsRegressor\r\n # since the precomputed neighbors graph is built with k neighbors only.\r\n estimators = [\r\n neighbors.KNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n ]\r\n check_precomputed(make_train_test, estimators)\r\n\r\n\r\[email protected](\"fmt\", [\"csr\", \"lil\"])\r\ndef test_precomputed_sparse_radius(fmt):\r\n def make_train_test(X_train, X_test):\r\n nn = neighbors.NearestNeighbors(radius=1).fit(X_train)\r\n return (\r\n nn.radius_neighbors_graph(X_train, mode=\"distance\").asformat(fmt),\r\n nn.radius_neighbors_graph(X_test, mode=\"distance\").asformat(fmt),\r\n )\r\n\r\n # We do not test KNeighborsClassifier and KNeighborsRegressor\r\n # since the precomputed neighbors graph is built with a radius.\r\n estimators = [\r\n neighbors.RadiusNeighborsClassifier,\r\n neighbors.RadiusNeighborsRegressor,\r\n ]\r\n check_precomputed(make_train_test, estimators)\r\n\r\n\r\ndef test_is_sorted_by_data():\r\n # Test that _is_sorted_by_data works as expected. In CSR sparse matrix,\r\n # entries in each row can be sorted by indices, by data, or unsorted.\r\n # _is_sorted_by_data should return True when entries are sorted by data,\r\n # and False in all other cases.\r\n\r\n # Test with sorted 1D array\r\n X = csr_matrix(np.arange(10))\r\n assert _is_sorted_by_data(X)\r\n # Test with unsorted 1D array\r\n X[0, 2] = 5\r\n assert not _is_sorted_by_data(X)\r\n\r\n # Test when the data is sorted in each sample, but not necessarily\r\n # between samples\r\n X = csr_matrix([[0, 1, 2], [3, 0, 0], [3, 4, 0], [1, 0, 2]])\r\n assert _is_sorted_by_data(X)\r\n\r\n # Test with duplicates entries in X.indptr\r\n data, indices, indptr = [0, 4, 2, 2], [0, 1, 1, 1], [0, 2, 2, 4]\r\n X = csr_matrix((data, indices, indptr), shape=(3, 3))\r\n assert _is_sorted_by_data(X)\r\n\r\n\r\n@ignore_warnings(category=EfficiencyWarning)\r\ndef test_check_precomputed():\r\n # Test that _check_precomputed returns a graph sorted by data\r\n X = csr_matrix(np.abs(np.random.RandomState(42).randn(10, 10)))\r\n assert not _is_sorted_by_data(X)\r\n Xt = _check_precomputed(X)\r\n assert _is_sorted_by_data(Xt)\r\n\r\n # est with a different number of nonzero entries for each sample\r\n mask = np.random.RandomState(42).randint(2, size=(10, 10))\r\n X = X.toarray()\r\n X[mask == 1] = 0\r\n X = csr_matrix(X)\r\n assert not _is_sorted_by_data(X)\r\n Xt = _check_precomputed(X)\r\n assert _is_sorted_by_data(Xt)\r\n\r\n\r\n@ignore_warnings(category=EfficiencyWarning)\r\ndef test_precomputed_sparse_invalid():\r\n dist = np.array([[0.0, 2.0, 1.0], [2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])\r\n dist_csr = csr_matrix(dist)\r\n neigh = neighbors.NearestNeighbors(n_neighbors=1, metric=\"precomputed\")\r\n neigh.fit(dist_csr)\r\n neigh.kneighbors(None, n_neighbors=1)\r\n neigh.kneighbors(np.array([[0.0, 0.0, 0.0]]), n_neighbors=2)\r\n\r\n # Ensures enough number of nearest neighbors\r\n dist = np.array([[0.0, 
2.0, 0.0], [2.0, 0.0, 3.0], [0.0, 3.0, 0.0]])\r\n dist_csr = csr_matrix(dist)\r\n neigh.fit(dist_csr)\r\n msg = \"2 neighbors per samples are required, but some samples have only 1\"\r\n with pytest.raises(ValueError, match=msg):\r\n neigh.kneighbors(None, n_neighbors=1)\r\n\r\n # Checks error with inconsistent distance matrix\r\n dist = np.array([[5.0, 2.0, 1.0], [-2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])\r\n dist_csr = csr_matrix(dist)\r\n msg = \"Negative values in data passed to precomputed distance matrix.\"\r\n with pytest.raises(ValueError, match=msg):\r\n neigh.kneighbors(dist_csr, n_neighbors=1)\r\n\r\n\r\ndef test_precomputed_cross_validation():\r\n # Ensure array is split correctly\r\n rng = np.random.RandomState(0)\r\n X = rng.rand(20, 2)\r\n D = pairwise_distances(X, metric=\"euclidean\")\r\n y = rng.randint(3, size=20)\r\n for Est in (\r\n neighbors.KNeighborsClassifier,\r\n neighbors.RadiusNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n neighbors.RadiusNeighborsRegressor,\r\n ):\r\n metric_score = cross_val_score(Est(), X, y)\r\n precomp_score = cross_val_score(Est(metric=\"precomputed\"), D, y)\r\n assert_array_equal(metric_score, precomp_score)\r\n\r\n\r\ndef test_unsupervised_radius_neighbors(\r\n n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0\r\n):\r\n # Test unsupervised radius-based query\r\n rng = np.random.RandomState(random_state)\r\n\r\n X = rng.rand(n_samples, n_features)\r\n\r\n test = rng.rand(n_query_pts, n_features)\r\n\r\n for p in P:\r\n results = []\r\n\r\n for algorithm in ALGORITHMS:\r\n neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p)\r\n neigh.fit(X)\r\n\r\n ind1 = neigh.radius_neighbors(test, return_distance=False)\r\n\r\n # sort the results: this is not done automatically for\r\n # radius searches\r\n dist, ind = neigh.radius_neighbors(test, return_distance=True)\r\n for (d, i, i1) in zip(dist, ind, ind1):\r\n j = d.argsort()\r\n d[:] = d[j]\r\n i[:] = i[j]\r\n i1[:] = i1[j]\r\n results.append((dist, ind))\r\n\r\n assert_array_almost_equal(\r\n np.concatenate(list(ind)), np.concatenate(list(ind1))\r\n )\r\n\r\n for i in range(len(results) - 1):\r\n assert_array_almost_equal(\r\n np.concatenate(list(results[i][0])),\r\n np.concatenate(list(results[i + 1][0])),\r\n ),\r\n assert_array_almost_equal(\r\n np.concatenate(list(results[i][1])),\r\n np.concatenate(list(results[i + 1][1])),\r\n )\r\n\r\n\r\ndef test_kneighbors_classifier(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0\r\n):\r\n # Test k-neighbors classification\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = ((X ** 2).sum(axis=1) < 0.5).astype(int)\r\n y_str = y.astype(str)\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n knn = neighbors.KNeighborsClassifier(\r\n n_neighbors=n_neighbors, weights=weights, algorithm=algorithm\r\n )\r\n knn.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\r\n assert_array_equal(y_pred, y[:n_test_pts])\r\n # Test prediction with y_str\r\n knn.fit(X, y_str)\r\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\r\n assert_array_equal(y_pred, y_str[:n_test_pts])\r\n\r\n\r\ndef test_kneighbors_classifier_float_labels(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0\r\n):\r\n # Test k-neighbors classification\r\n rng = 
np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = ((X ** 2).sum(axis=1) < 0.5).astype(int)\r\n\r\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\r\n knn.fit(X, y.astype(float))\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\r\n assert_array_equal(y_pred, y[:n_test_pts])\r\n\r\n\r\ndef test_kneighbors_classifier_predict_proba():\r\n # Test KNeighborsClassifier.predict_proba() method\r\n X = np.array([[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]])\r\n y = np.array([4, 4, 5, 5, 1, 1])\r\n cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist\r\n cls.fit(X, y)\r\n y_prob = cls.predict_proba(X)\r\n real_prob = np.array(\r\n [\r\n [0, 2.0 / 3, 1.0 / 3],\r\n [1.0 / 3, 2.0 / 3, 0],\r\n [1.0 / 3, 0, 2.0 / 3],\r\n [0, 1.0 / 3, 2.0 / 3],\r\n [2.0 / 3, 1.0 / 3, 0],\r\n [2.0 / 3, 1.0 / 3, 0],\r\n ]\r\n )\r\n assert_array_equal(real_prob, y_prob)\r\n # Check that it also works with non integer labels\r\n cls.fit(X, y.astype(str))\r\n y_prob = cls.predict_proba(X)\r\n assert_array_equal(real_prob, y_prob)\r\n # Check that it works with weights='distance'\r\n cls = neighbors.KNeighborsClassifier(n_neighbors=2, p=1, weights=\"distance\")\r\n cls.fit(X, y)\r\n y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))\r\n real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])\r\n assert_array_almost_equal(real_prob, y_prob)\r\n\r\n\r\ndef test_radius_neighbors_classifier(\r\n n_samples=40, n_features=5, n_test_pts=10, radius=0.5, random_state=0\r\n):\r\n # Test radius-based classification\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = ((X ** 2).sum(axis=1) < 0.5).astype(int)\r\n y_str = y.astype(str)\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n neigh = neighbors.RadiusNeighborsClassifier(\r\n radius=radius, weights=weights, algorithm=algorithm\r\n )\r\n neigh.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\r\n assert_array_equal(y_pred, y[:n_test_pts])\r\n neigh.fit(X, y_str)\r\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\r\n assert_array_equal(y_pred, y_str[:n_test_pts])\r\n\r\n\r\n# TODO: Remove in v1.2\r\ndef test_radius_neighbors_classifier_kwargs_is_deprecated():\r\n extra_kwargs = {\r\n \"unused_param\": \"\",\r\n \"extra_param\": None,\r\n }\r\n msg = (\r\n \"Passing additional keyword parameters has no effect and is deprecated \"\r\n \"in 1.0. An error will be raised from 1.2 and beyond. 
The ignored \"\r\n f\"keyword parameter(s) are: {extra_kwargs.keys()}.\"\r\n )\r\n with pytest.warns(FutureWarning, match=re.escape(msg)):\r\n neighbors.RadiusNeighborsClassifier(**extra_kwargs)\r\n\r\n\r\ndef test_radius_neighbors_classifier_when_no_neighbors():\r\n # Test radius-based classifier when no neighbors found.\r\n # In this case it should rise an informative exception\r\n\r\n X = np.array([[1.0, 1.0], [2.0, 2.0]])\r\n y = np.array([1, 2])\r\n radius = 0.1\r\n\r\n z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers\r\n z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier\r\n\r\n weight_func = _weight_func\r\n\r\n for outlier_label in [0, -1, None]:\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n rnc = neighbors.RadiusNeighborsClassifier\r\n clf = rnc(\r\n radius=radius,\r\n weights=weights,\r\n algorithm=algorithm,\r\n outlier_label=outlier_label,\r\n )\r\n clf.fit(X, y)\r\n assert_array_equal(np.array([1, 2]), clf.predict(z1))\r\n if outlier_label is None:\r\n with pytest.raises(ValueError):\r\n clf.predict(z2)\r\n\r\n\r\ndef test_radius_neighbors_classifier_outlier_labeling():\r\n # Test radius-based classifier when no neighbors found and outliers\r\n # are labeled.\r\n\r\n X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99], [0.98, 0.98], [2.01, 2.01]])\r\n y = np.array([1, 2, 1, 1, 2])\r\n radius = 0.1\r\n\r\n z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers\r\n z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier\r\n correct_labels1 = np.array([1, 2])\r\n correct_labels2 = np.array([-1, 1, 2])\r\n outlier_proba = np.array([0, 0])\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n clf = neighbors.RadiusNeighborsClassifier(\r\n radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1\r\n )\r\n clf.fit(X, y)\r\n assert_array_equal(correct_labels1, clf.predict(z1))\r\n assert_array_equal(correct_labels2, clf.predict(z2))\r\n assert_array_equal(outlier_proba, clf.predict_proba(z2)[0])\r\n\r\n # test outlier_labeling of using predict_proba()\r\n RNC = neighbors.RadiusNeighborsClassifier\r\n X = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])\r\n y = np.array([0, 2, 2, 1, 1, 1, 3, 3, 3, 3])\r\n\r\n # test outlier_label scalar verification\r\n def check_array_exception():\r\n clf = RNC(radius=1, outlier_label=[[5]])\r\n clf.fit(X, y)\r\n\r\n with pytest.raises(TypeError):\r\n check_array_exception()\r\n\r\n # test invalid outlier_label dtype\r\n def check_dtype_exception():\r\n clf = RNC(radius=1, outlier_label=\"a\")\r\n clf.fit(X, y)\r\n\r\n with pytest.raises(TypeError):\r\n check_dtype_exception()\r\n\r\n # test most frequent\r\n clf = RNC(radius=1, outlier_label=\"most_frequent\")\r\n clf.fit(X, y)\r\n proba = clf.predict_proba([[1], [15]])\r\n assert_array_equal(proba[1, :], [0, 0, 0, 1])\r\n\r\n # test manual label in y\r\n clf = RNC(radius=1, outlier_label=1)\r\n clf.fit(X, y)\r\n proba = clf.predict_proba([[1], [15]])\r\n assert_array_equal(proba[1, :], [0, 1, 0, 0])\r\n pred = clf.predict([[1], [15]])\r\n assert_array_equal(pred, [2, 1])\r\n\r\n # test manual label out of y warning\r\n def check_warning():\r\n clf = RNC(radius=1, outlier_label=4)\r\n clf.fit(X, y)\r\n clf.predict_proba([[1], [15]])\r\n\r\n with pytest.warns(UserWarning):\r\n check_warning()\r\n\r\n # test multi output same outlier label\r\n y_multi = [\r\n [0, 1],\r\n [2, 1],\r\n [2, 2],\r\n [1, 2],\r\n [1, 
2],\r\n [1, 3],\r\n [3, 3],\r\n [3, 3],\r\n [3, 0],\r\n [3, 0],\r\n ]\r\n clf = RNC(radius=1, outlier_label=1)\r\n clf.fit(X, y_multi)\r\n proba = clf.predict_proba([[7], [15]])\r\n assert_array_equal(proba[1][1, :], [0, 1, 0, 0])\r\n pred = clf.predict([[7], [15]])\r\n assert_array_equal(pred[1, :], [1, 1])\r\n\r\n # test multi output different outlier label\r\n y_multi = [\r\n [0, 0],\r\n [2, 2],\r\n [2, 2],\r\n [1, 1],\r\n [1, 1],\r\n [1, 1],\r\n [3, 3],\r\n [3, 3],\r\n [3, 3],\r\n [3, 3],\r\n ]\r\n clf = RNC(radius=1, outlier_label=[0, 1])\r\n clf.fit(X, y_multi)\r\n proba = clf.predict_proba([[7], [15]])\r\n assert_array_equal(proba[0][1, :], [1, 0, 0, 0])\r\n assert_array_equal(proba[1][1, :], [0, 1, 0, 0])\r\n pred = clf.predict([[7], [15]])\r\n assert_array_equal(pred[1, :], [0, 1])\r\n\r\n # test inconsistent outlier label list length\r\n def check_exception():\r\n clf = RNC(radius=1, outlier_label=[0, 1, 2])\r\n clf.fit(X, y_multi)\r\n\r\n with pytest.raises(ValueError):\r\n check_exception()\r\n\r\n\r\ndef test_radius_neighbors_classifier_zero_distance():\r\n # Test radius-based classifier, when distance to a sample is zero.\r\n\r\n X = np.array([[1.0, 1.0], [2.0, 2.0]])\r\n y = np.array([1, 2])\r\n radius = 0.1\r\n\r\n z1 = np.array([[1.01, 1.01], [2.0, 2.0]])\r\n correct_labels1 = np.array([1, 2])\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n clf = neighbors.RadiusNeighborsClassifier(\r\n radius=radius, weights=weights, algorithm=algorithm\r\n )\r\n clf.fit(X, y)\r\n with np.errstate(invalid=\"ignore\"):\r\n # Ignore the warning raised in _weight_func when making\r\n # predictions with null distances resulting in np.inf values.\r\n assert_array_equal(correct_labels1, clf.predict(z1))\r\n\r\n\r\ndef test_neighbors_regressors_zero_distance():\r\n # Test radius-based regressor, when distance to a sample is zero.\r\n\r\n X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])\r\n y = np.array([1.0, 1.5, 2.0, 0.0])\r\n radius = 0.2\r\n z = np.array([[1.1, 1.1], [2.0, 2.0]])\r\n\r\n rnn_correct_labels = np.array([1.25, 2.0])\r\n\r\n knn_correct_unif = np.array([1.25, 1.0])\r\n knn_correct_dist = np.array([1.25, 2.0])\r\n\r\n for algorithm in ALGORITHMS:\r\n # we don't test for weights=_weight_func since user will be expected\r\n # to handle zero distances themselves in the function.\r\n for weights in [\"uniform\", \"distance\"]:\r\n rnn = neighbors.RadiusNeighborsRegressor(\r\n radius=radius, weights=weights, algorithm=algorithm\r\n )\r\n rnn.fit(X, y)\r\n assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))\r\n\r\n for weights, corr_labels in zip(\r\n [\"uniform\", \"distance\"], [knn_correct_unif, knn_correct_dist]\r\n ):\r\n knn = neighbors.KNeighborsRegressor(\r\n n_neighbors=2, weights=weights, algorithm=algorithm\r\n )\r\n knn.fit(X, y)\r\n assert_array_almost_equal(corr_labels, knn.predict(z))\r\n\r\n\r\ndef test_radius_neighbors_boundary_handling():\r\n \"\"\"Test whether points lying on boundary are handled consistently\r\n\r\n Also ensures that even with only one query point, an object array\r\n is returned rather than a 2d array.\r\n \"\"\"\r\n\r\n X = np.array([[1.5], [3.0], [3.01]])\r\n radius = 3.0\r\n\r\n for algorithm in ALGORITHMS:\r\n nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X)\r\n results = nbrs.radius_neighbors([[0.0]], return_distance=False)\r\n assert results.shape == (1,)\r\n assert results.dtype == object\r\n 
assert_array_equal(results[0], [0, 1])\r\n\r\n\r\ndef test_radius_neighbors_returns_array_of_objects():\r\n # check that we can pass precomputed distances to\r\n # NearestNeighbors.radius_neighbors()\r\n # non-regression test for\r\n # https://github.com/scikit-learn/scikit-learn/issues/16036\r\n X = csr_matrix(np.ones((4, 4)))\r\n X.setdiag([0, 0, 0, 0])\r\n\r\n nbrs = neighbors.NearestNeighbors(\r\n radius=0.5, algorithm=\"auto\", leaf_size=30, metric=\"precomputed\"\r\n ).fit(X)\r\n neigh_dist, neigh_ind = nbrs.radius_neighbors(X, return_distance=True)\r\n\r\n expected_dist = np.empty(X.shape[0], dtype=object)\r\n expected_dist[:] = [np.array([0]), np.array([0]), np.array([0]), np.array([0])]\r\n expected_ind = np.empty(X.shape[0], dtype=object)\r\n expected_ind[:] = [np.array([0]), np.array([1]), np.array([2]), np.array([3])]\r\n\r\n assert_array_equal(neigh_dist, expected_dist)\r\n assert_array_equal(neigh_ind, expected_ind)\r\n\r\n\r\[email protected](\"algorithm\", [\"ball_tree\", \"kd_tree\", \"brute\"])\r\ndef test_query_equidistant_kth_nn(algorithm):\r\n # For several candidates for the k-th nearest neighbor position,\r\n # the first candidate should be chosen\r\n query_point = np.array([[0, 0]])\r\n equidistant_points = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])\r\n # The 3rd and 4th points should not replace the 2nd point\r\n # for the 2th nearest neighbor position\r\n k = 2\r\n knn_indices = np.array([[0, 1]])\r\n nn = neighbors.NearestNeighbors(algorithm=algorithm).fit(equidistant_points)\r\n indices = np.sort(nn.kneighbors(query_point, n_neighbors=k, return_distance=False))\r\n assert_array_equal(indices, knn_indices)\r\n\r\n\r\[email protected](\r\n [\"algorithm\", \"metric\"],\r\n [\r\n (\"ball_tree\", \"euclidean\"),\r\n (\"kd_tree\", \"euclidean\"),\r\n (\"brute\", \"euclidean\"),\r\n (\"brute\", \"precomputed\"),\r\n ],\r\n)\r\ndef test_radius_neighbors_sort_results(algorithm, metric):\r\n # Test radius_neighbors[_graph] output when sort_result is True\r\n n_samples = 10\r\n rng = np.random.RandomState(42)\r\n X = rng.random_sample((n_samples, 4))\r\n\r\n if metric == \"precomputed\":\r\n X = neighbors.radius_neighbors_graph(X, radius=np.inf, mode=\"distance\")\r\n model = neighbors.NearestNeighbors(algorithm=algorithm, metric=metric)\r\n model.fit(X)\r\n\r\n # self.radius_neighbors\r\n distances, indices = model.radius_neighbors(X=X, radius=np.inf, sort_results=True)\r\n for ii in range(n_samples):\r\n assert_array_equal(distances[ii], np.sort(distances[ii]))\r\n\r\n # sort_results=True and return_distance=False\r\n if metric != \"precomputed\": # no need to raise with precomputed graph\r\n with pytest.raises(ValueError, match=\"return_distance must be True\"):\r\n model.radius_neighbors(\r\n X=X, radius=np.inf, sort_results=True, return_distance=False\r\n )\r\n\r\n # self.radius_neighbors_graph\r\n graph = model.radius_neighbors_graph(\r\n X=X, radius=np.inf, mode=\"distance\", sort_results=True\r\n )\r\n assert _is_sorted_by_data(graph)\r\n\r\n\r\ndef test_RadiusNeighborsClassifier_multioutput():\r\n # Test k-NN classifier on multioutput data\r\n rng = check_random_state(0)\r\n n_features = 2\r\n n_samples = 40\r\n n_output = 3\r\n\r\n X = rng.rand(n_samples, n_features)\r\n y = rng.randint(0, 3, (n_samples, n_output))\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\r\n\r\n weights = [None, \"uniform\", \"distance\", _weight_func]\r\n\r\n for algorithm, weights in product(ALGORITHMS, weights):\r\n # Stack single output 
prediction\r\n y_pred_so = []\r\n for o in range(n_output):\r\n rnn = neighbors.RadiusNeighborsClassifier(\r\n weights=weights, algorithm=algorithm\r\n )\r\n rnn.fit(X_train, y_train[:, o])\r\n y_pred_so.append(rnn.predict(X_test))\r\n\r\n y_pred_so = np.vstack(y_pred_so).T\r\n assert y_pred_so.shape == y_test.shape\r\n\r\n # Multioutput prediction\r\n rnn_mo = neighbors.RadiusNeighborsClassifier(\r\n weights=weights, algorithm=algorithm\r\n )\r\n rnn_mo.fit(X_train, y_train)\r\n y_pred_mo = rnn_mo.predict(X_test)\r\n\r\n assert y_pred_mo.shape == y_test.shape\r\n assert_array_almost_equal(y_pred_mo, y_pred_so)\r\n\r\n\r\ndef test_kneighbors_classifier_sparse(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0\r\n):\r\n # Test k-NN classifier on sparse matrices\r\n # Like the above, but with various types of sparse matrices\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n X *= X > 0.2\r\n y = ((X ** 2).sum(axis=1) < 0.5).astype(int)\r\n\r\n for sparsemat in SPARSE_TYPES:\r\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=\"auto\")\r\n knn.fit(sparsemat(X), y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n for sparsev in SPARSE_TYPES + (np.asarray,):\r\n X_eps = sparsev(X[:n_test_pts] + epsilon)\r\n y_pred = knn.predict(X_eps)\r\n assert_array_equal(y_pred, y[:n_test_pts])\r\n\r\n\r\ndef test_KNeighborsClassifier_multioutput():\r\n # Test k-NN classifier on multioutput data\r\n rng = check_random_state(0)\r\n n_features = 5\r\n n_samples = 50\r\n n_output = 3\r\n\r\n X = rng.rand(n_samples, n_features)\r\n y = rng.randint(0, 3, (n_samples, n_output))\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\r\n\r\n weights = [None, \"uniform\", \"distance\", _weight_func]\r\n\r\n for algorithm, weights in product(ALGORITHMS, weights):\r\n # Stack single output prediction\r\n y_pred_so = []\r\n y_pred_proba_so = []\r\n for o in range(n_output):\r\n knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)\r\n knn.fit(X_train, y_train[:, o])\r\n y_pred_so.append(knn.predict(X_test))\r\n y_pred_proba_so.append(knn.predict_proba(X_test))\r\n\r\n y_pred_so = np.vstack(y_pred_so).T\r\n assert y_pred_so.shape == y_test.shape\r\n assert len(y_pred_proba_so) == n_output\r\n\r\n # Multioutput prediction\r\n knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm)\r\n knn_mo.fit(X_train, y_train)\r\n y_pred_mo = knn_mo.predict(X_test)\r\n\r\n assert y_pred_mo.shape == y_test.shape\r\n assert_array_almost_equal(y_pred_mo, y_pred_so)\r\n\r\n # Check proba\r\n y_pred_proba_mo = knn_mo.predict_proba(X_test)\r\n assert len(y_pred_proba_mo) == n_output\r\n\r\n for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):\r\n assert_array_almost_equal(proba_mo, proba_so)\r\n\r\n\r\ndef test_kneighbors_regressor(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0\r\n):\r\n # Test k-neighbors regression\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = np.sqrt((X ** 2).sum(1))\r\n y /= y.max()\r\n\r\n y_target = y[:n_test_pts]\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n knn = neighbors.KNeighborsRegressor(\r\n n_neighbors=n_neighbors, weights=weights, algorithm=algorithm\r\n )\r\n knn.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = 
knn.predict(X[:n_test_pts] + epsilon)\r\n assert np.all(abs(y_pred - y_target) < 0.3)\r\n\r\n\r\ndef test_KNeighborsRegressor_multioutput_uniform_weight():\r\n # Test k-neighbors in multi-output regression with uniform weight\r\n rng = check_random_state(0)\r\n n_features = 5\r\n n_samples = 40\r\n n_output = 4\r\n\r\n X = rng.rand(n_samples, n_features)\r\n y = rng.rand(n_samples, n_output)\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\r\n for algorithm, weights in product(ALGORITHMS, [None, \"uniform\"]):\r\n knn = neighbors.KNeighborsRegressor(weights=weights, algorithm=algorithm)\r\n knn.fit(X_train, y_train)\r\n\r\n neigh_idx = knn.kneighbors(X_test, return_distance=False)\r\n y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx])\r\n\r\n y_pred = knn.predict(X_test)\r\n\r\n assert y_pred.shape == y_test.shape\r\n assert y_pred_idx.shape == y_test.shape\r\n assert_array_almost_equal(y_pred, y_pred_idx)\r\n\r\n\r\ndef test_kneighbors_regressor_multioutput(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0\r\n):\r\n # Test k-neighbors in multi-output regression\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = np.sqrt((X ** 2).sum(1))\r\n y /= y.max()\r\n y = np.vstack([y, y]).T\r\n\r\n y_target = y[:n_test_pts]\r\n\r\n weights = [\"uniform\", \"distance\", _weight_func]\r\n for algorithm, weights in product(ALGORITHMS, weights):\r\n knn = neighbors.KNeighborsRegressor(\r\n n_neighbors=n_neighbors, weights=weights, algorithm=algorithm\r\n )\r\n knn.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\r\n assert y_pred.shape == y_target.shape\r\n\r\n assert np.all(np.abs(y_pred - y_target) < 0.3)\r\n\r\n\r\ndef test_radius_neighbors_regressor(\r\n n_samples=40, n_features=3, n_test_pts=10, radius=0.5, random_state=0\r\n):\r\n # Test radius-based neighbors regression\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = np.sqrt((X ** 2).sum(1))\r\n y /= y.max()\r\n\r\n y_target = y[:n_test_pts]\r\n\r\n weight_func = _weight_func\r\n\r\n for algorithm in ALGORITHMS:\r\n for weights in [\"uniform\", \"distance\", weight_func]:\r\n neigh = neighbors.RadiusNeighborsRegressor(\r\n radius=radius, weights=weights, algorithm=algorithm\r\n )\r\n neigh.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\r\n assert np.all(abs(y_pred - y_target) < radius / 2)\r\n\r\n # test that nan is returned when no nearby observations\r\n for weights in [\"uniform\", \"distance\"]:\r\n neigh = neighbors.RadiusNeighborsRegressor(\r\n radius=radius, weights=weights, algorithm=\"auto\"\r\n )\r\n neigh.fit(X, y)\r\n X_test_nan = np.full((1, n_features), -1.0)\r\n empty_warning_msg = (\r\n \"One or more samples have no neighbors \"\r\n \"within specified radius; predicting NaN.\"\r\n )\r\n with pytest.warns(UserWarning, match=re.escape(empty_warning_msg)):\r\n pred = neigh.predict(X_test_nan)\r\n assert np.all(np.isnan(pred))\r\n\r\n\r\ndef test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():\r\n # Test radius neighbors in multi-output regression (uniform weight)\r\n\r\n rng = check_random_state(0)\r\n n_features = 5\r\n n_samples = 40\r\n n_output = 4\r\n\r\n X = rng.rand(n_samples, n_features)\r\n y = rng.rand(n_samples, n_output)\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, 
random_state=0)\r\n\r\n for algorithm, weights in product(ALGORITHMS, [None, \"uniform\"]):\r\n\r\n rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm)\r\n rnn.fit(X_train, y_train)\r\n\r\n neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)\r\n y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx])\r\n\r\n y_pred_idx = np.array(y_pred_idx)\r\n y_pred = rnn.predict(X_test)\r\n\r\n assert y_pred_idx.shape == y_test.shape\r\n assert y_pred.shape == y_test.shape\r\n assert_array_almost_equal(y_pred, y_pred_idx)\r\n\r\n\r\ndef test_RadiusNeighborsRegressor_multioutput(\r\n n_samples=40, n_features=5, n_test_pts=10, random_state=0\r\n):\r\n # Test k-neighbors in multi-output regression with various weight\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = np.sqrt((X ** 2).sum(1))\r\n y /= y.max()\r\n y = np.vstack([y, y]).T\r\n\r\n y_target = y[:n_test_pts]\r\n weights = [\"uniform\", \"distance\", _weight_func]\r\n\r\n for algorithm, weights in product(ALGORITHMS, weights):\r\n rnn = neighbors.RadiusNeighborsRegressor(weights=weights, algorithm=algorithm)\r\n rnn.fit(X, y)\r\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\r\n y_pred = rnn.predict(X[:n_test_pts] + epsilon)\r\n\r\n assert y_pred.shape == y_target.shape\r\n assert np.all(np.abs(y_pred - y_target) < 0.3)\r\n\r\n\r\n@ignore_warnings(category=EfficiencyWarning)\r\ndef test_kneighbors_regressor_sparse(\r\n n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0\r\n):\r\n # Test radius-based regression on sparse matrices\r\n # Like the above, but with various types of sparse matrices\r\n rng = np.random.RandomState(random_state)\r\n X = 2 * rng.rand(n_samples, n_features) - 1\r\n y = ((X ** 2).sum(axis=1) < 0.25).astype(int)\r\n\r\n for sparsemat in SPARSE_TYPES:\r\n knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, algorithm=\"auto\")\r\n knn.fit(sparsemat(X), y)\r\n\r\n knn_pre = neighbors.KNeighborsRegressor(\r\n n_neighbors=n_neighbors, metric=\"precomputed\"\r\n )\r\n knn_pre.fit(pairwise_distances(X, metric=\"euclidean\"), y)\r\n\r\n for sparsev in SPARSE_OR_DENSE:\r\n X2 = sparsev(X)\r\n assert np.mean(knn.predict(X2).round() == y) > 0.95\r\n\r\n X2_pre = sparsev(pairwise_distances(X, metric=\"euclidean\"))\r\n if sparsev in {dok_matrix, bsr_matrix}:\r\n msg = \"not supported due to its handling of explicit zeros\"\r\n with pytest.raises(TypeError, match=msg):\r\n knn_pre.predict(X2_pre)\r\n else:\r\n assert np.mean(knn_pre.predict(X2_pre).round() == y) > 0.95\r\n\r\n\r\ndef test_neighbors_iris():\r\n # Sanity checks on the iris dataset\r\n # Puts three points of each label in the plane and performs a\r\n # nearest neighbor query on points near the decision boundary.\r\n\r\n for algorithm in ALGORITHMS:\r\n clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=algorithm)\r\n clf.fit(iris.data, iris.target)\r\n assert_array_equal(clf.predict(iris.data), iris.target)\r\n\r\n clf.set_params(n_neighbors=9, algorithm=algorithm)\r\n clf.fit(iris.data, iris.target)\r\n assert np.mean(clf.predict(iris.data) == iris.target) > 0.95\r\n\r\n rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)\r\n rgs.fit(iris.data, iris.target)\r\n assert np.mean(rgs.predict(iris.data).round() == iris.target) > 0.95\r\n\r\n\r\ndef test_neighbors_digits():\r\n # Sanity check on the digits dataset\r\n # the 'brute' algorithm has been observed to fail if the input\r\n # dtype is uint8 due 
to overflow in distance calculations.\r\n\r\n X = digits.data.astype(\"uint8\")\r\n Y = digits.target\r\n (n_samples, n_features) = X.shape\r\n train_test_boundary = int(n_samples * 0.8)\r\n train = np.arange(0, train_test_boundary)\r\n test = np.arange(train_test_boundary, n_samples)\r\n (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]\r\n\r\n clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=\"brute\")\r\n score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)\r\n score_float = clf.fit(X_train.astype(float, copy=False), Y_train).score(\r\n X_test.astype(float, copy=False), Y_test\r\n )\r\n assert score_uint8 == score_float\r\n\r\n\r\ndef test_kneighbors_graph():\r\n # Test kneighbors_graph to build the k-Nearest Neighbor graph.\r\n X = np.array([[0, 1], [1.01, 1.0], [2, 0]])\r\n\r\n # n_neighbors = 1\r\n A = neighbors.kneighbors_graph(X, 1, mode=\"connectivity\", include_self=True)\r\n assert_array_equal(A.toarray(), np.eye(A.shape[0]))\r\n\r\n A = neighbors.kneighbors_graph(X, 1, mode=\"distance\")\r\n assert_array_almost_equal(\r\n A.toarray(), [[0.00, 1.01, 0.0], [1.01, 0.0, 0.0], [0.00, 1.40716026, 0.0]]\r\n )\r\n\r\n # n_neighbors = 2\r\n A = neighbors.kneighbors_graph(X, 2, mode=\"connectivity\", include_self=True)\r\n assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]])\r\n\r\n A = neighbors.kneighbors_graph(X, 2, mode=\"distance\")\r\n assert_array_almost_equal(\r\n A.toarray(),\r\n [\r\n [0.0, 1.01, 2.23606798],\r\n [1.01, 0.0, 1.40716026],\r\n [2.23606798, 1.40716026, 0.0],\r\n ],\r\n )\r\n\r\n # n_neighbors = 3\r\n A = neighbors.kneighbors_graph(X, 3, mode=\"connectivity\", include_self=True)\r\n assert_array_almost_equal(A.toarray(), [[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\n\r\n\r\ndef test_kneighbors_graph_sparse(seed=36):\r\n # Test kneighbors_graph to build the k-Nearest Neighbor graph\r\n # for sparse input.\r\n rng = np.random.RandomState(seed)\r\n X = rng.randn(10, 10)\r\n Xcsr = csr_matrix(X)\r\n\r\n for n_neighbors in [1, 2, 3]:\r\n for mode in [\"connectivity\", \"distance\"]:\r\n assert_array_almost_equal(\r\n neighbors.kneighbors_graph(X, n_neighbors, mode=mode).toarray(),\r\n neighbors.kneighbors_graph(Xcsr, n_neighbors, mode=mode).toarray(),\r\n )\r\n\r\n\r\ndef test_radius_neighbors_graph():\r\n # Test radius_neighbors_graph to build the Nearest Neighbor graph.\r\n X = np.array([[0, 1], [1.01, 1.0], [2, 0]])\r\n\r\n A = neighbors.radius_neighbors_graph(X, 1.5, mode=\"connectivity\", include_self=True)\r\n assert_array_equal(A.toarray(), [[1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]])\r\n\r\n A = neighbors.radius_neighbors_graph(X, 1.5, mode=\"distance\")\r\n assert_array_almost_equal(\r\n A.toarray(), [[0.0, 1.01, 0.0], [1.01, 0.0, 1.40716026], [0.0, 1.40716026, 0.0]]\r\n )\r\n\r\n\r\ndef test_radius_neighbors_graph_sparse(seed=36):\r\n # Test radius_neighbors_graph to build the Nearest Neighbor graph\r\n # for sparse input.\r\n rng = np.random.RandomState(seed)\r\n X = rng.randn(10, 10)\r\n Xcsr = csr_matrix(X)\r\n\r\n for n_neighbors in [1, 2, 3]:\r\n for mode in [\"connectivity\", \"distance\"]:\r\n assert_array_almost_equal(\r\n neighbors.radius_neighbors_graph(X, n_neighbors, mode=mode).toarray(),\r\n neighbors.radius_neighbors_graph(\r\n Xcsr, n_neighbors, mode=mode\r\n ).toarray(),\r\n )\r\n\r\n\r\ndef test_neighbors_badargs():\r\n # Test bad argument values: these should all raise ValueErrors\r\n X = rng.random_sample((10, 2))\r\n Xsparse = csr_matrix(X)\r\n X3 = 
rng.random_sample((10, 3))\r\n y = np.ones(10)\r\n\r\n est = neighbors.NearestNeighbors(algorithm=\"blah\")\r\n with pytest.raises(ValueError):\r\n est.fit(X)\r\n\r\n for cls in (\r\n neighbors.KNeighborsClassifier,\r\n neighbors.RadiusNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n neighbors.RadiusNeighborsRegressor,\r\n ):\r\n est = cls(weights=\"blah\")\r\n with pytest.raises(ValueError):\r\n est.fit(X, y)\r\n est = cls(p=-1)\r\n with pytest.raises(ValueError):\r\n est.fit(X, y)\r\n est = cls(algorithm=\"blah\")\r\n with pytest.raises(ValueError):\r\n est.fit(X, y)\r\n\r\n nbrs = cls(algorithm=\"ball_tree\", metric=\"haversine\")\r\n with pytest.raises(ValueError):\r\n nbrs.predict(X)\r\n with pytest.raises(ValueError):\r\n ignore_warnings(nbrs.fit(Xsparse, y))\r\n\r\n nbrs = cls(metric=\"haversine\", algorithm=\"brute\")\r\n nbrs.fit(X3, y)\r\n msg = \"Haversine distance only valid in 2 dimensions\"\r\n with pytest.raises(ValueError, match=msg):\r\n nbrs.predict(X3)\r\n\r\n nbrs = cls()\r\n with pytest.raises(ValueError):\r\n nbrs.fit(np.ones((0, 2)), np.ones(0))\r\n with pytest.raises(ValueError):\r\n nbrs.fit(X[:, :, None], y)\r\n nbrs.fit(X, y)\r\n with pytest.raises(ValueError):\r\n nbrs.predict([[]])\r\n if issubclass(cls, neighbors.KNeighborsClassifier) or issubclass(\r\n cls, neighbors.KNeighborsRegressor\r\n ):\r\n nbrs = cls(n_neighbors=-1)\r\n with pytest.raises(ValueError):\r\n nbrs.fit(X, y)\r\n\r\n nbrs = neighbors.NearestNeighbors().fit(X)\r\n\r\n with pytest.raises(ValueError):\r\n nbrs.kneighbors_graph(X, mode=\"blah\")\r\n with pytest.raises(ValueError):\r\n nbrs.radius_neighbors_graph(X, mode=\"blah\")\r\n\r\n\r\ndef test_neighbors_metrics(n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5):\r\n # Test computing the neighbors for various metrics\r\n # create a symmetric matrix\r\n V = rng.rand(n_features, n_features)\r\n VI = np.dot(V, V.T)\r\n\r\n metrics = [\r\n (\"euclidean\", {}),\r\n (\"manhattan\", {}),\r\n (\"minkowski\", dict(p=1)),\r\n (\"minkowski\", dict(p=2)),\r\n (\"minkowski\", dict(p=3)),\r\n (\"minkowski\", dict(p=np.inf)),\r\n (\"chebyshev\", {}),\r\n (\"seuclidean\", dict(V=rng.rand(n_features))),\r\n (\"wminkowski\", dict(p=3, w=rng.rand(n_features))),\r\n (\"mahalanobis\", dict(VI=VI)),\r\n (\"haversine\", {}),\r\n ]\r\n algorithms = [\"brute\", \"ball_tree\", \"kd_tree\"]\r\n X = rng.rand(n_samples, n_features)\r\n\r\n test = rng.rand(n_query_pts, n_features)\r\n\r\n for metric, metric_params in metrics:\r\n if metric == \"wminkowski\" and sp_version >= parse_version(\"1.8.0\"):\r\n # wminkowski will be removed in SciPy 1.8.0\r\n continue\r\n results = {}\r\n p = metric_params.pop(\"p\", 2)\r\n for algorithm in algorithms:\r\n # KD tree doesn't support all metrics\r\n if algorithm == \"kd_tree\" and metric not in neighbors.KDTree.valid_metrics:\r\n est = neighbors.NearestNeighbors(\r\n algorithm=algorithm, metric=metric, metric_params=metric_params\r\n )\r\n with pytest.raises(ValueError):\r\n est.fit(X)\r\n continue\r\n neigh = neighbors.NearestNeighbors(\r\n n_neighbors=n_neighbors,\r\n algorithm=algorithm,\r\n metric=metric,\r\n p=p,\r\n metric_params=metric_params,\r\n )\r\n\r\n # Haversine distance only accepts 2D data\r\n feature_sl = slice(None, 2) if metric == \"haversine\" else slice(None)\r\n\r\n neigh.fit(X[:, feature_sl])\r\n\r\n # wminkoski is deprecated in SciPy 1.6.0 and removed in 1.8.0\r\n ExceptionToAssert = None\r\n if (\r\n metric == \"wminkowski\"\r\n and algorithm == \"brute\"\r\n and sp_version >= 
parse_version(\"1.6.0\")\r\n ):\r\n ExceptionToAssert = DeprecationWarning\r\n\r\n with pytest.warns(ExceptionToAssert):\r\n results[algorithm] = neigh.kneighbors(\r\n test[:, feature_sl], return_distance=True\r\n )\r\n\r\n assert_array_almost_equal(results[\"brute\"][0], results[\"ball_tree\"][0])\r\n assert_array_almost_equal(results[\"brute\"][1], results[\"ball_tree\"][1])\r\n if \"kd_tree\" in results:\r\n assert_array_almost_equal(results[\"brute\"][0], results[\"kd_tree\"][0])\r\n assert_array_almost_equal(results[\"brute\"][1], results[\"kd_tree\"][1])\r\n\r\n\r\ndef test_callable_metric():\r\n def custom_metric(x1, x2):\r\n return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))\r\n\r\n X = np.random.RandomState(42).rand(20, 2)\r\n nbrs1 = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"auto\", metric=custom_metric\r\n )\r\n nbrs2 = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"brute\", metric=custom_metric\r\n )\r\n\r\n nbrs1.fit(X)\r\n nbrs2.fit(X)\r\n\r\n dist1, ind1 = nbrs1.kneighbors(X)\r\n dist2, ind2 = nbrs2.kneighbors(X)\r\n\r\n assert_array_almost_equal(dist1, dist2)\r\n\r\n\r\ndef test_valid_brute_metric_for_auto_algorithm():\r\n X = rng.rand(12, 12)\r\n Xcsr = csr_matrix(X)\r\n\r\n # check that there is a metric that is valid for brute\r\n # but not ball_tree (so we actually test something)\r\n assert \"cosine\" in VALID_METRICS[\"brute\"]\r\n assert \"cosine\" not in VALID_METRICS[\"ball_tree\"]\r\n\r\n # Metric which don't required any additional parameter\r\n require_params = [\"mahalanobis\", \"wminkowski\", \"seuclidean\"]\r\n for metric in VALID_METRICS[\"brute\"]:\r\n if metric != \"precomputed\" and metric not in require_params:\r\n nn = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"auto\", metric=metric\r\n )\r\n if metric != \"haversine\":\r\n nn.fit(X)\r\n nn.kneighbors(X)\r\n else:\r\n nn.fit(X[:, :2])\r\n nn.kneighbors(X[:, :2])\r\n elif metric == \"precomputed\":\r\n X_precomputed = rng.random_sample((10, 4))\r\n Y_precomputed = rng.random_sample((3, 4))\r\n DXX = metrics.pairwise_distances(X_precomputed, metric=\"euclidean\")\r\n DYX = metrics.pairwise_distances(\r\n Y_precomputed, X_precomputed, metric=\"euclidean\"\r\n )\r\n nb_p = neighbors.NearestNeighbors(n_neighbors=3)\r\n nb_p.fit(DXX)\r\n nb_p.kneighbors(DYX)\r\n\r\n for metric in VALID_METRICS_SPARSE[\"brute\"]:\r\n if metric != \"precomputed\" and metric not in require_params:\r\n nn = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"auto\", metric=metric\r\n ).fit(Xcsr)\r\n nn.kneighbors(Xcsr)\r\n\r\n # Metric with parameter\r\n VI = np.dot(X, X.T)\r\n list_metrics = [\r\n (\"seuclidean\", dict(V=rng.rand(12))),\r\n (\"wminkowski\", dict(w=rng.rand(12))),\r\n (\"mahalanobis\", dict(VI=VI)),\r\n ]\r\n for metric, params in list_metrics:\r\n nn = neighbors.NearestNeighbors(\r\n n_neighbors=3, algorithm=\"auto\", metric=metric, metric_params=params\r\n ).fit(X)\r\n nn.kneighbors(X)\r\n\r\n\r\ndef test_metric_params_interface():\r\n X = rng.rand(5, 5)\r\n y = rng.randint(0, 2, 5)\r\n est = neighbors.KNeighborsClassifier(metric_params={\"p\": 3})\r\n with pytest.warns(SyntaxWarning):\r\n est.fit(X, y)\r\n\r\n\r\ndef test_predict_sparse_ball_kd_tree():\r\n rng = np.random.RandomState(0)\r\n X = rng.rand(5, 5)\r\n y = rng.randint(0, 2, 5)\r\n nbrs1 = neighbors.KNeighborsClassifier(1, algorithm=\"kd_tree\")\r\n nbrs2 = neighbors.KNeighborsRegressor(1, algorithm=\"ball_tree\")\r\n for model in [nbrs1, nbrs2]:\r\n model.fit(X, y)\r\n with pytest.raises(ValueError):\r\n 
model.predict(csr_matrix(X))\r\n\r\n\r\ndef test_non_euclidean_kneighbors():\r\n rng = np.random.RandomState(0)\r\n X = rng.rand(5, 5)\r\n\r\n # Find a reasonable radius.\r\n dist_array = pairwise_distances(X).flatten()\r\n np.sort(dist_array)\r\n radius = dist_array[15]\r\n\r\n # Test kneighbors_graph\r\n for metric in [\"manhattan\", \"chebyshev\"]:\r\n nbrs_graph = neighbors.kneighbors_graph(\r\n X, 3, metric=metric, mode=\"connectivity\", include_self=True\r\n ).toarray()\r\n nbrs1 = neighbors.NearestNeighbors(n_neighbors=3, metric=metric).fit(X)\r\n assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())\r\n\r\n # Test radiusneighbors_graph\r\n for metric in [\"manhattan\", \"chebyshev\"]:\r\n nbrs_graph = neighbors.radius_neighbors_graph(\r\n X, radius, metric=metric, mode=\"connectivity\", include_self=True\r\n ).toarray()\r\n nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)\r\n assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)\r\n\r\n # Raise error when wrong parameters are supplied,\r\n X_nbrs = neighbors.NearestNeighbors(n_neighbors=3, metric=\"manhattan\")\r\n X_nbrs.fit(X)\r\n with pytest.raises(ValueError):\r\n neighbors.kneighbors_graph(X_nbrs, 3, metric=\"euclidean\")\r\n X_nbrs = neighbors.NearestNeighbors(radius=radius, metric=\"manhattan\")\r\n X_nbrs.fit(X)\r\n with pytest.raises(ValueError):\r\n neighbors.radius_neighbors_graph(X_nbrs, radius, metric=\"euclidean\")\r\n\r\n\r\ndef check_object_arrays(nparray, list_check):\r\n for ind, ele in enumerate(nparray):\r\n assert_array_equal(ele, list_check[ind])\r\n\r\n\r\ndef test_k_and_radius_neighbors_train_is_not_query():\r\n # Test kneighbors et.al when query is not training data\r\n\r\n for algorithm in ALGORITHMS:\r\n\r\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\r\n\r\n X = [[0], [1]]\r\n nn.fit(X)\r\n test_data = [[2], [1]]\r\n\r\n # Test neighbors.\r\n dist, ind = nn.kneighbors(test_data)\r\n assert_array_equal(dist, [[1], [0]])\r\n assert_array_equal(ind, [[1], [1]])\r\n dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)\r\n check_object_arrays(dist, [[1], [1, 0]])\r\n check_object_arrays(ind, [[1], [0, 1]])\r\n\r\n # Test the graph variants.\r\n assert_array_equal(nn.kneighbors_graph(test_data).A, [[0.0, 1.0], [0.0, 1.0]])\r\n assert_array_equal(\r\n nn.kneighbors_graph([[2], [1]], mode=\"distance\").A,\r\n np.array([[0.0, 1.0], [0.0, 0.0]]),\r\n )\r\n rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)\r\n assert_array_equal(rng.A, [[0, 1], [1, 1]])\r\n\r\n\r\ndef test_k_and_radius_neighbors_X_None():\r\n # Test kneighbors et.al when query is None\r\n for algorithm in ALGORITHMS:\r\n\r\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\r\n\r\n X = [[0], [1]]\r\n nn.fit(X)\r\n\r\n dist, ind = nn.kneighbors()\r\n assert_array_equal(dist, [[1], [1]])\r\n assert_array_equal(ind, [[1], [0]])\r\n dist, ind = nn.radius_neighbors(None, radius=1.5)\r\n check_object_arrays(dist, [[1], [1]])\r\n check_object_arrays(ind, [[1], [0]])\r\n\r\n # Test the graph variants.\r\n rng = nn.radius_neighbors_graph(None, radius=1.5)\r\n kng = nn.kneighbors_graph(None)\r\n for graph in [rng, kng]:\r\n assert_array_equal(graph.A, [[0, 1], [1, 0]])\r\n assert_array_equal(graph.data, [1, 1])\r\n assert_array_equal(graph.indices, [1, 0])\r\n\r\n X = [[0, 1], [0, 1], [1, 1]]\r\n nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)\r\n nn.fit(X)\r\n assert_array_equal(\r\n nn.kneighbors_graph().A,\r\n np.array([[0.0, 1.0, 1.0], 
[1.0, 0.0, 1.0], [1.0, 1.0, 0]]),\r\n )\r\n\r\n\r\ndef test_k_and_radius_neighbors_duplicates():\r\n # Test behavior of kneighbors when duplicates are present in query\r\n\r\n for algorithm in ALGORITHMS:\r\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\r\n nn.fit([[0], [1]])\r\n\r\n # Do not do anything special to duplicates.\r\n kng = nn.kneighbors_graph([[0], [1]], mode=\"distance\")\r\n assert_array_equal(kng.A, np.array([[0.0, 0.0], [0.0, 0.0]]))\r\n assert_array_equal(kng.data, [0.0, 0.0])\r\n assert_array_equal(kng.indices, [0, 1])\r\n\r\n dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)\r\n check_object_arrays(dist, [[0, 1], [1, 0]])\r\n check_object_arrays(ind, [[0, 1], [0, 1]])\r\n\r\n rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)\r\n assert_array_equal(rng.A, np.ones((2, 2)))\r\n\r\n rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5, mode=\"distance\")\r\n rng.sort_indices()\r\n assert_array_equal(rng.A, [[0, 1], [1, 0]])\r\n assert_array_equal(rng.indices, [0, 1, 0, 1])\r\n assert_array_equal(rng.data, [0, 1, 1, 0])\r\n\r\n # Mask the first duplicates when n_duplicates > n_neighbors.\r\n X = np.ones((3, 1))\r\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=\"brute\")\r\n nn.fit(X)\r\n dist, ind = nn.kneighbors()\r\n assert_array_equal(dist, np.zeros((3, 1)))\r\n assert_array_equal(ind, [[1], [0], [1]])\r\n\r\n # Test that zeros are explicitly marked in kneighbors_graph.\r\n kng = nn.kneighbors_graph(mode=\"distance\")\r\n assert_array_equal(kng.A, np.zeros((3, 3)))\r\n assert_array_equal(kng.data, np.zeros(3))\r\n assert_array_equal(kng.indices, [1.0, 0.0, 1.0])\r\n assert_array_equal(\r\n nn.kneighbors_graph().A,\r\n np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),\r\n )\r\n\r\n\r\ndef test_include_self_neighbors_graph():\r\n # Test include_self parameter in neighbors_graph\r\n X = [[2, 3], [4, 5]]\r\n kng = neighbors.kneighbors_graph(X, 1, include_self=True).A\r\n kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A\r\n assert_array_equal(kng, [[1.0, 0.0], [0.0, 1.0]])\r\n assert_array_equal(kng_not_self, [[0.0, 1.0], [1.0, 0.0]])\r\n\r\n rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A\r\n rng_not_self = neighbors.radius_neighbors_graph(X, 5.0, include_self=False).A\r\n assert_array_equal(rng, [[1.0, 1.0], [1.0, 1.0]])\r\n assert_array_equal(rng_not_self, [[0.0, 1.0], [1.0, 0.0]])\r\n\r\n\r\[email protected](\"algorithm\", ALGORITHMS)\r\ndef test_same_knn_parallel(algorithm):\r\n X, y = datasets.make_classification(\r\n n_samples=30, n_features=5, n_redundant=0, random_state=0\r\n )\r\n X_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\n clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm)\r\n clf.fit(X_train, y_train)\r\n y = clf.predict(X_test)\r\n dist, ind = clf.kneighbors(X_test)\r\n graph = clf.kneighbors_graph(X_test, mode=\"distance\").toarray()\r\n\r\n clf.set_params(n_jobs=3)\r\n clf.fit(X_train, y_train)\r\n y_parallel = clf.predict(X_test)\r\n dist_parallel, ind_parallel = clf.kneighbors(X_test)\r\n graph_parallel = clf.kneighbors_graph(X_test, mode=\"distance\").toarray()\r\n\r\n assert_array_equal(y, y_parallel)\r\n assert_array_almost_equal(dist, dist_parallel)\r\n assert_array_equal(ind, ind_parallel)\r\n assert_array_almost_equal(graph, graph_parallel)\r\n\r\n\r\[email protected](\"algorithm\", ALGORITHMS)\r\ndef test_same_radius_neighbors_parallel(algorithm):\r\n X, y = datasets.make_classification(\r\n n_samples=30, 
n_features=5, n_redundant=0, random_state=0\r\n )\r\n X_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\n clf = neighbors.RadiusNeighborsClassifier(radius=10, algorithm=algorithm)\r\n clf.fit(X_train, y_train)\r\n y = clf.predict(X_test)\r\n dist, ind = clf.radius_neighbors(X_test)\r\n graph = clf.radius_neighbors_graph(X_test, mode=\"distance\").toarray()\r\n\r\n clf.set_params(n_jobs=3)\r\n clf.fit(X_train, y_train)\r\n y_parallel = clf.predict(X_test)\r\n dist_parallel, ind_parallel = clf.radius_neighbors(X_test)\r\n graph_parallel = clf.radius_neighbors_graph(X_test, mode=\"distance\").toarray()\r\n\r\n assert_array_equal(y, y_parallel)\r\n for i in range(len(dist)):\r\n assert_array_almost_equal(dist[i], dist_parallel[i])\r\n assert_array_equal(ind[i], ind_parallel[i])\r\n assert_array_almost_equal(graph, graph_parallel)\r\n\r\n\r\[email protected](\"backend\", JOBLIB_BACKENDS)\r\[email protected](\"algorithm\", ALGORITHMS)\r\ndef test_knn_forcing_backend(backend, algorithm):\r\n # Non-regression test which ensure the knn methods are properly working\r\n # even when forcing the global joblib backend.\r\n with joblib.parallel_backend(backend):\r\n X, y = datasets.make_classification(\r\n n_samples=30, n_features=5, n_redundant=0, random_state=0\r\n )\r\n X_train, X_test, y_train, y_test = train_test_split(X, y)\r\n\r\n clf = neighbors.KNeighborsClassifier(\r\n n_neighbors=3, algorithm=algorithm, n_jobs=3\r\n )\r\n clf.fit(X_train, y_train)\r\n clf.predict(X_test)\r\n clf.kneighbors(X_test)\r\n clf.kneighbors_graph(X_test, mode=\"distance\").toarray()\r\n\r\n\r\ndef test_dtype_convert():\r\n classifier = neighbors.KNeighborsClassifier(n_neighbors=1)\r\n CLASSES = 15\r\n X = np.eye(CLASSES)\r\n y = [ch for ch in \"ABCDEFGHIJKLMNOPQRSTU\"[:CLASSES]]\r\n\r\n result = classifier.fit(X, y).predict(X)\r\n assert_array_equal(result, y)\r\n\r\n\r\ndef test_sparse_metric_callable():\r\n def sparse_metric(x, y): # Metric accepting sparse matrix input (only)\r\n assert issparse(x) and issparse(y)\r\n return x.dot(y.T).A.item()\r\n\r\n X = csr_matrix(\r\n [[1, 1, 1, 1, 1], [1, 0, 1, 0, 1], [0, 0, 1, 0, 0]] # Population matrix\r\n )\r\n\r\n Y = csr_matrix([[1, 1, 0, 1, 1], [1, 0, 0, 0, 1]]) # Query matrix\r\n\r\n nn = neighbors.NearestNeighbors(\r\n algorithm=\"brute\", n_neighbors=2, metric=sparse_metric\r\n ).fit(X)\r\n N = nn.kneighbors(Y, return_distance=False)\r\n\r\n # GS indices of nearest neighbours in `X` for `sparse_metric`\r\n gold_standard_nn = np.array([[2, 1], [2, 1]])\r\n\r\n assert_array_equal(N, gold_standard_nn)\r\n\r\n\r\n# ignore conversion to boolean in pairwise_distances\r\n@ignore_warnings(category=DataConversionWarning)\r\ndef test_pairwise_boolean_distance():\r\n # Non-regression test for #4523\r\n # 'brute': uses scipy.spatial.distance through pairwise_distances\r\n # 'ball_tree': uses sklearn.neighbors._dist_metrics\r\n rng = np.random.RandomState(0)\r\n X = rng.uniform(size=(6, 5))\r\n NN = neighbors.NearestNeighbors\r\n\r\n nn1 = NN(metric=\"jaccard\", algorithm=\"brute\").fit(X)\r\n nn2 = NN(metric=\"jaccard\", algorithm=\"ball_tree\").fit(X)\r\n assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])\r\n\r\n\r\ndef test_radius_neighbors_predict_proba():\r\n for seed in range(5):\r\n X, y = datasets.make_classification(\r\n n_samples=50,\r\n n_features=5,\r\n n_informative=3,\r\n n_redundant=0,\r\n n_classes=3,\r\n random_state=seed,\r\n )\r\n X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)\r\n outlier_label = int(2 - seed)\r\n clf = 
neighbors.RadiusNeighborsClassifier(radius=2, outlier_label=outlier_label)\r\n clf.fit(X_tr, y_tr)\r\n pred = clf.predict(X_te)\r\n proba = clf.predict_proba(X_te)\r\n proba_label = proba.argmax(axis=1)\r\n proba_label = np.where(proba.sum(axis=1) == 0, outlier_label, proba_label)\r\n assert_array_equal(pred, proba_label)\r\n\r\n\r\ndef test_pipeline_with_nearest_neighbors_transformer():\r\n # Test chaining KNeighborsTransformer and classifiers/regressors\r\n rng = np.random.RandomState(0)\r\n X = 2 * rng.rand(40, 5) - 1\r\n X2 = 2 * rng.rand(40, 5) - 1\r\n y = rng.rand(40, 1)\r\n\r\n n_neighbors = 12\r\n radius = 1.5\r\n # We precompute more neighbors than necessary, to have equivalence between\r\n # k-neighbors estimator after radius-neighbors transformer, and vice-versa.\r\n factor = 2\r\n\r\n k_trans = neighbors.KNeighborsTransformer(n_neighbors=n_neighbors, mode=\"distance\")\r\n k_trans_factor = neighbors.KNeighborsTransformer(\r\n n_neighbors=int(n_neighbors * factor), mode=\"distance\"\r\n )\r\n\r\n r_trans = neighbors.RadiusNeighborsTransformer(radius=radius, mode=\"distance\")\r\n r_trans_factor = neighbors.RadiusNeighborsTransformer(\r\n radius=int(radius * factor), mode=\"distance\"\r\n )\r\n\r\n k_reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors)\r\n r_reg = neighbors.RadiusNeighborsRegressor(radius=radius)\r\n\r\n test_list = [\r\n (k_trans, k_reg),\r\n (k_trans_factor, r_reg),\r\n (r_trans, r_reg),\r\n (r_trans_factor, k_reg),\r\n ]\r\n\r\n for trans, reg in test_list:\r\n # compare the chained version and the compact version\r\n reg_compact = clone(reg)\r\n reg_precomp = clone(reg)\r\n reg_precomp.set_params(metric=\"precomputed\")\r\n\r\n reg_chain = make_pipeline(clone(trans), reg_precomp)\r\n\r\n y_pred_chain = reg_chain.fit(X, y).predict(X2)\r\n y_pred_compact = reg_compact.fit(X, y).predict(X2)\r\n assert_array_almost_equal(y_pred_chain, y_pred_compact)\r\n\r\n\r\[email protected](\r\n \"X, metric, metric_params, expected_algo\",\r\n [\r\n (np.random.randint(10, size=(10, 10)), \"precomputed\", None, \"brute\"),\r\n (np.random.randn(10, 20), \"euclidean\", None, \"brute\"),\r\n (np.random.randn(8, 5), \"euclidean\", None, \"brute\"),\r\n (np.random.randn(10, 5), \"euclidean\", None, \"kd_tree\"),\r\n (np.random.randn(10, 5), \"seuclidean\", {\"V\": [2] * 5}, \"ball_tree\"),\r\n (np.random.randn(10, 5), \"correlation\", None, \"brute\"),\r\n ],\r\n)\r\ndef test_auto_algorithm(X, metric, metric_params, expected_algo):\r\n model = neighbors.NearestNeighbors(\r\n n_neighbors=4, algorithm=\"auto\", metric=metric, metric_params=metric_params\r\n )\r\n model.fit(X)\r\n assert model._fit_method == expected_algo\r\n\r\n\r\n# TODO: Remove in 1.1\r\[email protected](\r\n \"NearestNeighbors\",\r\n [\r\n neighbors.KNeighborsClassifier,\r\n neighbors.KNeighborsRegressor,\r\n neighbors.NearestNeighbors,\r\n ], # type: ignore\r\n)\r\ndef test_pairwise_deprecated(NearestNeighbors):\r\n nn = NearestNeighbors(metric=\"precomputed\")\r\n msg = r\"Attribute `_pairwise` was deprecated in version 0\\.24\"\r\n with pytest.warns(FutureWarning, match=msg):\r\n nn._pairwise\r\n",
"import numpy as np\r\n\r\nfrom pandas import (\r\n Categorical,\r\n Series,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestUnique:\r\n def test_unique_data_ownership(self):\r\n # it works! GH#1807\r\n Series(Series([\"a\", \"c\", \"b\"]).unique()).sort_values()\r\n\r\n def test_unique(self):\r\n # GH#714 also, dtype=float\r\n ser = Series([1.2345] * 100)\r\n ser[::2] = np.nan\r\n result = ser.unique()\r\n assert len(result) == 2\r\n\r\n # explicit f4 dtype\r\n ser = Series([1.2345] * 100, dtype=\"f4\")\r\n ser[::2] = np.nan\r\n result = ser.unique()\r\n assert len(result) == 2\r\n\r\n def test_unique_nan_object_dtype(self):\r\n # NAs in object arrays GH#714\r\n ser = Series([\"foo\"] * 100, dtype=\"O\")\r\n ser[::2] = np.nan\r\n result = ser.unique()\r\n assert len(result) == 2\r\n\r\n def test_unique_none(self):\r\n # decision about None\r\n ser = Series([1, 2, 3, None, None, None], dtype=object)\r\n result = ser.unique()\r\n expected = np.array([1, 2, 3, None], dtype=object)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n def test_unique_categorical(self):\r\n # GH#18051\r\n cat = Categorical([])\r\n ser = Series(cat)\r\n result = ser.unique()\r\n tm.assert_categorical_equal(result, cat)\r\n\r\n cat = Categorical([np.nan])\r\n ser = Series(cat)\r\n result = ser.unique()\r\n tm.assert_categorical_equal(result, cat)\r\n",
"\"\"\"Metrics to assess performance on classification task given class prediction.\r\n\r\nFunctions named as ``*_score`` return a scalar value to maximize: the higher\r\nthe better.\r\n\r\nFunction named as ``*_error`` or ``*_loss`` return a scalar value to minimize:\r\nthe lower the better.\r\n\"\"\"\r\n\r\n# Authors: Alexandre Gramfort <[email protected]>\r\n# Mathieu Blondel <[email protected]>\r\n# Olivier Grisel <[email protected]>\r\n# Arnaud Joly <[email protected]>\r\n# Jochen Wersdorfer <[email protected]>\r\n# Lars Buitinck\r\n# Joel Nothman <[email protected]>\r\n# Noel Dawe <[email protected]>\r\n# Jatin Shah <[email protected]>\r\n# Saurabh Jha <[email protected]>\r\n# Bernardo Stein <[email protected]>\r\n# Shangwu Yao <[email protected]>\r\n# Michal Karbownik <[email protected]>\r\n# License: BSD 3 clause\r\n\r\n\r\nimport warnings\r\nimport numpy as np\r\n\r\nfrom scipy.sparse import coo_matrix\r\nfrom scipy.sparse import csr_matrix\r\n\r\nfrom ..preprocessing import LabelBinarizer\r\nfrom ..preprocessing import LabelEncoder\r\nfrom ..utils import assert_all_finite\r\nfrom ..utils import check_array\r\nfrom ..utils import check_consistent_length\r\nfrom ..utils import column_or_1d\r\nfrom ..utils.multiclass import unique_labels\r\nfrom ..utils.multiclass import type_of_target\r\nfrom ..utils.validation import _num_samples\r\nfrom ..utils.sparsefuncs import count_nonzero\r\nfrom ..exceptions import UndefinedMetricWarning\r\n\r\nfrom ._base import _check_pos_label_consistency\r\n\r\n\r\ndef _check_zero_division(zero_division):\r\n if isinstance(zero_division, str) and zero_division == \"warn\":\r\n return\r\n elif isinstance(zero_division, (int, float)) and zero_division in [0, 1]:\r\n return\r\n raise ValueError(\r\n 'Got zero_division={0}. 
Must be one of [\"warn\", 0, 1]'.format(zero_division)\r\n )\r\n\r\n\r\ndef _check_targets(y_true, y_pred):\r\n \"\"\"Check that y_true and y_pred belong to the same classification task.\r\n\r\n This converts multiclass or binary types to a common shape, and raises a\r\n ValueError for a mix of multilabel and multiclass targets, a mix of\r\n multilabel formats, for the presence of continuous-valued or multioutput\r\n targets, or for targets of different lengths.\r\n\r\n Column vectors are squeezed to 1d, while multilabel formats are returned\r\n as CSR sparse label indicators.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like\r\n\r\n y_pred : array-like\r\n\r\n Returns\r\n -------\r\n type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}\r\n The type of the true target data, as output by\r\n ``utils.multiclass.type_of_target``.\r\n\r\n y_true : array or indicator matrix\r\n\r\n y_pred : array or indicator matrix\r\n \"\"\"\r\n check_consistent_length(y_true, y_pred)\r\n type_true = type_of_target(y_true)\r\n type_pred = type_of_target(y_pred)\r\n\r\n y_type = {type_true, type_pred}\r\n if y_type == {\"binary\", \"multiclass\"}:\r\n y_type = {\"multiclass\"}\r\n\r\n if len(y_type) > 1:\r\n raise ValueError(\r\n \"Classification metrics can't handle a mix of {0} and {1} targets\".format(\r\n type_true, type_pred\r\n )\r\n )\r\n\r\n # We can't have more than one value on y_type => The set is no more needed\r\n y_type = y_type.pop()\r\n\r\n # No metrics support \"multiclass-multioutput\" format\r\n if y_type not in [\"binary\", \"multiclass\", \"multilabel-indicator\"]:\r\n raise ValueError(\"{0} is not supported\".format(y_type))\r\n\r\n if y_type in [\"binary\", \"multiclass\"]:\r\n y_true = column_or_1d(y_true)\r\n y_pred = column_or_1d(y_pred)\r\n if y_type == \"binary\":\r\n try:\r\n unique_values = np.union1d(y_true, y_pred)\r\n except TypeError as e:\r\n # We expect y_true and y_pred to be of the same data type.\r\n # If `y_true` was provided to the classifier as strings,\r\n # `y_pred` given by the classifier will also be encoded with\r\n # strings. So we raise a meaningful error\r\n raise TypeError(\r\n \"Labels in y_true and y_pred should be of the same type. \"\r\n f\"Got y_true={np.unique(y_true)} and \"\r\n f\"y_pred={np.unique(y_pred)}. 
Make sure that the \"\r\n \"predictions provided by the classifier coincides with \"\r\n \"the true labels.\"\r\n ) from e\r\n if len(unique_values) > 2:\r\n y_type = \"multiclass\"\r\n\r\n if y_type.startswith(\"multilabel\"):\r\n y_true = csr_matrix(y_true)\r\n y_pred = csr_matrix(y_pred)\r\n y_type = \"multilabel-indicator\"\r\n\r\n return y_type, y_true, y_pred\r\n\r\n\r\ndef _weighted_sum(sample_score, sample_weight, normalize=False):\r\n if normalize:\r\n return np.average(sample_score, weights=sample_weight)\r\n elif sample_weight is not None:\r\n return np.dot(sample_score, sample_weight)\r\n else:\r\n return sample_score.sum()\r\n\r\n\r\ndef accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):\r\n \"\"\"Accuracy classification score.\r\n\r\n In multilabel classification, this function computes subset accuracy:\r\n the set of labels predicted for a sample must *exactly* match the\r\n corresponding set of labels in y_true.\r\n\r\n Read more in the :ref:`User Guide <accuracy_score>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) labels.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Predicted labels, as returned by a classifier.\r\n\r\n normalize : bool, default=True\r\n If ``False``, return the number of correctly classified samples.\r\n Otherwise, return the fraction of correctly classified samples.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n score : float\r\n If ``normalize == True``, return the fraction of correctly\r\n classified samples (float), else returns the number of correctly\r\n classified samples (int).\r\n\r\n The best performance is 1 with ``normalize == True`` and the number\r\n of samples with ``normalize == False``.\r\n\r\n See Also\r\n --------\r\n jaccard_score, hamming_loss, zero_one_loss\r\n\r\n Notes\r\n -----\r\n In binary classification, this function is equal to the `jaccard_score`\r\n function.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import accuracy_score\r\n >>> y_pred = [0, 2, 1, 3]\r\n >>> y_true = [0, 1, 2, 3]\r\n >>> accuracy_score(y_true, y_pred)\r\n 0.5\r\n >>> accuracy_score(y_true, y_pred, normalize=False)\r\n 2\r\n\r\n In the multilabel case with binary label indicators:\r\n\r\n >>> import numpy as np\r\n >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))\r\n 0.5\r\n \"\"\"\r\n\r\n # Compute accuracy for each possible representation\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n check_consistent_length(y_true, y_pred, sample_weight)\r\n if y_type.startswith(\"multilabel\"):\r\n differing_labels = count_nonzero(y_true - y_pred, axis=1)\r\n score = differing_labels == 0\r\n else:\r\n score = y_true == y_pred\r\n\r\n return _weighted_sum(score, sample_weight, normalize)\r\n\r\n\r\ndef confusion_matrix(\r\n y_true, y_pred, *, labels=None, sample_weight=None, normalize=None\r\n):\r\n \"\"\"Compute confusion matrix to evaluate the accuracy of a classification.\r\n\r\n By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`\r\n is equal to the number of observations known to be in group :math:`i` and\r\n predicted to be in group :math:`j`.\r\n\r\n Thus in binary classification, the count of true negatives is\r\n :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is\r\n :math:`C_{1,1}` and false positives is :math:`C_{0,1}`.\r\n\r\n Read more in the :ref:`User Guide 
<confusion_matrix>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like of shape (n_samples,)\r\n Ground truth (correct) target values.\r\n\r\n y_pred : array-like of shape (n_samples,)\r\n Estimated targets as returned by a classifier.\r\n\r\n labels : array-like of shape (n_classes), default=None\r\n List of labels to index the matrix. This may be used to reorder\r\n or select a subset of labels.\r\n If ``None`` is given, those that appear at least once\r\n in ``y_true`` or ``y_pred`` are used in sorted order.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n .. versionadded:: 0.18\r\n\r\n normalize : {'true', 'pred', 'all'}, default=None\r\n Normalizes confusion matrix over the true (rows), predicted (columns)\r\n conditions or all the population. If None, confusion matrix will not be\r\n normalized.\r\n\r\n Returns\r\n -------\r\n C : ndarray of shape (n_classes, n_classes)\r\n Confusion matrix whose i-th row and j-th\r\n column entry indicates the number of\r\n samples with true label being i-th class\r\n and predicted label being j-th class.\r\n\r\n See Also\r\n --------\r\n ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix\r\n given an estimator, the data, and the label.\r\n ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix\r\n given the true and predicted labels.\r\n ConfusionMatrixDisplay : Confusion Matrix visualization.\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry for the Confusion matrix\r\n <https://en.wikipedia.org/wiki/Confusion_matrix>`_\r\n (Wikipedia and other references may use a different\r\n convention for axes).\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import confusion_matrix\r\n >>> y_true = [2, 0, 2, 2, 0, 1]\r\n >>> y_pred = [0, 0, 2, 2, 0, 2]\r\n >>> confusion_matrix(y_true, y_pred)\r\n array([[2, 0, 0],\r\n [0, 0, 1],\r\n [1, 0, 2]])\r\n\r\n >>> y_true = [\"cat\", \"ant\", \"cat\", \"cat\", \"ant\", \"bird\"]\r\n >>> y_pred = [\"ant\", \"ant\", \"cat\", \"cat\", \"ant\", \"cat\"]\r\n >>> confusion_matrix(y_true, y_pred, labels=[\"ant\", \"bird\", \"cat\"])\r\n array([[2, 0, 0],\r\n [0, 0, 1],\r\n [1, 0, 2]])\r\n\r\n In the binary case, we can extract true positives, etc as follows:\r\n\r\n >>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()\r\n >>> (tn, fp, fn, tp)\r\n (0, 2, 1, 1)\r\n\r\n \"\"\"\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n if y_type not in (\"binary\", \"multiclass\"):\r\n raise ValueError(\"%s is not supported\" % y_type)\r\n\r\n if labels is None:\r\n labels = unique_labels(y_true, y_pred)\r\n else:\r\n labels = np.asarray(labels)\r\n n_labels = labels.size\r\n if n_labels == 0:\r\n raise ValueError(\"'labels' should contains at least one label.\")\r\n elif y_true.size == 0:\r\n return np.zeros((n_labels, n_labels), dtype=int)\r\n elif len(np.intersect1d(y_true, labels)) == 0:\r\n raise ValueError(\"At least one label specified must be in y_true\")\r\n\r\n if sample_weight is None:\r\n sample_weight = np.ones(y_true.shape[0], dtype=np.int64)\r\n else:\r\n sample_weight = np.asarray(sample_weight)\r\n\r\n check_consistent_length(y_true, y_pred, sample_weight)\r\n\r\n if normalize not in [\"true\", \"pred\", \"all\", None]:\r\n raise ValueError(\"normalize must be one of {'true', 'pred', 'all', None}\")\r\n\r\n n_labels = labels.size\r\n # If labels are not consecutive integers starting from zero, then\r\n # y_true and y_pred must be converted into index form\r\n 
need_index_conversion = not (\r\n labels.dtype.kind in {\"i\", \"u\", \"b\"}\r\n and np.all(labels == np.arange(n_labels))\r\n and y_true.min() >= 0\r\n and y_pred.min() >= 0\r\n )\r\n if need_index_conversion:\r\n label_to_ind = {y: x for x, y in enumerate(labels)}\r\n y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])\r\n y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])\r\n\r\n # intersect y_pred, y_true with labels, eliminate items not in labels\r\n ind = np.logical_and(y_pred < n_labels, y_true < n_labels)\r\n if not np.all(ind):\r\n y_pred = y_pred[ind]\r\n y_true = y_true[ind]\r\n # also eliminate weights of eliminated items\r\n sample_weight = sample_weight[ind]\r\n\r\n # Choose the accumulator dtype to always have high precision\r\n if sample_weight.dtype.kind in {\"i\", \"u\", \"b\"}:\r\n dtype = np.int64\r\n else:\r\n dtype = np.float64\r\n\r\n cm = coo_matrix(\r\n (sample_weight, (y_true, y_pred)),\r\n shape=(n_labels, n_labels),\r\n dtype=dtype,\r\n ).toarray()\r\n\r\n with np.errstate(all=\"ignore\"):\r\n if normalize == \"true\":\r\n cm = cm / cm.sum(axis=1, keepdims=True)\r\n elif normalize == \"pred\":\r\n cm = cm / cm.sum(axis=0, keepdims=True)\r\n elif normalize == \"all\":\r\n cm = cm / cm.sum()\r\n cm = np.nan_to_num(cm)\r\n\r\n return cm\r\n\r\n\r\ndef multilabel_confusion_matrix(\r\n y_true, y_pred, *, sample_weight=None, labels=None, samplewise=False\r\n):\r\n \"\"\"Compute a confusion matrix for each class or sample.\r\n\r\n .. versionadded:: 0.21\r\n\r\n Compute class-wise (default) or sample-wise (samplewise=True) multilabel\r\n confusion matrix to evaluate the accuracy of a classification, and output\r\n confusion matrices for each class or sample.\r\n\r\n In multilabel confusion matrix :math:`MCM`, the count of true negatives\r\n is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,\r\n true positives is :math:`MCM_{:,1,1}` and false positives is\r\n :math:`MCM_{:,0,1}`.\r\n\r\n Multiclass data will be treated as if binarized under a one-vs-rest\r\n transformation. Returned confusion matrices will be in the order of\r\n sorted unique labels in the union of (y_true, y_pred).\r\n\r\n Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \\\r\n (n_samples,)\r\n Ground truth (correct) target values.\r\n\r\n y_pred : {array-like, sparse matrix} of shape (n_samples, n_outputs) or \\\r\n (n_samples,)\r\n Estimated targets as returned by a classifier.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n labels : array-like of shape (n_classes,), default=None\r\n A list of classes or column indices to select some (or to force\r\n inclusion of classes absent from the data).\r\n\r\n samplewise : bool, default=False\r\n In the multilabel case, this calculates a confusion matrix per sample.\r\n\r\n Returns\r\n -------\r\n multi_confusion : ndarray of shape (n_outputs, 2, 2)\r\n A 2x2 confusion matrix corresponding to each output in the input.\r\n When calculating class-wise multi_confusion (default), then\r\n n_outputs = n_labels; when calculating sample-wise multi_confusion\r\n (samplewise=True), n_outputs = n_samples. 
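        For example, binary ``y_true`` and ``y_pred`` of shape (n_samples,)
        containing two classes yield a result of shape (2, 2, 2), i.e. one
        2x2 matrix per class.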
If ``labels`` is defined,\r\n the results will be returned in the order specified in ``labels``,\r\n otherwise the results will be returned in sorted order by default.\r\n\r\n See Also\r\n --------\r\n confusion_matrix : Compute confusion matrix to evaluate the accuracy of a\r\n classifier.\r\n\r\n Notes\r\n -----\r\n The `multilabel_confusion_matrix` calculates class-wise or sample-wise\r\n multilabel confusion matrices, and in multiclass tasks, labels are\r\n binarized under a one-vs-rest way; while\r\n :func:`~sklearn.metrics.confusion_matrix` calculates one confusion matrix\r\n for confusion between every two classes.\r\n\r\n Examples\r\n --------\r\n Multilabel-indicator case:\r\n\r\n >>> import numpy as np\r\n >>> from sklearn.metrics import multilabel_confusion_matrix\r\n >>> y_true = np.array([[1, 0, 1],\r\n ... [0, 1, 0]])\r\n >>> y_pred = np.array([[1, 0, 0],\r\n ... [0, 1, 1]])\r\n >>> multilabel_confusion_matrix(y_true, y_pred)\r\n array([[[1, 0],\r\n [0, 1]],\r\n <BLANKLINE>\r\n [[1, 0],\r\n [0, 1]],\r\n <BLANKLINE>\r\n [[0, 1],\r\n [1, 0]]])\r\n\r\n Multiclass case:\r\n\r\n >>> y_true = [\"cat\", \"ant\", \"cat\", \"cat\", \"ant\", \"bird\"]\r\n >>> y_pred = [\"ant\", \"ant\", \"cat\", \"cat\", \"ant\", \"cat\"]\r\n >>> multilabel_confusion_matrix(y_true, y_pred,\r\n ... labels=[\"ant\", \"bird\", \"cat\"])\r\n array([[[3, 1],\r\n [0, 2]],\r\n <BLANKLINE>\r\n [[5, 0],\r\n [1, 0]],\r\n <BLANKLINE>\r\n [[2, 1],\r\n [1, 2]]])\r\n \"\"\"\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n if sample_weight is not None:\r\n sample_weight = column_or_1d(sample_weight)\r\n check_consistent_length(y_true, y_pred, sample_weight)\r\n\r\n if y_type not in (\"binary\", \"multiclass\", \"multilabel-indicator\"):\r\n raise ValueError(\"%s is not supported\" % y_type)\r\n\r\n present_labels = unique_labels(y_true, y_pred)\r\n if labels is None:\r\n labels = present_labels\r\n n_labels = None\r\n else:\r\n n_labels = len(labels)\r\n labels = np.hstack(\r\n [labels, np.setdiff1d(present_labels, labels, assume_unique=True)]\r\n )\r\n\r\n if y_true.ndim == 1:\r\n if samplewise:\r\n raise ValueError(\r\n \"Samplewise metrics are not available outside of \"\r\n \"multilabel classification.\"\r\n )\r\n\r\n le = LabelEncoder()\r\n le.fit(labels)\r\n y_true = le.transform(y_true)\r\n y_pred = le.transform(y_pred)\r\n sorted_labels = le.classes_\r\n\r\n # labels are now from 0 to len(labels) - 1 -> use bincount\r\n tp = y_true == y_pred\r\n tp_bins = y_true[tp]\r\n if sample_weight is not None:\r\n tp_bins_weights = np.asarray(sample_weight)[tp]\r\n else:\r\n tp_bins_weights = None\r\n\r\n if len(tp_bins):\r\n tp_sum = np.bincount(\r\n tp_bins, weights=tp_bins_weights, minlength=len(labels)\r\n )\r\n else:\r\n # Pathological case\r\n true_sum = pred_sum = tp_sum = np.zeros(len(labels))\r\n if len(y_pred):\r\n pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))\r\n if len(y_true):\r\n true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))\r\n\r\n # Retain only selected labels\r\n indices = np.searchsorted(sorted_labels, labels[:n_labels])\r\n tp_sum = tp_sum[indices]\r\n true_sum = true_sum[indices]\r\n pred_sum = pred_sum[indices]\r\n\r\n else:\r\n sum_axis = 1 if samplewise else 0\r\n\r\n # All labels are index integers for multilabel.\r\n # Select labels:\r\n if not np.array_equal(labels, present_labels):\r\n if np.max(labels) > np.max(present_labels):\r\n raise ValueError(\r\n \"All labels must be in [0, n labels) for \"\r\n \"multilabel 
targets. \"\r\n \"Got %d > %d\" % (np.max(labels), np.max(present_labels))\r\n )\r\n if np.min(labels) < 0:\r\n raise ValueError(\r\n \"All labels must be in [0, n labels) for \"\r\n \"multilabel targets. \"\r\n \"Got %d < 0\"\r\n % np.min(labels)\r\n )\r\n\r\n if n_labels is not None:\r\n y_true = y_true[:, labels[:n_labels]]\r\n y_pred = y_pred[:, labels[:n_labels]]\r\n\r\n # calculate weighted counts\r\n true_and_pred = y_true.multiply(y_pred)\r\n tp_sum = count_nonzero(\r\n true_and_pred, axis=sum_axis, sample_weight=sample_weight\r\n )\r\n pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight)\r\n true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight)\r\n\r\n fp = pred_sum - tp_sum\r\n fn = true_sum - tp_sum\r\n tp = tp_sum\r\n\r\n if sample_weight is not None and samplewise:\r\n sample_weight = np.array(sample_weight)\r\n tp = np.array(tp)\r\n fp = np.array(fp)\r\n fn = np.array(fn)\r\n tn = sample_weight * y_true.shape[1] - tp - fp - fn\r\n elif sample_weight is not None:\r\n tn = sum(sample_weight) - tp - fp - fn\r\n elif samplewise:\r\n tn = y_true.shape[1] - tp - fp - fn\r\n else:\r\n tn = y_true.shape[0] - tp - fp - fn\r\n\r\n return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)\r\n\r\n\r\ndef cohen_kappa_score(y1, y2, *, labels=None, weights=None, sample_weight=None):\r\n r\"\"\"Cohen's kappa: a statistic that measures inter-annotator agreement.\r\n\r\n This function computes Cohen's kappa [1]_, a score that expresses the level\r\n of agreement between two annotators on a classification problem. It is\r\n defined as\r\n\r\n .. math::\r\n \\kappa = (p_o - p_e) / (1 - p_e)\r\n\r\n where :math:`p_o` is the empirical probability of agreement on the label\r\n assigned to any sample (the observed agreement ratio), and :math:`p_e` is\r\n the expected agreement when both annotators assign labels randomly.\r\n :math:`p_e` is estimated using a per-annotator empirical prior over the\r\n class labels [2]_.\r\n\r\n Read more in the :ref:`User Guide <cohen_kappa>`.\r\n\r\n Parameters\r\n ----------\r\n y1 : array of shape (n_samples,)\r\n Labels assigned by the first annotator.\r\n\r\n y2 : array of shape (n_samples,)\r\n Labels assigned by the second annotator. The kappa statistic is\r\n symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.\r\n\r\n labels : array-like of shape (n_classes,), default=None\r\n List of labels to index the matrix. This may be used to select a\r\n subset of labels. If `None`, all labels that appear at least once in\r\n ``y1`` or ``y2`` are used.\r\n\r\n weights : {'linear', 'quadratic'}, default=None\r\n Weighting type to calculate the score. `None` means no weighted;\r\n \"linear\" means linear weighted; \"quadratic\" means quadratic weighted.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n kappa : float\r\n The kappa statistic, which is a number between -1 and 1. The maximum\r\n value means complete agreement; zero or lower means chance agreement.\r\n\r\n References\r\n ----------\r\n .. [1] J. Cohen (1960). \"A coefficient of agreement for nominal scales\".\r\n Educational and Psychological Measurement 20(1):37-46.\r\n doi:10.1177/001316446002000104.\r\n .. [2] `R. Artstein and M. Poesio (2008). \"Inter-coder agreement for\r\n computational linguistics\". Computational Linguistics 34(4):555-596\r\n <https://www.mitpressjournals.org/doi/pdf/10.1162/coli.07-034-R2>`_.\r\n .. 
[3] `Wikipedia entry for the Cohen's kappa\r\n <https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_.\r\n \"\"\"\r\n confusion = confusion_matrix(y1, y2, labels=labels, sample_weight=sample_weight)\r\n n_classes = confusion.shape[0]\r\n sum0 = np.sum(confusion, axis=0)\r\n sum1 = np.sum(confusion, axis=1)\r\n expected = np.outer(sum0, sum1) / np.sum(sum0)\r\n\r\n if weights is None:\r\n w_mat = np.ones([n_classes, n_classes], dtype=int)\r\n w_mat.flat[:: n_classes + 1] = 0\r\n elif weights == \"linear\" or weights == \"quadratic\":\r\n w_mat = np.zeros([n_classes, n_classes], dtype=int)\r\n w_mat += np.arange(n_classes)\r\n if weights == \"linear\":\r\n w_mat = np.abs(w_mat - w_mat.T)\r\n else:\r\n w_mat = (w_mat - w_mat.T) ** 2\r\n else:\r\n raise ValueError(\"Unknown kappa weighting type.\")\r\n\r\n k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)\r\n return 1 - k\r\n\r\n\r\ndef jaccard_score(\r\n y_true,\r\n y_pred,\r\n *,\r\n labels=None,\r\n pos_label=1,\r\n average=\"binary\",\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Jaccard similarity coefficient score.\r\n\r\n The Jaccard index [1], or Jaccard similarity coefficient, defined as\r\n the size of the intersection divided by the size of the union of two label\r\n sets, is used to compare set of predicted labels for a sample to the\r\n corresponding set of labels in ``y_true``.\r\n\r\n Read more in the :ref:`User Guide <jaccard_similarity_score>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) labels.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Predicted labels, as returned by a classifier.\r\n\r\n labels : array-like of shape (n_classes,), default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'micro', 'macro', 'samples', 'weighted', \\\r\n 'binary'} or None, default='binary'\r\n If ``None``, the scores for each class are returned. Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average, weighted\r\n by support (the number of true instances for each label). 
This\r\n alters 'macro' to account for label imbalance.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", {0.0, 1.0}, default=\"warn\"\r\n Sets the value to return when there is a zero division, i.e. when there\r\n there are no negative values in predictions and labels. If set to\r\n \"warn\", this acts like 0, but a warning is also raised.\r\n\r\n Returns\r\n -------\r\n score : float (if average is not None) or array of floats, shape =\\\r\n [n_unique_labels]\r\n\r\n See Also\r\n --------\r\n accuracy_score, f1_score, multilabel_confusion_matrix\r\n\r\n Notes\r\n -----\r\n :func:`jaccard_score` may be a poor metric if there are no\r\n positives for some samples or classes. Jaccard is undefined if there are\r\n no true or predicted labels, and our implementation will return a score\r\n of 0 with a warning.\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry for the Jaccard index\r\n <https://en.wikipedia.org/wiki/Jaccard_index>`_.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.metrics import jaccard_score\r\n >>> y_true = np.array([[0, 1, 1],\r\n ... [1, 1, 0]])\r\n >>> y_pred = np.array([[1, 1, 1],\r\n ... [1, 0, 0]])\r\n\r\n In the binary case:\r\n\r\n >>> jaccard_score(y_true[0], y_pred[0])\r\n 0.6666...\r\n\r\n In the multilabel case:\r\n\r\n >>> jaccard_score(y_true, y_pred, average='samples')\r\n 0.5833...\r\n >>> jaccard_score(y_true, y_pred, average='macro')\r\n 0.6666...\r\n >>> jaccard_score(y_true, y_pred, average=None)\r\n array([0.5, 0.5, 1. ])\r\n\r\n In the multiclass case:\r\n\r\n >>> y_pred = [0, 2, 1, 2]\r\n >>> y_true = [0, 1, 2, 2]\r\n >>> jaccard_score(y_true, y_pred, average=None)\r\n array([1. , 0. , 0.33...])\r\n \"\"\"\r\n labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)\r\n samplewise = average == \"samples\"\r\n MCM = multilabel_confusion_matrix(\r\n y_true,\r\n y_pred,\r\n sample_weight=sample_weight,\r\n labels=labels,\r\n samplewise=samplewise,\r\n )\r\n numerator = MCM[:, 1, 1]\r\n denominator = MCM[:, 1, 1] + MCM[:, 0, 1] + MCM[:, 1, 0]\r\n\r\n if average == \"micro\":\r\n numerator = np.array([numerator.sum()])\r\n denominator = np.array([denominator.sum()])\r\n\r\n jaccard = _prf_divide(\r\n numerator,\r\n denominator,\r\n \"jaccard\",\r\n \"true or predicted\",\r\n average,\r\n (\"jaccard\",),\r\n zero_division=zero_division,\r\n )\r\n if average is None:\r\n return jaccard\r\n if average == \"weighted\":\r\n weights = MCM[:, 1, 0] + MCM[:, 1, 1]\r\n if not np.any(weights):\r\n # numerator is 0, and warning should have already been issued\r\n weights = None\r\n elif average == \"samples\" and sample_weight is not None:\r\n weights = sample_weight\r\n else:\r\n weights = None\r\n return np.average(jaccard, weights=weights)\r\n\r\n\r\ndef matthews_corrcoef(y_true, y_pred, *, sample_weight=None):\r\n \"\"\"Compute the Matthews correlation coefficient (MCC).\r\n\r\n The Matthews correlation coefficient is used in machine learning as a\r\n measure of the quality of binary and multiclass classifications. It takes\r\n into account true and false positives and negatives and is generally\r\n regarded as a balanced measure which can be used even if the classes are of\r\n very different sizes. The MCC is in essence a correlation coefficient value\r\n between -1 and +1. 
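    For reference, in the binary case it can be computed directly from the
    confusion matrix as
    ``(tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))``.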
A coefficient of +1 represents a perfect prediction, 0\r\n an average random prediction and -1 an inverse prediction. The statistic\r\n is also known as the phi coefficient. [source: Wikipedia]\r\n\r\n Binary and multiclass labels are supported. Only in the binary case does\r\n this relate to information about true and false positives and negatives.\r\n See references below.\r\n\r\n Read more in the :ref:`User Guide <matthews_corrcoef>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : array, shape = [n_samples]\r\n Ground truth (correct) target values.\r\n\r\n y_pred : array, shape = [n_samples]\r\n Estimated targets as returned by a classifier.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n .. versionadded:: 0.18\r\n\r\n Returns\r\n -------\r\n mcc : float\r\n The Matthews correlation coefficient (+1 represents a perfect\r\n prediction, 0 an average random prediction and -1 and inverse\r\n prediction).\r\n\r\n References\r\n ----------\r\n .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the\r\n accuracy of prediction algorithms for classification: an overview\r\n <https://doi.org/10.1093/bioinformatics/16.5.412>`_.\r\n\r\n .. [2] `Wikipedia entry for the Matthews Correlation Coefficient\r\n <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_.\r\n\r\n .. [3] `Gorodkin, (2004). Comparing two K-category assignments by a\r\n K-category correlation coefficient\r\n <https://www.sciencedirect.com/science/article/pii/S1476927104000799>`_.\r\n\r\n .. [4] `Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC and CEN\r\n Error Measures in MultiClass Prediction\r\n <https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0041882>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import matthews_corrcoef\r\n >>> y_true = [+1, +1, +1, -1]\r\n >>> y_pred = [+1, -1, +1, +1]\r\n >>> matthews_corrcoef(y_true, y_pred)\r\n -0.33...\r\n \"\"\"\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n check_consistent_length(y_true, y_pred, sample_weight)\r\n if y_type not in {\"binary\", \"multiclass\"}:\r\n raise ValueError(\"%s is not supported\" % y_type)\r\n\r\n lb = LabelEncoder()\r\n lb.fit(np.hstack([y_true, y_pred]))\r\n y_true = lb.transform(y_true)\r\n y_pred = lb.transform(y_pred)\r\n\r\n C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)\r\n t_sum = C.sum(axis=1, dtype=np.float64)\r\n p_sum = C.sum(axis=0, dtype=np.float64)\r\n n_correct = np.trace(C, dtype=np.float64)\r\n n_samples = p_sum.sum()\r\n cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)\r\n cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)\r\n cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)\r\n\r\n if cov_ypyp * cov_ytyt == 0:\r\n return 0.0\r\n else:\r\n return cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)\r\n\r\n\r\ndef zero_one_loss(y_true, y_pred, *, normalize=True, sample_weight=None):\r\n \"\"\"Zero-one classification loss.\r\n\r\n If normalize is ``True``, return the fraction of misclassifications\r\n (float), else it returns the number of misclassifications (int). 
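    Equivalently, with the default ``normalize=True`` the loss equals
    ``1 - accuracy_score(y_true, y_pred)``.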
The best\r\n performance is 0.\r\n\r\n Read more in the :ref:`User Guide <zero_one_loss>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) labels.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Predicted labels, as returned by a classifier.\r\n\r\n normalize : bool, default=True\r\n If ``False``, return the number of misclassifications.\r\n Otherwise, return the fraction of misclassifications.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n loss : float or int,\r\n If ``normalize == True``, return the fraction of misclassifications\r\n (float), else it returns the number of misclassifications (int).\r\n\r\n Notes\r\n -----\r\n In multilabel classification, the zero_one_loss function corresponds to\r\n the subset zero-one loss: for each sample, the entire set of labels must be\r\n correctly predicted, otherwise the loss for that sample is equal to one.\r\n\r\n See Also\r\n --------\r\n accuracy_score, hamming_loss, jaccard_score\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import zero_one_loss\r\n >>> y_pred = [1, 2, 3, 4]\r\n >>> y_true = [2, 2, 3, 4]\r\n >>> zero_one_loss(y_true, y_pred)\r\n 0.25\r\n >>> zero_one_loss(y_true, y_pred, normalize=False)\r\n 1\r\n\r\n In the multilabel case with binary label indicators:\r\n\r\n >>> import numpy as np\r\n >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))\r\n 0.5\r\n \"\"\"\r\n score = accuracy_score(\r\n y_true, y_pred, normalize=normalize, sample_weight=sample_weight\r\n )\r\n\r\n if normalize:\r\n return 1 - score\r\n else:\r\n if sample_weight is not None:\r\n n_samples = np.sum(sample_weight)\r\n else:\r\n n_samples = _num_samples(y_true)\r\n return n_samples - score\r\n\r\n\r\ndef f1_score(\r\n y_true,\r\n y_pred,\r\n *,\r\n labels=None,\r\n pos_label=1,\r\n average=\"binary\",\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Compute the F1 score, also known as balanced F-score or F-measure.\r\n\r\n The F1 score can be interpreted as a harmonic mean of the precision and\r\n recall, where an F1 score reaches its best value at 1 and worst score at 0.\r\n The relative contribution of precision and recall to the F1 score are\r\n equal. The formula for the F1 score is::\r\n\r\n F1 = 2 * (precision * recall) / (precision + recall)\r\n\r\n In the multi-class and multi-label case, this is the average of\r\n the F1 score of each class with weighting depending on the ``average``\r\n parameter.\r\n\r\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n labels : array-like, default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n .. 
versionchanged:: 0.17\r\n Parameter `labels` improved for multiclass problem.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'micro', 'macro', 'samples','weighted', 'binary'} or None, \\\r\n default='binary'\r\n This parameter is required for multiclass/multilabel targets.\r\n If ``None``, the scores for each class are returned. Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average weighted\r\n by support (the number of true instances for each label). This\r\n alters 'macro' to account for label imbalance; it can result in an\r\n F-score that is not between precision and recall.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification where this differs from\r\n :func:`accuracy_score`).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division, i.e. when all\r\n predictions and labels are negative. If set to \"warn\", this acts as 0,\r\n but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n f1_score : float or array of float, shape = [n_unique_labels]\r\n F1 score of the positive class in binary classification or weighted\r\n average of the F1 scores of each class for the multiclass task.\r\n\r\n See Also\r\n --------\r\n fbeta_score, precision_recall_fscore_support, jaccard_score,\r\n multilabel_confusion_matrix\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry for the F1-score\r\n <https://en.wikipedia.org/wiki/F1_score>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import f1_score\r\n >>> y_true = [0, 1, 2, 0, 1, 2]\r\n >>> y_pred = [0, 2, 1, 0, 0, 1]\r\n >>> f1_score(y_true, y_pred, average='macro')\r\n 0.26...\r\n >>> f1_score(y_true, y_pred, average='micro')\r\n 0.33...\r\n >>> f1_score(y_true, y_pred, average='weighted')\r\n 0.26...\r\n >>> f1_score(y_true, y_pred, average=None)\r\n array([0.8, 0. , 0. ])\r\n >>> y_true = [0, 0, 0, 0, 0, 0]\r\n >>> y_pred = [0, 0, 0, 0, 0, 0]\r\n >>> f1_score(y_true, y_pred, zero_division=1)\r\n 1.0...\r\n >>> # multilabel classification\r\n >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]\r\n >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]\r\n >>> f1_score(y_true, y_pred, average=None)\r\n array([0.66666667, 1. , 0.66666667])\r\n\r\n Notes\r\n -----\r\n When ``true positive + false positive == 0``, precision is undefined.\r\n When ``true positive + false negative == 0``, recall is undefined.\r\n In such cases, by default the metric will be set to 0, as will f-score,\r\n and ``UndefinedMetricWarning`` will be raised. 
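    For instance, ``f1_score([0, 0], [0, 0])`` has no true or predicted
    positives for the positive class, so under the default settings it warns
    and returns 0.0.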
This behavior can be\r\n modified with ``zero_division``.\r\n \"\"\"\r\n return fbeta_score(\r\n y_true,\r\n y_pred,\r\n beta=1,\r\n labels=labels,\r\n pos_label=pos_label,\r\n average=average,\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n\r\n\r\ndef fbeta_score(\r\n y_true,\r\n y_pred,\r\n *,\r\n beta,\r\n labels=None,\r\n pos_label=1,\r\n average=\"binary\",\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Compute the F-beta score.\r\n\r\n The F-beta score is the weighted harmonic mean of precision and recall,\r\n reaching its optimal value at 1 and its worst value at 0.\r\n\r\n The `beta` parameter determines the weight of recall in the combined\r\n score. ``beta < 1`` lends more weight to precision, while ``beta > 1``\r\n favors recall (``beta -> 0`` considers only precision, ``beta -> +inf``\r\n only recall).\r\n\r\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n beta : float\r\n Determines the weight of recall in the combined score.\r\n\r\n labels : array-like, default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n .. versionchanged:: 0.17\r\n Parameter `labels` improved for multiclass problem.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \\\r\n default='binary'\r\n This parameter is required for multiclass/multilabel targets.\r\n If ``None``, the scores for each class are returned. Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average weighted\r\n by support (the number of true instances for each label). 
This\r\n alters 'macro' to account for label imbalance; it can result in an\r\n F-score that is not between precision and recall.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification where this differs from\r\n :func:`accuracy_score`).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division, i.e. when all\r\n predictions and labels are negative. If set to \"warn\", this acts as 0,\r\n but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n fbeta_score : float (if average is not None) or array of float, shape =\\\r\n [n_unique_labels]\r\n F-beta score of the positive class in binary classification or weighted\r\n average of the F-beta score of each class for the multiclass task.\r\n\r\n See Also\r\n --------\r\n precision_recall_fscore_support, multilabel_confusion_matrix\r\n\r\n Notes\r\n -----\r\n When ``true positive + false positive == 0`` or\r\n ``true positive + false negative == 0``, f-score returns 0 and raises\r\n ``UndefinedMetricWarning``. This behavior can be\r\n modified with ``zero_division``.\r\n\r\n References\r\n ----------\r\n .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).\r\n Modern Information Retrieval. Addison Wesley, pp. 327-328.\r\n\r\n .. [2] `Wikipedia entry for the F1-score\r\n <https://en.wikipedia.org/wiki/F1_score>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import fbeta_score\r\n >>> y_true = [0, 1, 2, 0, 1, 2]\r\n >>> y_pred = [0, 2, 1, 0, 0, 1]\r\n >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)\r\n 0.23...\r\n >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)\r\n 0.33...\r\n >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)\r\n 0.23...\r\n >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)\r\n array([0.71..., 0. , 0. ])\r\n \"\"\"\r\n\r\n _, _, f, _ = precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n beta=beta,\r\n labels=labels,\r\n pos_label=pos_label,\r\n average=average,\r\n warn_for=(\"f-score\",),\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n return f\r\n\r\n\r\ndef _prf_divide(\r\n numerator, denominator, metric, modifier, average, warn_for, zero_division=\"warn\"\r\n):\r\n \"\"\"Performs division and handles divide-by-zero.\r\n\r\n On zero-division, sets the corresponding result elements equal to\r\n 0 or 1 (according to ``zero_division``). Plus, if\r\n ``zero_division != \"warn\"`` raises a warning.\r\n\r\n The metric, modifier and average arguments are used only for determining\r\n an appropriate warning.\r\n \"\"\"\r\n mask = denominator == 0.0\r\n denominator = denominator.copy()\r\n denominator[mask] = 1 # avoid infs/nans\r\n result = numerator / denominator\r\n\r\n if not np.any(mask):\r\n return result\r\n\r\n # if ``zero_division=1``, set those with denominator == 0 equal to 1\r\n result[mask] = 0.0 if zero_division in [\"warn\", 0] else 1.0\r\n\r\n # the user will be removing warnings if zero_division is set to something\r\n # different than its default value. If we are computing only f-score\r\n # the warning will be raised only if precision and recall are ill-defined\r\n if zero_division != \"warn\" or metric not in warn_for:\r\n return result\r\n\r\n # build appropriate warning\r\n # E.g. \"Precision and F-score are ill-defined and being set to 0.0 in\r\n # labels with no predicted samples. 
Use ``zero_division`` parameter to\r\n # control this behavior.\"\r\n\r\n if metric in warn_for and \"f-score\" in warn_for:\r\n msg_start = \"{0} and F-score are\".format(metric.title())\r\n elif metric in warn_for:\r\n msg_start = \"{0} is\".format(metric.title())\r\n elif \"f-score\" in warn_for:\r\n msg_start = \"F-score is\"\r\n else:\r\n return result\r\n\r\n _warn_prf(average, modifier, msg_start, len(result))\r\n\r\n return result\r\n\r\n\r\ndef _warn_prf(average, modifier, msg_start, result_size):\r\n axis0, axis1 = \"sample\", \"label\"\r\n if average == \"samples\":\r\n axis0, axis1 = axis1, axis0\r\n msg = (\r\n \"{0} ill-defined and being set to 0.0 {{0}} \"\r\n \"no {1} {2}s. Use `zero_division` parameter to control\"\r\n \" this behavior.\".format(msg_start, modifier, axis0)\r\n )\r\n if result_size == 1:\r\n msg = msg.format(\"due to\")\r\n else:\r\n msg = msg.format(\"in {0}s with\".format(axis1))\r\n warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)\r\n\r\n\r\ndef _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):\r\n \"\"\"Validation associated with set-wise metrics.\r\n\r\n Returns identified labels.\r\n \"\"\"\r\n average_options = (None, \"micro\", \"macro\", \"weighted\", \"samples\")\r\n if average not in average_options and average != \"binary\":\r\n raise ValueError(\"average has to be one of \" + str(average_options))\r\n\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n # Convert to Python primitive type to avoid NumPy type / Python str\r\n # comparison. See https://github.com/numpy/numpy/issues/6784\r\n present_labels = unique_labels(y_true, y_pred).tolist()\r\n if average == \"binary\":\r\n if y_type == \"binary\":\r\n if pos_label not in present_labels:\r\n if len(present_labels) >= 2:\r\n raise ValueError(\r\n f\"pos_label={pos_label} is not a valid label. It \"\r\n f\"should be one of {present_labels}\"\r\n )\r\n labels = [pos_label]\r\n else:\r\n average_options = list(average_options)\r\n if y_type == \"multiclass\":\r\n average_options.remove(\"samples\")\r\n raise ValueError(\r\n \"Target is %s but average='binary'. Please \"\r\n \"choose another average setting, one of %r.\" % (y_type, average_options)\r\n )\r\n elif pos_label not in (None, 1):\r\n warnings.warn(\r\n \"Note that pos_label (set to %r) is ignored when \"\r\n \"average != 'binary' (got %r). You may use \"\r\n \"labels=[pos_label] to specify a single positive class.\"\r\n % (pos_label, average),\r\n UserWarning,\r\n )\r\n return labels\r\n\r\n\r\ndef precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n *,\r\n beta=1.0,\r\n labels=None,\r\n pos_label=1,\r\n average=None,\r\n warn_for=(\"precision\", \"recall\", \"f-score\"),\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Compute precision, recall, F-measure and support for each class.\r\n\r\n The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of\r\n true positives and ``fp`` the number of false positives. The precision is\r\n intuitively the ability of the classifier not to label as positive a sample\r\n that is negative.\r\n\r\n The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of\r\n true positives and ``fn`` the number of false negatives. 
The recall is\r\n intuitively the ability of the classifier to find all the positive samples.\r\n\r\n The F-beta score can be interpreted as a weighted harmonic mean of\r\n the precision and recall, where an F-beta score reaches its best\r\n value at 1 and worst score at 0.\r\n\r\n The F-beta score weights recall more than precision by a factor of\r\n ``beta``. ``beta == 1.0`` means recall and precision are equally important.\r\n\r\n The support is the number of occurrences of each class in ``y_true``.\r\n\r\n If ``pos_label is None`` and in binary classification, this function\r\n returns the average precision, recall and F-measure if ``average``\r\n is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.\r\n\r\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n beta : float, default=1.0\r\n The strength of recall versus precision in the F-score.\r\n\r\n labels : array-like, default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'binary', 'micro', 'macro', 'samples','weighted'}, \\\r\n default=None\r\n If ``None``, the scores for each class are returned. Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average weighted\r\n by support (the number of true instances for each label). 
This\r\n alters 'macro' to account for label imbalance; it can result in an\r\n F-score that is not between precision and recall.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification where this differs from\r\n :func:`accuracy_score`).\r\n\r\n warn_for : tuple or set, for internal use\r\n This determines which warnings will be made in the case that this\r\n function is being used to return only one of its metrics.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division:\r\n - recall: when there are no positive labels\r\n - precision: when there are no positive predictions\r\n - f-score: both\r\n\r\n If set to \"warn\", this acts as 0, but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n precision : float (if average is not None) or array of float, shape =\\\r\n [n_unique_labels]\r\n\r\n recall : float (if average is not None) or array of float, shape =\\\r\n [n_unique_labels]\r\n\r\n fbeta_score : float (if average is not None) or array of float, shape =\\\r\n [n_unique_labels]\r\n\r\n support : None (if average is not None) or array of int, shape =\\\r\n [n_unique_labels]\r\n The number of occurrences of each label in ``y_true``.\r\n\r\n Notes\r\n -----\r\n When ``true positive + false positive == 0``, precision is undefined.\r\n When ``true positive + false negative == 0``, recall is undefined.\r\n In such cases, by default the metric will be set to 0, as will f-score,\r\n and ``UndefinedMetricWarning`` will be raised. This behavior can be\r\n modified with ``zero_division``.\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry for the Precision and recall\r\n <https://en.wikipedia.org/wiki/Precision_and_recall>`_.\r\n\r\n .. [2] `Wikipedia entry for the F1-score\r\n <https://en.wikipedia.org/wiki/F1_score>`_.\r\n\r\n .. [3] `Discriminative Methods for Multi-labeled Classification Advances\r\n in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu\r\n Godbole, Sunita Sarawagi\r\n <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.metrics import precision_recall_fscore_support\r\n >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])\r\n >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])\r\n >>> precision_recall_fscore_support(y_true, y_pred, average='macro')\r\n (0.22..., 0.33..., 0.26..., None)\r\n >>> precision_recall_fscore_support(y_true, y_pred, average='micro')\r\n (0.33..., 0.33..., 0.33..., None)\r\n >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')\r\n (0.22..., 0.33..., 0.26..., None)\r\n\r\n It is possible to compute per-label precisions, recalls, F1-scores and\r\n supports instead of averaging:\r\n\r\n >>> precision_recall_fscore_support(y_true, y_pred, average=None,\r\n ... labels=['pig', 'dog', 'cat'])\r\n (array([0. , 0. , 0.66...]),\r\n array([0., 0., 1.]), array([0. , 0. 
, 0.8]),\r\n array([2, 2, 2]))\r\n \"\"\"\r\n _check_zero_division(zero_division)\r\n if beta < 0:\r\n raise ValueError(\"beta should be >=0 in the F-beta score\")\r\n labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)\r\n\r\n # Calculate tp_sum, pred_sum, true_sum ###\r\n samplewise = average == \"samples\"\r\n MCM = multilabel_confusion_matrix(\r\n y_true,\r\n y_pred,\r\n sample_weight=sample_weight,\r\n labels=labels,\r\n samplewise=samplewise,\r\n )\r\n tp_sum = MCM[:, 1, 1]\r\n pred_sum = tp_sum + MCM[:, 0, 1]\r\n true_sum = tp_sum + MCM[:, 1, 0]\r\n\r\n if average == \"micro\":\r\n tp_sum = np.array([tp_sum.sum()])\r\n pred_sum = np.array([pred_sum.sum()])\r\n true_sum = np.array([true_sum.sum()])\r\n\r\n # Finally, we have all our sufficient statistics. Divide! #\r\n beta2 = beta ** 2\r\n\r\n # Divide, and on zero-division, set scores and/or warn according to\r\n # zero_division:\r\n precision = _prf_divide(\r\n tp_sum, pred_sum, \"precision\", \"predicted\", average, warn_for, zero_division\r\n )\r\n recall = _prf_divide(\r\n tp_sum, true_sum, \"recall\", \"true\", average, warn_for, zero_division\r\n )\r\n\r\n # warn for f-score only if zero_division is warn, it is in warn_for\r\n # and BOTH prec and rec are ill-defined\r\n if zero_division == \"warn\" and (\"f-score\",) == warn_for:\r\n if (pred_sum[true_sum == 0] == 0).any():\r\n _warn_prf(average, \"true nor predicted\", \"F-score is\", len(true_sum))\r\n\r\n # if tp == 0 F will be 1 only if all predictions are zero, all labels are\r\n # zero, and zero_division=1. In all other case, 0\r\n if np.isposinf(beta):\r\n f_score = recall\r\n else:\r\n denom = beta2 * precision + recall\r\n\r\n denom[denom == 0.0] = 1 # avoid division by 0\r\n f_score = (1 + beta2) * precision * recall / denom\r\n\r\n # Average the results\r\n if average == \"weighted\":\r\n weights = true_sum\r\n if weights.sum() == 0:\r\n zero_division_value = np.float64(1.0)\r\n if zero_division in [\"warn\", 0]:\r\n zero_division_value = np.float64(0.0)\r\n # precision is zero_division if there are no positive predictions\r\n # recall is zero_division if there are no positive labels\r\n # fscore is zero_division if all labels AND predictions are\r\n # negative\r\n if pred_sum.sum() == 0:\r\n return (\r\n zero_division_value,\r\n zero_division_value,\r\n zero_division_value,\r\n None,\r\n )\r\n else:\r\n return (np.float64(0.0), zero_division_value, np.float64(0.0), None)\r\n\r\n elif average == \"samples\":\r\n weights = sample_weight\r\n else:\r\n weights = None\r\n\r\n if average is not None:\r\n assert average != \"binary\" or len(precision) == 1\r\n precision = np.average(precision, weights=weights)\r\n recall = np.average(recall, weights=weights)\r\n f_score = np.average(f_score, weights=weights)\r\n true_sum = None # return no support\r\n\r\n return precision, recall, f_score, true_sum\r\n\r\n\r\ndef precision_score(\r\n y_true,\r\n y_pred,\r\n *,\r\n labels=None,\r\n pos_label=1,\r\n average=\"binary\",\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Compute the precision.\r\n\r\n The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of\r\n true positives and ``fp`` the number of false positives. 
The precision is\r\n intuitively the ability of the classifier not to label as positive a sample\r\n that is negative.\r\n\r\n The best value is 1 and the worst value is 0.\r\n\r\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n labels : array-like, default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n .. versionchanged:: 0.17\r\n Parameter `labels` improved for multiclass problem.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \\\r\n default='binary'\r\n This parameter is required for multiclass/multilabel targets.\r\n If ``None``, the scores for each class are returned. Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average weighted\r\n by support (the number of true instances for each label). This\r\n alters 'macro' to account for label imbalance; it can result in an\r\n F-score that is not between precision and recall.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification where this differs from\r\n :func:`accuracy_score`).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division. If set to\r\n \"warn\", this acts as 0, but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n precision : float (if average is not None) or array of float of shape \\\r\n (n_unique_labels,)\r\n Precision of the positive class in binary classification or weighted\r\n average of the precision of each class for the multiclass task.\r\n\r\n See Also\r\n --------\r\n precision_recall_fscore_support, multilabel_confusion_matrix\r\n\r\n Notes\r\n -----\r\n When ``true positive + false positive == 0``, precision returns 0 and\r\n raises ``UndefinedMetricWarning``. 
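    For instance, ``precision_score([0, 1], [0, 0])`` makes no positive
    predictions, so the precision of the positive class is ill-defined and
    0.0 is returned together with a warning.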
This behavior can be\r\n modified with ``zero_division``.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import precision_score\r\n >>> y_true = [0, 1, 2, 0, 1, 2]\r\n >>> y_pred = [0, 2, 1, 0, 0, 1]\r\n >>> precision_score(y_true, y_pred, average='macro')\r\n 0.22...\r\n >>> precision_score(y_true, y_pred, average='micro')\r\n 0.33...\r\n >>> precision_score(y_true, y_pred, average='weighted')\r\n 0.22...\r\n >>> precision_score(y_true, y_pred, average=None)\r\n array([0.66..., 0. , 0. ])\r\n >>> y_pred = [0, 0, 0, 0, 0, 0]\r\n >>> precision_score(y_true, y_pred, average=None)\r\n array([0.33..., 0. , 0. ])\r\n >>> precision_score(y_true, y_pred, average=None, zero_division=1)\r\n array([0.33..., 1. , 1. ])\r\n >>> # multilabel classification\r\n >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]\r\n >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]\r\n >>> precision_score(y_true, y_pred, average=None)\r\n array([0.5, 1. , 1. ])\r\n \"\"\"\r\n p, _, _, _ = precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n labels=labels,\r\n pos_label=pos_label,\r\n average=average,\r\n warn_for=(\"precision\",),\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n return p\r\n\r\n\r\ndef recall_score(\r\n y_true,\r\n y_pred,\r\n *,\r\n labels=None,\r\n pos_label=1,\r\n average=\"binary\",\r\n sample_weight=None,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Compute the recall.\r\n\r\n The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of\r\n true positives and ``fn`` the number of false negatives. The recall is\r\n intuitively the ability of the classifier to find all the positive samples.\r\n\r\n The best value is 1 and the worst value is 0.\r\n\r\n Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n labels : array-like, default=None\r\n The set of labels to include when ``average != 'binary'``, and their\r\n order if ``average is None``. Labels present in the data can be\r\n excluded, for example to calculate a multiclass average ignoring a\r\n majority negative class, while labels not present in the data will\r\n result in 0 components in a macro average. For multilabel targets,\r\n labels are column indices. By default, all labels in ``y_true`` and\r\n ``y_pred`` are used in sorted order.\r\n\r\n .. versionchanged:: 0.17\r\n Parameter `labels` improved for multiclass problem.\r\n\r\n pos_label : str or int, default=1\r\n The class to report if ``average='binary'`` and the data is binary.\r\n If the data are multiclass or multilabel, this will be ignored;\r\n setting ``labels=[pos_label]`` and ``average != 'binary'`` will report\r\n scores for that label only.\r\n\r\n average : {'micro', 'macro', 'samples', 'weighted', 'binary'} or None, \\\r\n default='binary'\r\n This parameter is required for multiclass/multilabel targets.\r\n If ``None``, the scores for each class are returned. 
Otherwise, this\r\n determines the type of averaging performed on the data:\r\n\r\n ``'binary'``:\r\n Only report results for the class specified by ``pos_label``.\r\n This is applicable only if targets (``y_{true,pred}``) are binary.\r\n ``'micro'``:\r\n Calculate metrics globally by counting the total true positives,\r\n false negatives and false positives.\r\n ``'macro'``:\r\n Calculate metrics for each label, and find their unweighted\r\n mean. This does not take label imbalance into account.\r\n ``'weighted'``:\r\n Calculate metrics for each label, and find their average weighted\r\n by support (the number of true instances for each label). This\r\n alters 'macro' to account for label imbalance; it can result in an\r\n F-score that is not between precision and recall. Weighted recall\r\n is equal to accuracy.\r\n ``'samples'``:\r\n Calculate metrics for each instance, and find their average (only\r\n meaningful for multilabel classification where this differs from\r\n :func:`accuracy_score`).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division. If set to\r\n \"warn\", this acts as 0, but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n recall : float (if average is not None) or array of float of shape \\\r\n (n_unique_labels,)\r\n Recall of the positive class in binary classification or weighted\r\n average of the recall of each class for the multiclass task.\r\n\r\n See Also\r\n --------\r\n precision_recall_fscore_support, balanced_accuracy_score,\r\n multilabel_confusion_matrix\r\n\r\n Notes\r\n -----\r\n When ``true positive + false negative == 0``, recall returns 0 and raises\r\n ``UndefinedMetricWarning``. This behavior can be modified with\r\n ``zero_division``.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import recall_score\r\n >>> y_true = [0, 1, 2, 0, 1, 2]\r\n >>> y_pred = [0, 2, 1, 0, 0, 1]\r\n >>> recall_score(y_true, y_pred, average='macro')\r\n 0.33...\r\n >>> recall_score(y_true, y_pred, average='micro')\r\n 0.33...\r\n >>> recall_score(y_true, y_pred, average='weighted')\r\n 0.33...\r\n >>> recall_score(y_true, y_pred, average=None)\r\n array([1., 0., 0.])\r\n >>> y_true = [0, 0, 0, 0, 0, 0]\r\n >>> recall_score(y_true, y_pred, average=None)\r\n array([0.5, 0. , 0. ])\r\n >>> recall_score(y_true, y_pred, average=None, zero_division=1)\r\n array([0.5, 1. , 1. ])\r\n >>> # multilabel classification\r\n >>> y_true = [[0, 0, 0], [1, 1, 1], [0, 1, 1]]\r\n >>> y_pred = [[0, 0, 0], [1, 1, 1], [1, 1, 0]]\r\n >>> recall_score(y_true, y_pred, average=None)\r\n array([1. , 1. , 0.5])\r\n \"\"\"\r\n _, r, _, _ = precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n labels=labels,\r\n pos_label=pos_label,\r\n average=average,\r\n warn_for=(\"recall\",),\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n return r\r\n\r\n\r\ndef balanced_accuracy_score(y_true, y_pred, *, sample_weight=None, adjusted=False):\r\n \"\"\"Compute the balanced accuracy.\r\n\r\n The balanced accuracy in binary and multiclass classification problems to\r\n deal with imbalanced datasets. It is defined as the average of recall\r\n obtained on each class.\r\n\r\n The best value is 1 and the worst value is 0 when ``adjusted=False``.\r\n\r\n Read more in the :ref:`User Guide <balanced_accuracy_score>`.\r\n\r\n .. 
versionadded:: 0.20\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like\r\n Estimated targets as returned by a classifier.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n adjusted : bool, default=False\r\n When true, the result is adjusted for chance, so that random\r\n performance would score 0, while keeping perfect performance at a score\r\n of 1.\r\n\r\n Returns\r\n -------\r\n balanced_accuracy : float\r\n\r\n See Also\r\n --------\r\n recall_score, roc_auc_score\r\n\r\n Notes\r\n -----\r\n Some literature promotes alternative definitions of balanced accuracy. Our\r\n definition is equivalent to :func:`accuracy_score` with class-balanced\r\n sample weights, and shares desirable properties with the binary case.\r\n See the :ref:`User Guide <balanced_accuracy_score>`.\r\n\r\n References\r\n ----------\r\n .. [1] Brodersen, K.H.; Ong, C.S.; Stephan, K.E.; Buhmann, J.M. (2010).\r\n The balanced accuracy and its posterior distribution.\r\n Proceedings of the 20th International Conference on Pattern\r\n Recognition, 3121-24.\r\n .. [2] John. D. Kelleher, Brian Mac Namee, Aoife D'Arcy, (2015).\r\n `Fundamentals of Machine Learning for Predictive Data Analytics:\r\n Algorithms, Worked Examples, and Case Studies\r\n <https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import balanced_accuracy_score\r\n >>> y_true = [0, 1, 0, 0, 1, 0]\r\n >>> y_pred = [0, 1, 0, 0, 0, 1]\r\n >>> balanced_accuracy_score(y_true, y_pred)\r\n 0.625\r\n\r\n \"\"\"\r\n C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)\r\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\r\n per_class = np.diag(C) / C.sum(axis=1)\r\n if np.any(np.isnan(per_class)):\r\n warnings.warn(\"y_pred contains classes not in y_true\")\r\n per_class = per_class[~np.isnan(per_class)]\r\n score = np.mean(per_class)\r\n if adjusted:\r\n n_classes = len(per_class)\r\n chance = 1 / n_classes\r\n score -= chance\r\n score /= 1 - chance\r\n return score\r\n\r\n\r\ndef classification_report(\r\n y_true,\r\n y_pred,\r\n *,\r\n labels=None,\r\n target_names=None,\r\n sample_weight=None,\r\n digits=2,\r\n output_dict=False,\r\n zero_division=\"warn\",\r\n):\r\n \"\"\"Build a text report showing the main classification metrics.\r\n\r\n Read more in the :ref:`User Guide <classification_report>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) target values.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Estimated targets as returned by a classifier.\r\n\r\n labels : array-like of shape (n_labels,), default=None\r\n Optional list of label indices to include in the report.\r\n\r\n target_names : list of str of shape (n_labels,), default=None\r\n Optional display names matching the labels (same order).\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n digits : int, default=2\r\n Number of digits for formatting output floating point values.\r\n When ``output_dict`` is ``True``, this will be ignored and the\r\n returned values will not be rounded.\r\n\r\n output_dict : bool, default=False\r\n If True, return output as dict.\r\n\r\n .. 
versionadded:: 0.20\r\n\r\n zero_division : \"warn\", 0 or 1, default=\"warn\"\r\n Sets the value to return when there is a zero division. If set to\r\n \"warn\", this acts as 0, but warnings are also raised.\r\n\r\n Returns\r\n -------\r\n report : str or dict\r\n Text summary of the precision, recall, F1 score for each class.\r\n Dictionary returned if output_dict is True. Dictionary has the\r\n following structure::\r\n\r\n {'label 1': {'precision':0.5,\r\n 'recall':1.0,\r\n 'f1-score':0.67,\r\n 'support':1},\r\n 'label 2': { ... },\r\n ...\r\n }\r\n\r\n The reported averages include macro average (averaging the unweighted\r\n mean per label), weighted average (averaging the support-weighted mean\r\n per label), and sample average (only for multilabel classification).\r\n Micro average (averaging the total true positives, false negatives and\r\n false positives) is only shown for multi-label or multi-class\r\n with a subset of classes, because it corresponds to accuracy\r\n otherwise and would be the same for all metrics.\r\n See also :func:`precision_recall_fscore_support` for more details\r\n on averages.\r\n\r\n Note that in binary classification, recall of the positive class\r\n is also known as \"sensitivity\"; recall of the negative class is\r\n \"specificity\".\r\n\r\n See Also\r\n --------\r\n precision_recall_fscore_support, confusion_matrix,\r\n multilabel_confusion_matrix\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import classification_report\r\n >>> y_true = [0, 1, 2, 2, 2]\r\n >>> y_pred = [0, 0, 2, 2, 1]\r\n >>> target_names = ['class 0', 'class 1', 'class 2']\r\n >>> print(classification_report(y_true, y_pred, target_names=target_names))\r\n precision recall f1-score support\r\n <BLANKLINE>\r\n class 0 0.50 1.00 0.67 1\r\n class 1 0.00 0.00 0.00 1\r\n class 2 1.00 0.67 0.80 3\r\n <BLANKLINE>\r\n accuracy 0.60 5\r\n macro avg 0.50 0.56 0.49 5\r\n weighted avg 0.70 0.60 0.61 5\r\n <BLANKLINE>\r\n >>> y_pred = [1, 1, 0]\r\n >>> y_true = [1, 1, 1]\r\n >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))\r\n precision recall f1-score support\r\n <BLANKLINE>\r\n 1 1.00 0.67 0.80 3\r\n 2 0.00 0.00 0.00 0\r\n 3 0.00 0.00 0.00 0\r\n <BLANKLINE>\r\n micro avg 1.00 0.67 0.80 3\r\n macro avg 0.33 0.22 0.27 3\r\n weighted avg 1.00 0.67 0.80 3\r\n <BLANKLINE>\r\n \"\"\"\r\n\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n\r\n if labels is None:\r\n labels = unique_labels(y_true, y_pred)\r\n labels_given = False\r\n else:\r\n labels = np.asarray(labels)\r\n labels_given = True\r\n\r\n # labelled micro average\r\n micro_is_accuracy = (y_type == \"multiclass\" or y_type == \"binary\") and (\r\n not labels_given or (set(labels) == set(unique_labels(y_true, y_pred)))\r\n )\r\n\r\n if target_names is not None and len(labels) != len(target_names):\r\n if labels_given:\r\n warnings.warn(\r\n \"labels size, {0}, does not match size of target_names, {1}\".format(\r\n len(labels), len(target_names)\r\n )\r\n )\r\n else:\r\n raise ValueError(\r\n \"Number of classes, {0}, does not match size of \"\r\n \"target_names, {1}. 
Try specifying the labels \"\r\n \"parameter\".format(len(labels), len(target_names))\r\n )\r\n if target_names is None:\r\n target_names = [\"%s\" % l for l in labels]\r\n\r\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\r\n # compute per-class results without averaging\r\n p, r, f1, s = precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n labels=labels,\r\n average=None,\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n rows = zip(target_names, p, r, f1, s)\r\n\r\n if y_type.startswith(\"multilabel\"):\r\n average_options = (\"micro\", \"macro\", \"weighted\", \"samples\")\r\n else:\r\n average_options = (\"micro\", \"macro\", \"weighted\")\r\n\r\n if output_dict:\r\n report_dict = {label[0]: label[1:] for label in rows}\r\n for label, scores in report_dict.items():\r\n report_dict[label] = dict(zip(headers, [i.item() for i in scores]))\r\n else:\r\n longest_last_line_heading = \"weighted avg\"\r\n name_width = max(len(cn) for cn in target_names)\r\n width = max(name_width, len(longest_last_line_heading), digits)\r\n head_fmt = \"{:>{width}s} \" + \" {:>9}\" * len(headers)\r\n report = head_fmt.format(\"\", *headers, width=width)\r\n report += \"\\n\\n\"\r\n row_fmt = \"{:>{width}s} \" + \" {:>9.{digits}f}\" * 3 + \" {:>9}\\n\"\r\n for row in rows:\r\n report += row_fmt.format(*row, width=width, digits=digits)\r\n report += \"\\n\"\r\n\r\n # compute all applicable averages\r\n for average in average_options:\r\n if average.startswith(\"micro\") and micro_is_accuracy:\r\n line_heading = \"accuracy\"\r\n else:\r\n line_heading = average + \" avg\"\r\n\r\n # compute averages with specified averaging method\r\n avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(\r\n y_true,\r\n y_pred,\r\n labels=labels,\r\n average=average,\r\n sample_weight=sample_weight,\r\n zero_division=zero_division,\r\n )\r\n avg = [avg_p, avg_r, avg_f1, np.sum(s)]\r\n\r\n if output_dict:\r\n report_dict[line_heading] = dict(zip(headers, [i.item() for i in avg]))\r\n else:\r\n if line_heading == \"accuracy\":\r\n row_fmt_accuracy = (\r\n \"{:>{width}s} \"\r\n + \" {:>9.{digits}}\" * 2\r\n + \" {:>9.{digits}f}\"\r\n + \" {:>9}\\n\"\r\n )\r\n report += row_fmt_accuracy.format(\r\n line_heading, \"\", \"\", *avg[2:], width=width, digits=digits\r\n )\r\n else:\r\n report += row_fmt.format(line_heading, *avg, width=width, digits=digits)\r\n\r\n if output_dict:\r\n if \"accuracy\" in report_dict.keys():\r\n report_dict[\"accuracy\"] = report_dict[\"accuracy\"][\"precision\"]\r\n return report_dict\r\n else:\r\n return report\r\n\r\n\r\ndef hamming_loss(y_true, y_pred, *, sample_weight=None):\r\n \"\"\"Compute the average Hamming loss.\r\n\r\n The Hamming loss is the fraction of labels that are incorrectly predicted.\r\n\r\n Read more in the :ref:`User Guide <hamming_loss>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : 1d array-like, or label indicator array / sparse matrix\r\n Ground truth (correct) labels.\r\n\r\n y_pred : 1d array-like, or label indicator array / sparse matrix\r\n Predicted labels, as returned by a classifier.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n .. 
versionadded:: 0.18\r\n\r\n Returns\r\n -------\r\n loss : float or int\r\n Return the average Hamming loss between element of ``y_true`` and\r\n ``y_pred``.\r\n\r\n See Also\r\n --------\r\n accuracy_score, jaccard_score, zero_one_loss\r\n\r\n Notes\r\n -----\r\n In multiclass classification, the Hamming loss corresponds to the Hamming\r\n distance between ``y_true`` and ``y_pred`` which is equivalent to the\r\n subset ``zero_one_loss`` function, when `normalize` parameter is set to\r\n True.\r\n\r\n In multilabel classification, the Hamming loss is different from the\r\n subset zero-one loss. The zero-one loss considers the entire set of labels\r\n for a given sample incorrect if it does not entirely match the true set of\r\n labels. Hamming loss is more forgiving in that it penalizes only the\r\n individual labels.\r\n\r\n The Hamming loss is upperbounded by the subset zero-one loss, when\r\n `normalize` parameter is set to True. It is always between 0 and 1,\r\n lower being better.\r\n\r\n References\r\n ----------\r\n .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:\r\n An Overview. International Journal of Data Warehousing & Mining,\r\n 3(3), 1-13, July-September 2007.\r\n\r\n .. [2] `Wikipedia entry on the Hamming distance\r\n <https://en.wikipedia.org/wiki/Hamming_distance>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import hamming_loss\r\n >>> y_pred = [1, 2, 3, 4]\r\n >>> y_true = [2, 2, 3, 4]\r\n >>> hamming_loss(y_true, y_pred)\r\n 0.25\r\n\r\n In the multilabel case with binary label indicators:\r\n\r\n >>> import numpy as np\r\n >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))\r\n 0.75\r\n \"\"\"\r\n\r\n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\r\n check_consistent_length(y_true, y_pred, sample_weight)\r\n\r\n if sample_weight is None:\r\n weight_average = 1.0\r\n else:\r\n weight_average = np.mean(sample_weight)\r\n\r\n if y_type.startswith(\"multilabel\"):\r\n n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight)\r\n return n_differences / (y_true.shape[0] * y_true.shape[1] * weight_average)\r\n\r\n elif y_type in [\"binary\", \"multiclass\"]:\r\n return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)\r\n else:\r\n raise ValueError(\"{0} is not supported\".format(y_type))\r\n\r\n\r\ndef log_loss(\r\n y_true, y_pred, *, eps=1e-15, normalize=True, sample_weight=None, labels=None\r\n):\r\n r\"\"\"Log loss, aka logistic loss or cross-entropy loss.\r\n\r\n This is the loss function used in (multinomial) logistic regression\r\n and extensions of it such as neural networks, defined as the negative\r\n log-likelihood of a logistic model that returns ``y_pred`` probabilities\r\n for its training data ``y_true``.\r\n The log loss is only defined for two or more labels.\r\n For a single sample with true label :math:`y \\in \\{0,1\\}` and\r\n a probability estimate :math:`p = \\operatorname{Pr}(y = 1)`, the log\r\n loss is:\r\n\r\n .. math::\r\n L_{\\log}(y, p) = -(y \\log (p) + (1 - y) \\log (1 - p))\r\n\r\n Read more in the :ref:`User Guide <log_loss>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : array-like or label indicator matrix\r\n Ground truth (correct) labels for n_samples samples.\r\n\r\n y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)\r\n Predicted probabilities, as returned by a classifier's\r\n predict_proba method. If ``y_pred.shape = (n_samples,)``\r\n the probabilities provided are assumed to be that of the\r\n positive class. 
The labels in ``y_pred`` are assumed to be\r\n ordered alphabetically, as done by\r\n :class:`preprocessing.LabelBinarizer`.\r\n\r\n eps : float, default=1e-15\r\n Log loss is undefined for p=0 or p=1, so probabilities are\r\n clipped to max(eps, min(1 - eps, p)).\r\n\r\n normalize : bool, default=True\r\n If true, return the mean loss per sample.\r\n Otherwise, return the sum of the per-sample losses.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n labels : array-like, default=None\r\n If not provided, labels will be inferred from y_true. If ``labels``\r\n is ``None`` and ``y_pred`` has shape (n_samples,) the labels are\r\n assumed to be binary and are inferred from ``y_true``.\r\n\r\n .. versionadded:: 0.18\r\n\r\n Returns\r\n -------\r\n loss : float\r\n\r\n Notes\r\n -----\r\n The logarithm used is the natural logarithm (base-e).\r\n\r\n Examples\r\n --------\r\n >>> from sklearn.metrics import log_loss\r\n >>> log_loss([\"spam\", \"ham\", \"ham\", \"spam\"],\r\n ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])\r\n 0.21616...\r\n\r\n References\r\n ----------\r\n C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,\r\n p. 209.\r\n \"\"\"\r\n y_pred = check_array(y_pred, ensure_2d=False)\r\n check_consistent_length(y_pred, y_true, sample_weight)\r\n\r\n lb = LabelBinarizer()\r\n\r\n if labels is not None:\r\n lb.fit(labels)\r\n else:\r\n lb.fit(y_true)\r\n\r\n if len(lb.classes_) == 1:\r\n if labels is None:\r\n raise ValueError(\r\n \"y_true contains only one label ({0}). Please \"\r\n \"provide the true labels explicitly through the \"\r\n \"labels argument.\".format(lb.classes_[0])\r\n )\r\n else:\r\n raise ValueError(\r\n \"The labels array needs to contain at least two \"\r\n \"labels for log_loss, \"\r\n \"got {0}.\".format(lb.classes_)\r\n )\r\n\r\n transformed_labels = lb.transform(y_true)\r\n\r\n if transformed_labels.shape[1] == 1:\r\n transformed_labels = np.append(\r\n 1 - transformed_labels, transformed_labels, axis=1\r\n )\r\n\r\n # Clipping\r\n y_pred = np.clip(y_pred, eps, 1 - eps)\r\n\r\n # If y_pred is of single dimension, assume y_true to be binary\r\n # and then check.\r\n if y_pred.ndim == 1:\r\n y_pred = y_pred[:, np.newaxis]\r\n if y_pred.shape[1] == 1:\r\n y_pred = np.append(1 - y_pred, y_pred, axis=1)\r\n\r\n # Check if dimensions are consistent.\r\n transformed_labels = check_array(transformed_labels)\r\n if len(lb.classes_) != y_pred.shape[1]:\r\n if labels is None:\r\n raise ValueError(\r\n \"y_true and y_pred contain different number of \"\r\n \"classes {0}, {1}. Please provide the true \"\r\n \"labels explicitly through the labels argument. \"\r\n \"Classes found in \"\r\n \"y_true: {2}\".format(\r\n transformed_labels.shape[1], y_pred.shape[1], lb.classes_\r\n )\r\n )\r\n else:\r\n raise ValueError(\r\n \"The number of classes in labels is different \"\r\n \"from that in y_pred. 
Classes found in \"\r\n \"labels: {0}\".format(lb.classes_)\r\n )\r\n\r\n # Renormalize\r\n y_pred /= y_pred.sum(axis=1)[:, np.newaxis]\r\n loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)\r\n\r\n return _weighted_sum(loss, sample_weight, normalize)\r\n\r\n\r\ndef hinge_loss(y_true, pred_decision, *, labels=None, sample_weight=None):\r\n \"\"\"Average hinge loss (non-regularized).\r\n\r\n In binary class case, assuming labels in y_true are encoded with +1 and -1,\r\n when a prediction mistake is made, ``margin = y_true * pred_decision`` is\r\n always negative (since the signs disagree), implying ``1 - margin`` is\r\n always greater than 1. The cumulated hinge loss is therefore an upper\r\n bound of the number of mistakes made by the classifier.\r\n\r\n In multiclass case, the function expects that either all the labels are\r\n included in y_true or an optional labels argument is provided which\r\n contains all the labels. The multilabel margin is calculated according\r\n to Crammer-Singer's method. As in the binary case, the cumulated hinge loss\r\n is an upper bound of the number of mistakes made by the classifier.\r\n\r\n Read more in the :ref:`User Guide <hinge_loss>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : array of shape (n_samples,)\r\n True target, consisting of integers of two values. The positive label\r\n must be greater than the negative label.\r\n\r\n pred_decision : array of shape (n_samples,) or (n_samples, n_classes)\r\n Predicted decisions, as output by decision_function (floats).\r\n\r\n labels : array-like, default=None\r\n Contains all the labels for the problem. Used in multiclass hinge loss.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n loss : float\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry on the Hinge loss\r\n <https://en.wikipedia.org/wiki/Hinge_loss>`_.\r\n\r\n .. [2] Koby Crammer, Yoram Singer. On the Algorithmic\r\n Implementation of Multiclass Kernel-based Vector\r\n Machines. Journal of Machine Learning Research 2,\r\n (2001), 265-292.\r\n\r\n .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models\r\n by Robert C. 
Moore, John DeNero\r\n <http://www.ttic.edu/sigml/symposium2011/papers/\r\n Moore+DeNero_Regularization.pdf>`_.\r\n\r\n Examples\r\n --------\r\n >>> from sklearn import svm\r\n >>> from sklearn.metrics import hinge_loss\r\n >>> X = [[0], [1]]\r\n >>> y = [-1, 1]\r\n >>> est = svm.LinearSVC(random_state=0)\r\n >>> est.fit(X, y)\r\n LinearSVC(random_state=0)\r\n >>> pred_decision = est.decision_function([[-2], [3], [0.5]])\r\n >>> pred_decision\r\n array([-2.18..., 2.36..., 0.09...])\r\n >>> hinge_loss([-1, 1, 1], pred_decision)\r\n 0.30...\r\n\r\n In the multiclass case:\r\n\r\n >>> import numpy as np\r\n >>> X = np.array([[0], [1], [2], [3]])\r\n >>> Y = np.array([0, 1, 2, 3])\r\n >>> labels = np.array([0, 1, 2, 3])\r\n >>> est = svm.LinearSVC()\r\n >>> est.fit(X, Y)\r\n LinearSVC()\r\n >>> pred_decision = est.decision_function([[-1], [2], [3]])\r\n >>> y_true = [0, 2, 3]\r\n >>> hinge_loss(y_true, pred_decision, labels=labels)\r\n 0.56...\r\n \"\"\"\r\n check_consistent_length(y_true, pred_decision, sample_weight)\r\n pred_decision = check_array(pred_decision, ensure_2d=False)\r\n y_true = column_or_1d(y_true)\r\n y_true_unique = np.unique(labels if labels is not None else y_true)\r\n\r\n if y_true_unique.size > 2:\r\n\r\n if pred_decision.ndim <= 1:\r\n raise ValueError(\r\n \"The shape of pred_decision cannot be 1d array\"\r\n \"with a multiclass target. pred_decision shape \"\r\n \"must be (n_samples, n_classes), that is \"\r\n f\"({y_true.shape[0]}, {y_true_unique.size}).\"\r\n f\" Got: {pred_decision.shape}\"\r\n )\r\n\r\n # pred_decision.ndim > 1 is true\r\n if y_true_unique.size != pred_decision.shape[1]:\r\n if labels is None:\r\n raise ValueError(\r\n \"Please include all labels in y_true \"\r\n \"or pass labels as third argument\"\r\n )\r\n else:\r\n raise ValueError(\r\n \"The shape of pred_decision is not \"\r\n \"consistent with the number of classes. \"\r\n \"With a multiclass target, pred_decision \"\r\n \"shape must be \"\r\n \"(n_samples, n_classes), that is \"\r\n f\"({y_true.shape[0]}, {y_true_unique.size}). \"\r\n f\"Got: {pred_decision.shape}\"\r\n )\r\n if labels is None:\r\n labels = y_true_unique\r\n le = LabelEncoder()\r\n le.fit(labels)\r\n y_true = le.transform(y_true)\r\n mask = np.ones_like(pred_decision, dtype=bool)\r\n mask[np.arange(y_true.shape[0]), y_true] = False\r\n margin = pred_decision[~mask]\r\n margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1)\r\n\r\n else:\r\n # Handles binary class case\r\n # this code assumes that positive and negative labels\r\n # are encoded as +1 and -1 respectively\r\n pred_decision = column_or_1d(pred_decision)\r\n pred_decision = np.ravel(pred_decision)\r\n\r\n lbin = LabelBinarizer(neg_label=-1)\r\n y_true = lbin.fit_transform(y_true)[:, 0]\r\n\r\n try:\r\n margin = y_true * pred_decision\r\n except TypeError:\r\n raise TypeError(\"pred_decision should be an array of floats.\")\r\n\r\n losses = 1 - margin\r\n # The hinge_loss doesn't penalize good enough predictions.\r\n np.clip(losses, 0, None, out=losses)\r\n return np.average(losses, weights=sample_weight)\r\n\r\n\r\ndef brier_score_loss(y_true, y_prob, *, sample_weight=None, pos_label=None):\r\n \"\"\"Compute the Brier score loss.\r\n\r\n The smaller the Brier score loss, the better, hence the naming with \"loss\".\r\n The Brier score measures the mean squared difference between the predicted\r\n probability and the actual outcome. 
The Brier score always\r\n takes on a value between zero and one, since this is the largest\r\n possible difference between a predicted probability (which must be\r\n between zero and one) and the actual outcome (which can take on values\r\n of only 0 and 1). It can be decomposed is the sum of refinement loss and\r\n calibration loss.\r\n\r\n The Brier score is appropriate for binary and categorical outcomes that\r\n can be structured as true or false, but is inappropriate for ordinal\r\n variables which can take on three or more values (this is because the\r\n Brier score assumes that all possible outcomes are equivalently\r\n \"distant\" from one another). Which label is considered to be the positive\r\n label is controlled via the parameter `pos_label`, which defaults to\r\n the greater label unless `y_true` is all 0 or all -1, in which case\r\n `pos_label` defaults to 1.\r\n\r\n Read more in the :ref:`User Guide <brier_score_loss>`.\r\n\r\n Parameters\r\n ----------\r\n y_true : array of shape (n_samples,)\r\n True targets.\r\n\r\n y_prob : array of shape (n_samples,)\r\n Probabilities of the positive class.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n pos_label : int or str, default=None\r\n Label of the positive class. `pos_label` will be inferred in the\r\n following manner:\r\n\r\n * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1;\r\n * else if `y_true` contains string, an error will be raised and\r\n `pos_label` should be explicitly specified;\r\n * otherwise, `pos_label` defaults to the greater label,\r\n i.e. `np.unique(y_true)[-1]`.\r\n\r\n Returns\r\n -------\r\n score : float\r\n Brier score loss.\r\n\r\n Examples\r\n --------\r\n >>> import numpy as np\r\n >>> from sklearn.metrics import brier_score_loss\r\n >>> y_true = np.array([0, 1, 1, 0])\r\n >>> y_true_categorical = np.array([\"spam\", \"ham\", \"ham\", \"spam\"])\r\n >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])\r\n >>> brier_score_loss(y_true, y_prob)\r\n 0.037...\r\n >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)\r\n 0.037...\r\n >>> brier_score_loss(y_true_categorical, y_prob, pos_label=\"ham\")\r\n 0.037...\r\n >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)\r\n 0.0\r\n\r\n References\r\n ----------\r\n .. [1] `Wikipedia entry for the Brier score\r\n <https://en.wikipedia.org/wiki/Brier_score>`_.\r\n \"\"\"\r\n y_true = column_or_1d(y_true)\r\n y_prob = column_or_1d(y_prob)\r\n assert_all_finite(y_true)\r\n assert_all_finite(y_prob)\r\n check_consistent_length(y_true, y_prob, sample_weight)\r\n\r\n y_type = type_of_target(y_true)\r\n if y_type != \"binary\":\r\n raise ValueError(\r\n \"Only binary classification is supported. The type of the target \"\r\n f\"is {y_type}.\"\r\n )\r\n\r\n if y_prob.max() > 1:\r\n raise ValueError(\"y_prob contains values greater than 1.\")\r\n if y_prob.min() < 0:\r\n raise ValueError(\"y_prob contains values less than 0.\")\r\n\r\n try:\r\n pos_label = _check_pos_label_consistency(pos_label, y_true)\r\n except ValueError:\r\n classes = np.unique(y_true)\r\n if classes.dtype.kind not in (\"O\", \"U\", \"S\"):\r\n # for backward compatibility, if classes are not string then\r\n # `pos_label` will correspond to the greater label\r\n pos_label = classes[-1]\r\n else:\r\n raise\r\n y_true = np.array(y_true == pos_label, int)\r\n return np.average((y_true - y_prob) ** 2, weights=sample_weight)\r\n",
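The metrics module stored in the code cell above documents several scorers (recall, balanced accuracy, Hamming loss, log loss, Brier score) together with their formulas. As a hedged illustration of how those public scikit-learn functions are called, the following sketch runs them on a toy binary problem; the y_true/y_pred/y_prob arrays are invented for the example and are not part of the dataset record.

from sklearn.metrics import (
    balanced_accuracy_score,
    brier_score_loss,
    classification_report,
    hamming_loss,
    log_loss,
)

# Toy binary problem (illustrative values only, not from the record above).
y_true = [0, 1, 0, 0, 1, 0]
y_pred = [0, 1, 0, 0, 0, 1]
y_prob = [0.1, 0.8, 0.3, 0.2, 0.4, 0.6]   # predicted P(y = 1)

# Average of per-class recall, robust to class imbalance.
print(balanced_accuracy_score(y_true, y_pred))            # 0.625

# Fraction of mislabelled samples.
print(hamming_loss(y_true, y_pred))                       # 0.333...

# Per-class precision/recall/F1 plus the macro/weighted averages.
print(classification_report(y_true, y_pred, zero_division=0))

# Probabilistic metrics consume probability estimates, not hard labels.
print(log_loss(y_true, y_prob))                           # ~0.457
print(brier_score_loss(y_true, y_prob))                   # 0.15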
"\"\"\"\r\nEA-compatible analogue to to np.putmask\r\n\"\"\"\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Any\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._libs import lib\r\nfrom pandas._typing import ArrayLike\r\n\r\nfrom pandas.core.dtypes.cast import (\r\n convert_scalar_for_putitemlike,\r\n find_common_type,\r\n infer_dtype_from,\r\n)\r\nfrom pandas.core.dtypes.common import (\r\n is_float_dtype,\r\n is_integer_dtype,\r\n is_list_like,\r\n)\r\nfrom pandas.core.dtypes.missing import isna_compat\r\n\r\nfrom pandas.core.arrays import ExtensionArray\r\n\r\n\r\ndef putmask_inplace(values: ArrayLike, mask: np.ndarray, value: Any) -> None:\r\n \"\"\"\r\n ExtensionArray-compatible implementation of np.putmask. The main\r\n difference is we do not handle repeating or truncating like numpy.\r\n\r\n Parameters\r\n ----------\r\n mask : np.ndarray[bool]\r\n We assume extract_bool_array has already been called.\r\n value : Any\r\n \"\"\"\r\n\r\n if lib.is_scalar(value) and isinstance(values, np.ndarray):\r\n value = convert_scalar_for_putitemlike(value, values.dtype)\r\n\r\n if (\r\n not isinstance(values, np.ndarray)\r\n or (values.dtype == object and not lib.is_scalar(value))\r\n # GH#43424: np.putmask raises TypeError if we cannot cast between types with\r\n # rule = \"safe\", a stricter guarantee we may not have here\r\n or (\r\n isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)\r\n )\r\n ):\r\n # GH#19266 using np.putmask gives unexpected results with listlike value\r\n if is_list_like(value) and len(value) == len(values):\r\n values[mask] = value[mask]\r\n else:\r\n values[mask] = value\r\n else:\r\n # GH#37833 np.putmask is more performant than __setitem__\r\n np.putmask(values, mask, value)\r\n\r\n\r\ndef putmask_smart(values: np.ndarray, mask: np.ndarray, new) -> np.ndarray:\r\n \"\"\"\r\n Return a new ndarray, try to preserve dtype if possible.\r\n\r\n Parameters\r\n ----------\r\n values : np.ndarray\r\n `values`, updated in-place.\r\n mask : np.ndarray[bool]\r\n Applies to both sides (array like).\r\n new : `new values` either scalar or an array like aligned with `values`\r\n\r\n Returns\r\n -------\r\n values : ndarray with updated values\r\n this *may* be a copy of the original\r\n\r\n See Also\r\n --------\r\n ndarray.putmask\r\n \"\"\"\r\n # we cannot use np.asarray() here as we cannot have conversions\r\n # that numpy does when numeric are mixed with strings\r\n\r\n # n should be the length of the mask or a scalar here\r\n if not is_list_like(new):\r\n new = np.broadcast_to(new, mask.shape)\r\n\r\n # see if we are only masking values that if putted\r\n # will work in the current dtype\r\n try:\r\n nn = new[mask]\r\n except TypeError:\r\n # TypeError: only integer scalar arrays can be converted to a scalar index\r\n pass\r\n else:\r\n # make sure that we have a nullable type if we have nulls\r\n if not isna_compat(values, nn[0]):\r\n pass\r\n elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)):\r\n # only compare integers/floats\r\n pass\r\n elif not (is_float_dtype(values.dtype) or is_integer_dtype(values.dtype)):\r\n # only compare integers/floats\r\n pass\r\n else:\r\n\r\n # we ignore ComplexWarning here\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\r\n nn_at = nn.astype(values.dtype)\r\n\r\n comp = nn == nn_at\r\n if is_list_like(comp) and comp.all():\r\n nv = values.copy()\r\n nv[mask] = nn_at\r\n return nv\r\n\r\n new = 
np.asarray(new)\r\n\r\n if values.dtype.kind == new.dtype.kind:\r\n # preserves dtype if possible\r\n return _putmask_preserve(values, new, mask)\r\n\r\n dtype = find_common_type([values.dtype, new.dtype])\r\n # error: Argument 1 to \"astype\" of \"_ArrayOrScalarCommon\" has incompatible type\r\n # \"Union[dtype[Any], ExtensionDtype]\"; expected \"Union[dtype[Any], None, type,\r\n # _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]],\r\n # List[Any], _DTypeDict, Tuple[Any, Any]]]\"\r\n values = values.astype(dtype) # type: ignore[arg-type]\r\n\r\n return _putmask_preserve(values, new, mask)\r\n\r\n\r\ndef _putmask_preserve(new_values: np.ndarray, new, mask: np.ndarray):\r\n try:\r\n new_values[mask] = new[mask]\r\n except (IndexError, ValueError):\r\n new_values[mask] = new\r\n return new_values\r\n\r\n\r\ndef putmask_without_repeat(values: np.ndarray, mask: np.ndarray, new: Any) -> None:\r\n \"\"\"\r\n np.putmask will truncate or repeat if `new` is a listlike with\r\n len(new) != len(values). We require an exact match.\r\n\r\n Parameters\r\n ----------\r\n values : np.ndarray\r\n mask : np.ndarray[bool]\r\n new : Any\r\n \"\"\"\r\n if getattr(new, \"ndim\", 0) >= 1:\r\n new = new.astype(values.dtype, copy=False)\r\n\r\n # TODO: this prob needs some better checking for 2D cases\r\n nlocs = mask.sum()\r\n if nlocs > 0 and is_list_like(new) and getattr(new, \"ndim\", 1) == 1:\r\n if nlocs == len(new):\r\n # GH#30567\r\n # If length of ``new`` is less than the length of ``values``,\r\n # `np.putmask` would first repeat the ``new`` array and then\r\n # assign the masked values hence produces incorrect result.\r\n # `np.place` on the other hand uses the ``new`` values at it is\r\n # to place in the masked locations of ``values``\r\n np.place(values, mask, new)\r\n # i.e. values[mask] = new\r\n elif mask.shape[-1] == len(new) or len(new) == 1:\r\n np.putmask(values, mask, new)\r\n else:\r\n raise ValueError(\"cannot assign mismatch length to masked array\")\r\n else:\r\n np.putmask(values, mask, new)\r\n\r\n\r\ndef validate_putmask(values: ArrayLike, mask: np.ndarray) -> tuple[np.ndarray, bool]:\r\n \"\"\"\r\n Validate mask and check if this putmask operation is a no-op.\r\n \"\"\"\r\n mask = extract_bool_array(mask)\r\n if mask.shape != values.shape:\r\n raise ValueError(\"putmask: mask and data must be the same size\")\r\n\r\n noop = not mask.any()\r\n return mask, noop\r\n\r\n\r\ndef extract_bool_array(mask: ArrayLike) -> np.ndarray:\r\n \"\"\"\r\n If we have a SparseArray or BooleanArray, convert it to ndarray[bool].\r\n \"\"\"\r\n if isinstance(mask, ExtensionArray):\r\n # We could have BooleanArray, Sparse[bool], ...\r\n # Except for BooleanArray, this is equivalent to just\r\n # np.asarray(mask, dtype=bool)\r\n mask = mask.to_numpy(dtype=bool, na_value=False)\r\n\r\n mask = np.asarray(mask, dtype=bool)\r\n return mask\r\n\r\n\r\ndef setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n values : np.ndarray\r\n num_set : int\r\n For putmask, this is mask.sum()\r\n other : Any\r\n \"\"\"\r\n if values.dtype == object:\r\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\r\n\r\n if isinstance(dtype, np.dtype) and dtype.kind in [\"m\", \"M\"]:\r\n # https://github.com/numpy/numpy/issues/12550\r\n # timedelta64 will incorrectly cast to int\r\n if not is_list_like(other):\r\n other = [other] * num_set\r\n else:\r\n other = list(other)\r\n\r\n return other\r\n",
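The comments in putmask_without_repeat above (GH#30567) hinge on the difference between np.putmask, which recycles a short replacement array, and np.place, which consumes exactly one replacement value per masked slot. A minimal, self-contained NumPy illustration of that difference follows; the arrays are invented for the example.

import numpy as np

values = np.arange(6)
mask = np.array([True, False, True, False, True, False])
new = np.array([10, 20, 30])

# np.putmask recycles `new` by position in `values` (index modulo len(new)),
# not one value per masked slot -- the GH#30567 pitfall.
a = values.copy()
np.putmask(a, mask, new)
print(a)        # [10  1 30  3 20  5]

# np.place consumes `new` in order, one value per True in the mask, which is
# the behaviour putmask_without_repeat relies on when len(new) == mask.sum().
b = values.copy()
np.place(b, mask, new)
print(b)        # [10  1 20  3 30  5]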
"import os\r\n\r\nimport numpy\r\nfrom numpy.distutils.misc_util import Configuration\r\n\r\n\r\ndef configuration(parent_package=\"\", top_path=None):\r\n config = Configuration(\"cluster\", parent_package, top_path)\r\n libraries = []\r\n if os.name == \"posix\":\r\n libraries.append(\"m\")\r\n config.add_extension(\r\n \"_expected_mutual_info_fast\",\r\n sources=[\"_expected_mutual_info_fast.pyx\"],\r\n include_dirs=[numpy.get_include()],\r\n libraries=libraries,\r\n )\r\n\r\n config.add_subpackage(\"tests\")\r\n\r\n return config\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from numpy.distutils.core import setup\r\n\r\n setup(**configuration().todict())\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nfrom sklearn.utils.metaestimators import if_delegate_has_method\r\nfrom sklearn.utils.metaestimators import available_if\r\n\r\n\r\nclass Prefix:\r\n def func(self):\r\n pass\r\n\r\n\r\nclass MockMetaEstimator:\r\n \"\"\"This is a mock meta estimator\"\"\"\r\n\r\n a_prefix = Prefix()\r\n\r\n @if_delegate_has_method(delegate=\"a_prefix\")\r\n def func(self):\r\n \"\"\"This is a mock delegated function\"\"\"\r\n pass\r\n\r\n\r\ndef test_delegated_docstring():\r\n assert \"This is a mock delegated function\" in str(\r\n MockMetaEstimator.__dict__[\"func\"].__doc__\r\n )\r\n assert \"This is a mock delegated function\" in str(MockMetaEstimator.func.__doc__)\r\n assert \"This is a mock delegated function\" in str(MockMetaEstimator().func.__doc__)\r\n\r\n\r\nclass MetaEst:\r\n \"\"\"A mock meta estimator\"\"\"\r\n\r\n def __init__(self, sub_est, better_sub_est=None):\r\n self.sub_est = sub_est\r\n self.better_sub_est = better_sub_est\r\n\r\n @if_delegate_has_method(delegate=\"sub_est\")\r\n def predict(self):\r\n pass\r\n\r\n\r\nclass MetaEstTestTuple(MetaEst):\r\n \"\"\"A mock meta estimator to test passing a tuple of delegates\"\"\"\r\n\r\n @if_delegate_has_method(delegate=(\"sub_est\", \"better_sub_est\"))\r\n def predict(self):\r\n pass\r\n\r\n\r\nclass MetaEstTestList(MetaEst):\r\n \"\"\"A mock meta estimator to test passing a list of delegates\"\"\"\r\n\r\n @if_delegate_has_method(delegate=[\"sub_est\", \"better_sub_est\"])\r\n def predict(self):\r\n pass\r\n\r\n\r\nclass HasPredict:\r\n \"\"\"A mock sub-estimator with predict method\"\"\"\r\n\r\n def predict(self):\r\n pass\r\n\r\n\r\nclass HasNoPredict:\r\n \"\"\"A mock sub-estimator with no predict method\"\"\"\r\n\r\n pass\r\n\r\n\r\nclass HasPredictAsNDArray:\r\n \"\"\"A mock sub-estimator where predict is a NumPy array\"\"\"\r\n\r\n predict = np.ones((10, 2), dtype=np.int64)\r\n\r\n\r\ndef test_if_delegate_has_method():\r\n assert hasattr(MetaEst(HasPredict()), \"predict\")\r\n assert not hasattr(MetaEst(HasNoPredict()), \"predict\")\r\n assert not hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), \"predict\")\r\n assert hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), \"predict\")\r\n assert not hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), \"predict\")\r\n assert not hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), \"predict\")\r\n assert hasattr(MetaEstTestList(HasPredict(), HasPredict()), \"predict\")\r\n\r\n\r\nclass AvailableParameterEstimator:\r\n \"\"\"This estimator's `available` parameter toggles the presence of a method\"\"\"\r\n\r\n def __init__(self, available=True):\r\n self.available = available\r\n\r\n @available_if(lambda est: est.available)\r\n def available_func(self):\r\n \"\"\"This is a mock available_if function\"\"\"\r\n pass\r\n\r\n\r\ndef test_available_if_docstring():\r\n assert \"This is a mock available_if function\" in str(\r\n AvailableParameterEstimator.__dict__[\"available_func\"].__doc__\r\n )\r\n assert \"This is a mock available_if function\" in str(\r\n AvailableParameterEstimator.available_func.__doc__\r\n )\r\n assert \"This is a mock available_if function\" in str(\r\n AvailableParameterEstimator().available_func.__doc__\r\n )\r\n\r\n\r\ndef test_available_if():\r\n assert hasattr(AvailableParameterEstimator(), \"available_func\")\r\n assert not hasattr(AvailableParameterEstimator(available=False), \"available_func\")\r\n\r\n\r\ndef test_available_if_unbound_method():\r\n # This is a non regression test for:\r\n # 
https://github.com/scikit-learn/scikit-learn/issues/20614\r\n # to make sure that decorated functions can be used as an unbound method,\r\n # for instance when monkeypatching.\r\n est = AvailableParameterEstimator()\r\n AvailableParameterEstimator.available_func(est)\r\n\r\n est = AvailableParameterEstimator(available=False)\r\n with pytest.raises(\r\n AttributeError,\r\n match=\"This 'AvailableParameterEstimator' has no attribute 'available_func'\",\r\n ):\r\n AvailableParameterEstimator.available_func(est)\r\n\r\n\r\ndef test_if_delegate_has_method_numpy_array():\r\n \"\"\"Check that we can check for an attribute that is a NumPy array.\r\n\r\n This is a non-regression test for:\r\n https://github.com/scikit-learn/scikit-learn/issues/21144\r\n \"\"\"\r\n estimator = MetaEst(HasPredictAsNDArray())\r\n assert hasattr(estimator, \"predict\")\r\n",
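The tests above exercise scikit-learn's available_if / if_delegate_has_method descriptors, which make a method appear on a wrapper only while a runtime check passes. A hedged sketch of the same pattern using available_if is below; Wrapper, NoPredict and WithPredict are hypothetical classes invented for the illustration, assuming a scikit-learn version that ships available_if.

from sklearn.utils.metaestimators import available_if


class Wrapper:
    """Expose `predict` only when the wrapped estimator has one."""

    def __init__(self, estimator):
        self.estimator = estimator

    def _estimator_has_predict(self):
        # Check evaluated lazily each time the attribute is looked up.
        return hasattr(self.estimator, "predict")

    @available_if(_estimator_has_predict)
    def predict(self, X):
        return self.estimator.predict(X)


class NoPredict:
    pass


class WithPredict:
    def predict(self, X):
        return X


print(hasattr(Wrapper(NoPredict()), "predict"))    # False
print(hasattr(Wrapper(WithPredict()), "predict"))  # True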
"from datetime import datetime\r\n\r\nimport numpy as np\r\n\r\nfrom pandas import (\r\n Index,\r\n Series,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestRename:\r\n def test_rename(self, datetime_series):\r\n ts = datetime_series\r\n renamer = lambda x: x.strftime(\"%Y%m%d\")\r\n renamed = ts.rename(renamer)\r\n assert renamed.index[0] == renamer(ts.index[0])\r\n\r\n # dict\r\n rename_dict = dict(zip(ts.index, renamed.index))\r\n renamed2 = ts.rename(rename_dict)\r\n tm.assert_series_equal(renamed, renamed2)\r\n\r\n # partial dict\r\n s = Series(np.arange(4), index=[\"a\", \"b\", \"c\", \"d\"], dtype=\"int64\")\r\n renamed = s.rename({\"b\": \"foo\", \"d\": \"bar\"})\r\n tm.assert_index_equal(renamed.index, Index([\"a\", \"foo\", \"c\", \"bar\"]))\r\n\r\n # index with name\r\n renamer = Series(\r\n np.arange(4), index=Index([\"a\", \"b\", \"c\", \"d\"], name=\"name\"), dtype=\"int64\"\r\n )\r\n renamed = renamer.rename({})\r\n assert renamed.index.name == renamer.index.name\r\n\r\n def test_rename_by_series(self):\r\n s = Series(range(5), name=\"foo\")\r\n renamer = Series({1: 10, 2: 20})\r\n result = s.rename(renamer)\r\n expected = Series(range(5), index=[0, 10, 20, 3, 4], name=\"foo\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_rename_set_name(self):\r\n s = Series(range(4), index=list(\"abcd\"))\r\n for name in [\"foo\", 123, 123.0, datetime(2001, 11, 11), (\"foo\",)]:\r\n result = s.rename(name)\r\n assert result.name == name\r\n tm.assert_numpy_array_equal(result.index.values, s.index.values)\r\n assert s.name is None\r\n\r\n def test_rename_set_name_inplace(self):\r\n s = Series(range(3), index=list(\"abc\"))\r\n for name in [\"foo\", 123, 123.0, datetime(2001, 11, 11), (\"foo\",)]:\r\n s.rename(name, inplace=True)\r\n assert s.name == name\r\n\r\n exp = np.array([\"a\", \"b\", \"c\"], dtype=np.object_)\r\n tm.assert_numpy_array_equal(s.index.values, exp)\r\n\r\n def test_rename_axis_supported(self):\r\n # Supporting axis for compatibility, detailed in GH-18589\r\n s = Series(range(5))\r\n s.rename({}, axis=0)\r\n s.rename({}, axis=\"index\")\r\n # FIXME: dont leave commenred-out\r\n # TODO: clean up shared index validation\r\n # with pytest.raises(ValueError, match=\"No axis named 5\"):\r\n # s.rename({}, axis=5)\r\n\r\n def test_rename_inplace(self, datetime_series):\r\n renamer = lambda x: x.strftime(\"%Y%m%d\")\r\n expected = renamer(datetime_series.index[0])\r\n\r\n datetime_series.rename(renamer, inplace=True)\r\n assert datetime_series.index[0] == expected\r\n\r\n def test_rename_with_custom_indexer(self):\r\n # GH 27814\r\n class MyIndexer:\r\n pass\r\n\r\n ix = MyIndexer()\r\n s = Series([1, 2, 3]).rename(ix)\r\n assert s.name is ix\r\n\r\n def test_rename_with_custom_indexer_inplace(self):\r\n # GH 27814\r\n class MyIndexer:\r\n pass\r\n\r\n ix = MyIndexer()\r\n s = Series([1, 2, 3])\r\n s.rename(ix, inplace=True)\r\n assert s.name is ix\r\n\r\n def test_rename_callable(self):\r\n # GH 17407\r\n s = Series(range(1, 6), index=Index(range(2, 7), name=\"IntIndex\"))\r\n result = s.rename(str)\r\n expected = s.rename(lambda i: str(i))\r\n tm.assert_series_equal(result, expected)\r\n\r\n assert result.name == expected.name\r\n",
"\"\"\"\r\nthese are systematically testing all of the args to value_counts\r\nwith different size combinations. This is to ensure stability of the sorting\r\nand proper parameter handling\r\n\"\"\"\r\n\r\nfrom itertools import product\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas import (\r\n Categorical,\r\n CategoricalIndex,\r\n DataFrame,\r\n Grouper,\r\n MultiIndex,\r\n Series,\r\n date_range,\r\n to_datetime,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\n# our starting frame\r\ndef seed_df(seed_nans, n, m):\r\n np.random.seed(1234)\r\n days = date_range(\"2015-08-24\", periods=10)\r\n\r\n frame = DataFrame(\r\n {\r\n \"1st\": np.random.choice(list(\"abcd\"), n),\r\n \"2nd\": np.random.choice(days, n),\r\n \"3rd\": np.random.randint(1, m + 1, n),\r\n }\r\n )\r\n\r\n if seed_nans:\r\n frame.loc[1::11, \"1st\"] = np.nan\r\n frame.loc[3::17, \"2nd\"] = np.nan\r\n frame.loc[7::19, \"3rd\"] = np.nan\r\n frame.loc[8::19, \"3rd\"] = np.nan\r\n frame.loc[9::19, \"3rd\"] = np.nan\r\n\r\n return frame\r\n\r\n\r\n# create input df, keys, and the bins\r\nbinned = []\r\nids = []\r\nfor seed_nans in [True, False]:\r\n for n, m in product((100, 1000), (5, 20)):\r\n\r\n df = seed_df(seed_nans, n, m)\r\n bins = None, np.arange(0, max(5, df[\"3rd\"].max()) + 1, 2)\r\n keys = \"1st\", \"2nd\", [\"1st\", \"2nd\"]\r\n for k, b in product(keys, bins):\r\n binned.append((df, k, b, n, m))\r\n ids.append(f\"{k}-{n}-{m}\")\r\n\r\n\r\[email protected]\r\[email protected](\"df, keys, bins, n, m\", binned, ids=ids)\r\[email protected](\"isort\", [True, False])\r\[email protected](\"normalize\", [True, False])\r\[email protected](\"sort\", [True, False])\r\[email protected](\"ascending\", [True, False])\r\[email protected](\"dropna\", [True, False])\r\ndef test_series_groupby_value_counts(\r\n df, keys, bins, n, m, isort, normalize, sort, ascending, dropna\r\n):\r\n def rebuild_index(df):\r\n arr = list(map(df.index.get_level_values, range(df.index.nlevels)))\r\n df.index = MultiIndex.from_arrays(arr, names=df.index.names)\r\n return df\r\n\r\n kwargs = {\r\n \"normalize\": normalize,\r\n \"sort\": sort,\r\n \"ascending\": ascending,\r\n \"dropna\": dropna,\r\n \"bins\": bins,\r\n }\r\n\r\n gr = df.groupby(keys, sort=isort)\r\n left = gr[\"3rd\"].value_counts(**kwargs)\r\n\r\n gr = df.groupby(keys, sort=isort)\r\n right = gr[\"3rd\"].apply(Series.value_counts, **kwargs)\r\n right.index.names = right.index.names[:-1] + [\"3rd\"]\r\n\r\n # have to sort on index because of unstable sort on values\r\n left, right = map(rebuild_index, (left, right)) # xref GH9212\r\n tm.assert_series_equal(left.sort_index(), right.sort_index())\r\n\r\n\r\ndef test_series_groupby_value_counts_with_grouper():\r\n # GH28479\r\n df = DataFrame(\r\n {\r\n \"Timestamp\": [\r\n 1565083561,\r\n 1565083561 + 86400,\r\n 1565083561 + 86500,\r\n 1565083561 + 86400 * 2,\r\n 1565083561 + 86400 * 3,\r\n 1565083561 + 86500 * 3,\r\n 1565083561 + 86400 * 4,\r\n ],\r\n \"Food\": [\"apple\", \"apple\", \"banana\", \"banana\", \"orange\", \"orange\", \"pear\"],\r\n }\r\n ).drop([3])\r\n\r\n df[\"Datetime\"] = to_datetime(df[\"Timestamp\"].apply(lambda t: str(t)), unit=\"s\")\r\n dfg = df.groupby(Grouper(freq=\"1D\", key=\"Datetime\"))\r\n\r\n # have to sort on index because of unstable sort on values xref GH9212\r\n result = dfg[\"Food\"].value_counts().sort_index()\r\n expected = dfg[\"Food\"].apply(Series.value_counts).sort_index()\r\n expected.index.names = result.index.names\r\n\r\n tm.assert_series_equal(result, 
expected)\r\n\r\n\r\[email protected](\"columns\", [[\"A\", \"B\"], [\"A\", \"B\", \"C\"]])\r\ndef test_series_groupby_value_counts_empty(columns):\r\n # GH39172\r\n df = DataFrame(columns=columns)\r\n dfg = df.groupby(columns[:-1])\r\n\r\n result = dfg[columns[-1]].value_counts()\r\n expected = Series([], name=columns[-1], dtype=result.dtype)\r\n expected.index = MultiIndex.from_arrays([[]] * len(columns), names=columns)\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\[email protected](\"columns\", [[\"A\", \"B\"], [\"A\", \"B\", \"C\"]])\r\ndef test_series_groupby_value_counts_one_row(columns):\r\n # GH42618\r\n df = DataFrame(data=[range(len(columns))], columns=columns)\r\n dfg = df.groupby(columns[:-1])\r\n\r\n result = dfg[columns[-1]].value_counts()\r\n expected = df.value_counts().rename(columns[-1])\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_series_groupby_value_counts_on_categorical():\r\n # GH38672\r\n\r\n s = Series(Categorical([\"a\"], categories=[\"a\", \"b\"]))\r\n result = s.groupby([0]).value_counts()\r\n\r\n expected = Series(\r\n data=[1, 0],\r\n index=MultiIndex.from_arrays(\r\n [\r\n [0, 0],\r\n CategoricalIndex(\r\n [\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False, dtype=\"category\"\r\n ),\r\n ]\r\n ),\r\n name=0,\r\n )\r\n\r\n # Expected:\r\n # 0 a 1\r\n # b 0\r\n # Name: 0, dtype: int64\r\n\r\n tm.assert_series_equal(result, expected)\r\n"
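The core identity these value_counts tests assert is that SeriesGroupBy.value_counts matches applying Series.value_counts group by group. A small, hedged reproduction on invented data is sketched below; depending on the pandas version the result names differ, hence check_names=False.

import pandas as pd

df = pd.DataFrame({"key": ["x", "x", "y", "y", "y"], "val": [1, 1, 2, 3, 3]})

left = df.groupby("key")["val"].value_counts().sort_index()
right = df.groupby("key")["val"].apply(pd.Series.value_counts).sort_index()
right.index.names = left.index.names

# Same counts per (key, val) pair, regardless of how they were computed.
pd.testing.assert_series_equal(left, right, check_names=False)
print(left)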
] | [
[
"pandas.Series",
"numpy.arange",
"pandas.Float64Index",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.makeDateIndex",
"pandas._testing.assert_series_equal",
"numpy.array"
],
[
"pandas._testing.assert_equal",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.DataFrame",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.date_range",
"pandas._testing.assert_frame_equal"
],
[
"numpy.log",
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"numpy.asarray",
"pandas.Categorical",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_categorical_equal",
"numpy.random.permutation",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
],
[
"pandas._testing.assert_frame_equal",
"numpy.random.random",
"pandas.Index",
"pandas.DataFrame"
],
[
"numpy.nonzero",
"numpy.set_printoptions",
"numpy.any",
"numpy.get_printoptions"
],
[
"pandas.concat",
"pandas.Series",
"numpy.tile",
"pandas.DataFrame",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"pandas.Timestamp",
"pandas._testing.assert_index_equal"
],
[
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"numpy.dtype",
"pandas.core.construction.sanitize_array",
"numpy.array"
],
[
"pandas.core.dtypes.cast.maybe_cast_pointwise_result",
"pandas.core.common.get_callable_name",
"pandas._libs.lib.generate_slices",
"numpy.dtype",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.dtypes.common.is_complex_dtype",
"pandas.core.sorting.get_group_index",
"pandas._libs.reduction.check_result_array",
"pandas._libs.reduction.extract_result",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.dtypes.common.ensure_float64",
"pandas.core.series.Series",
"pandas.core.common.is_builtin_func",
"pandas.errors.AbstractMethodError",
"numpy.arange",
"numpy.lexsort",
"numpy.flatnonzero",
"numpy.diff",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.groupby.grouper.Grouping",
"numpy.zeros",
"pandas._libs.reduction.apply_frame_axis0",
"pandas.core.sorting.decons_obs_group_ids",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.arrays.integer.Int64Dtype",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.sorting.compress_group_index",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.indexes.api.Index",
"pandas.core.sorting.get_indexer_dict",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.arrays.floating.Float64Dtype",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas._libs.reduction.SeriesGrouper",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas._libs.reduction.SeriesBinGrouper",
"pandas.core.indexes.api.MultiIndex",
"pandas.core.sorting.get_flattened_list",
"pandas.core.indexes.api.ensure_index",
"pandas._libs.lib.maybe_convert_objects",
"numpy.bincount",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_1d_only_ea_obj",
"numpy.empty"
],
[
"pandas.Series",
"pandas.Categorical",
"pandas.array",
"pandas.Index",
"pandas.Interval",
"pandas.date_range",
"pandas.DataFrame.from_records",
"pandas._testing.assert_series_equal"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.mean",
"numpy.argmin",
"numpy.moveaxis",
"scipy.sparse.issparse",
"numpy.may_share_memory",
"numpy.flatnonzero",
"numpy.atleast_1d",
"numpy.finfo",
"numpy.zeros",
"numpy.asfortranarray",
"scipy.sparse.csr_matrix",
"numpy.log10",
"numpy.sum",
"scipy.sparse.isspmatrix",
"numpy.sort",
"numpy.average",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.dtype",
"numpy.concatenate",
"pandas.core.indexes.numeric.Float64Index",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas.core.common.any_not_none",
"pandas.core.indexes.numeric.Int64Index",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"numpy.arange",
"pandas.core.common.all_none",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.ops.get_op_result_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.compat.numpy.function.validate_argsort",
"pandas.core.indexes.numeric.Int64Index._simple_new",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.errstate",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.dtypes.common.is_scalar",
"pandas.compat.numpy.function.validate_max",
"pandas.core.dtypes.common.is_integer",
"pandas.compat.numpy.function.validate_min",
"pandas.core.dtypes.common.ensure_python_int",
"pandas.util._decorators.doc",
"pandas.core.construction.extract_array"
],
[
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.abs",
"numpy.unique",
"scipy.special.digamma",
"numpy.any",
"numpy.array",
"numpy.nextafter",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
],
[
"sklearn.datasets.make_classification",
"numpy.linspace",
"sklearn.inspection.plot_partial_dependence",
"numpy.arange",
"sklearn.datasets.load_iris",
"sklearn.datasets.load_diabetes",
"sklearn.ensemble.GradientBoostingRegressor",
"numpy.all",
"sklearn.datasets.make_regression",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.linear_model.LinearRegression",
"numpy.testing.assert_allclose",
"sklearn.utils._testing._convert_container"
],
[
"numpy.arange"
],
[
"pandas._testing.assert_almost_equal",
"pandas.Series",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_numpy_array_equal",
"pandas.Timestamp",
"numpy.arange",
"pandas._testing.makeDataFrame",
"pandas.set_option",
"pandas._testing.assert_series_equal",
"numpy.zeros",
"pandas._testing.assert_produces_warning",
"pandas.option_context",
"pandas.date_range",
"numpy.array",
"numpy.rec.fromarrays",
"pandas._testing.ensure_clean",
"numpy.ones",
"pandas.read_pickle"
],
[
"matplotlib.transforms.Bbox.unit",
"matplotlib._api.warn_external",
"matplotlib.transforms.IdentityTransform",
"matplotlib.path.Path",
"matplotlib.transforms.BboxTransformTo",
"matplotlib.transforms.Bbox.from_bounds",
"matplotlib.transforms.TransformedBbox"
],
[
"numpy.asarray",
"pandas._libs.lib.is_scalar",
"pandas.core.ops.kleene_or",
"numpy.dtype",
"numpy.zeros_like",
"pandas.compat.numpy.function.validate_any",
"pandas.compat.numpy.function.validate_all",
"pandas.core.dtypes.common.is_numeric_dtype",
"numpy.ones_like",
"pandas.core.ops.maybe_dispatch_ufunc_to_dunder_op",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"numpy.putmask",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.ops.kleene_xor",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.arrays.FloatingArray",
"numpy.errstate",
"numpy.array",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.ops.kleene_and",
"pandas.core.arrays.IntegerArray",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.is_bool",
"pandas._libs.lib.infer_dtype"
],
[
"matplotlib.tri.triangulation.Triangulation.get_from_args_and_kwargs",
"matplotlib.docstring.Substitution",
"numpy.ma.asarray",
"numpy.isfinite",
"numpy.ma.masked_invalid",
"matplotlib.docstring.interpd.update",
"numpy.ma.is_masked"
],
[
"pandas._testing.assert_produces_warning",
"pandas._testing.assert_numpy_array_equal",
"pandas.PeriodIndex",
"pandas.Timestamp",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas._testing.assert_categorical_equal",
"pandas.Int64Index",
"pandas.date_range",
"numpy.array",
"pandas._testing.assert_index_equal"
],
[
"pandas.concat",
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.SparseDtype",
"numpy.dtype",
"pandas.core.arrays.sparse.SparseArray",
"pandas.core.arrays.sparse.SparseArray._concat_same_type",
"pandas._testing.assert_series_equal",
"numpy.array"
],
[
"pandas.Series",
"pandas.option_context",
"numpy.dtype",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal"
],
[
"pandas.Series",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_index_equal"
],
[
"pandas._testing.equalContents",
"pandas.api.types.is_signed_integer_dtype",
"pandas._testing.assert_produces_warning",
"pandas.api.types.pandas_dtype",
"pandas.Timestamp",
"numpy.asarray",
"pandas.Index",
"pandas.core.dtypes.cast.find_common_type",
"pandas.MultiIndex.from_tuples",
"numpy.dtype",
"pandas.api.types.is_datetime64tz_dtype",
"numpy.array",
"pandas._testing.assert_index_equal"
],
[
"pandas.concat",
"pandas.read_hdf",
"pandas.tests.io.pytables.common._maybe_remove",
"pandas.tests.io.pytables.common.ensure_clean_store",
"pandas.Series",
"pandas.Categorical",
"pandas.DataFrame",
"pandas.tests.io.pytables.common.ensure_clean_path",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal"
],
[
"pandas.Series",
"pandas.core.arrays.string_.StringDtype"
],
[
"scipy.sparse.isspmatrix",
"scipy.sparse.issparse",
"numpy.unique",
"scipy.sparse.eye",
"scipy.sparse.csgraph.laplacian",
"numpy.logical_or",
"scipy.linalg.eigh",
"scipy.sparse.linalg.eigsh",
"numpy.zeros",
"numpy.where",
"scipy.sparse.csgraph.connected_components"
],
[
"numpy.dot",
"sklearn.datasets.make_classification",
"sklearn.utils._testing.ignore_warnings",
"sklearn.neighbors.KNeighborsTransformer",
"numpy.vstack",
"sklearn.neighbors.KDTree",
"sklearn.base.clone",
"numpy.random.randn",
"numpy.mean",
"numpy.random.randint",
"sklearn.neighbors.RadiusNeighborsRegressor",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.eye",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.full",
"sklearn.neighbors._base._check_precomputed",
"sklearn.neighbors.RadiusNeighborsClassifier",
"sklearn.neighbors.NearestNeighbors",
"sklearn.utils._testing.assert_array_equal",
"numpy.zeros",
"numpy.isnan",
"sklearn.neighbors.kneighbors_graph",
"sklearn.neighbors.RadiusNeighborsTransformer",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"scipy.sparse.csr_matrix",
"sklearn.neighbors._base._is_sorted_by_data",
"sklearn.utils.fixes.parse_version",
"sklearn.utils.validation.check_random_state",
"numpy.errstate",
"numpy.array",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.random.RandomState",
"numpy.sum",
"sklearn.metrics.pairwise_distances",
"sklearn.neighbors.BallTree",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.datasets.load_digits",
"sklearn.utils._testing.assert_array_almost_equal",
"sklearn.neighbors.radius_neighbors_graph",
"numpy.empty"
],
[
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"pandas.Categorical",
"pandas._testing.assert_categorical_equal",
"numpy.array"
],
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.asarray",
"numpy.nan_to_num",
"numpy.all",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.searchsorted",
"numpy.trace",
"numpy.hstack",
"scipy.sparse.coo_matrix",
"numpy.ones_like",
"numpy.clip",
"numpy.unique",
"numpy.arange",
"numpy.intersect1d",
"numpy.outer",
"numpy.ravel",
"numpy.zeros",
"numpy.log",
"numpy.min",
"numpy.isnan",
"numpy.union1d",
"scipy.sparse.csr_matrix",
"numpy.append",
"numpy.errstate",
"numpy.array",
"numpy.logical_and",
"numpy.sum",
"numpy.abs",
"numpy.array_equal",
"numpy.isposinf",
"numpy.setdiff1d",
"numpy.ones",
"numpy.float64",
"numpy.average"
],
[
"numpy.putmask",
"pandas.core.dtypes.common.is_list_like",
"numpy.can_cast",
"pandas.core.dtypes.cast.infer_dtype_from",
"pandas.core.dtypes.common.is_integer_dtype",
"numpy.asarray",
"pandas._libs.lib.is_scalar",
"pandas.core.dtypes.cast.find_common_type",
"numpy.broadcast_to",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.cast.convert_scalar_for_putitemlike",
"numpy.place",
"pandas.core.dtypes.missing.isna_compat"
],
[
"numpy.distutils.misc_util.Configuration",
"numpy.get_include"
],
[
"sklearn.utils.metaestimators.available_if",
"numpy.ones",
"sklearn.utils.metaestimators.if_delegate_has_method"
],
[
"pandas._testing.assert_numpy_array_equal",
"pandas.Series",
"numpy.arange",
"pandas.Index",
"pandas._testing.assert_series_equal",
"numpy.array"
],
[
"pandas.CategoricalIndex",
"pandas.Series",
"numpy.random.seed",
"numpy.random.choice",
"pandas.Categorical",
"pandas.Grouper",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.date_range",
"pandas._testing.assert_series_equal",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.5",
"2.0",
"1.3",
"1.4"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.0",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.0",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"1.2",
"1.3",
"1.4"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MountainRange/mobius_score | [
"fc900ab456b3e3431cfa6d9684b97ec6321d0a23"
] | [
"audiospec.py"
] | [
"\nimport numpy as np\nimport librosa\nfrom tqdm import tqdm\nfrom audiomisc import ks_key\n\nfrom constants import VERTICALCUTOFF, FFT_SIZE, FFT_HOP\n\ndef stft(x, fft_size, hopsamp):\n window = np.hanning(fft_size)\n return np.array([np.fft.rfft(window*x[i:i+fft_size])\n for i in range(0, len(x)-fft_size, hopsamp)])\n\ndef wav_to_spec(fn):\n input_signal, sample_rate = librosa.load(fn, sr=44100)\n stft_mag = np.array([])\n split = int(1e6)#int(264600)\n fft_size = FFT_SIZE\n hopsamp = fft_size // FFT_HOP\n for i in tqdm(range(len(input_signal)//split)):\n temp_signal = input_signal[(split*i):(split*(i+1))]\n stft_full = stft(temp_signal, fft_size, hopsamp)\n\n stft_full = abs(stft_full)\n if np.max(stft_full) != 0:\n stft_full = (stft_full - np.mean(stft_full)) / np.std(stft_full)\n stft_full += abs(np.min(stft_full))\n stft_full *= 255.0/np.max(stft_full)\n \n if stft_mag.shape[0] != 0:\n stft_mag = np.concatenate((stft_mag, stft_full))\n else:\n stft_mag = stft_full\n\n print(\"Calculating tempo\")\n tempo, _ = librosa.beat.beat_track(y=input_signal, sr=sample_rate, hop_length=512)\n\n print(\"Calculating music key\")\n chroma = librosa.feature.chroma_stft(y=input_signal, sr=sample_rate)\n chroma = [sum(x)/len(x) for x in chroma]\n bestmajor, bestminor = ks_key(chroma)\n if max(bestmajor) > max(bestminor):\n key = np.argmax(bestmajor)\n # C, Db, D, Eb, E, F, F#, G, Ab, A, Bb, B\n keymap = [0, -5, 2, -3, 4, -1, 6, 1, -4, 3, -2, 5]\n else:\n key = np.argmax(bestminor)\n # c, c#, d, eb, e, f, f#, g, g#, a, bb, b\n keymap = [-3, 4, -1, -6, 1, -4, 3, -2, 5, 0, -5, 2]\n \n return stft_mag[:, :VERTICALCUTOFF].T, tempo, keymap[key]"
] | [
[
"numpy.fft.rfft",
"numpy.min",
"numpy.concatenate",
"numpy.max",
"numpy.std",
"numpy.argmax",
"numpy.mean",
"numpy.hanning",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Prasad9/Detect-Flags-SSD | [
"c0d662bde99ed8df33d72bd06d61d5eb869d31a5"
] | [
"detect/image_detector.py"
] | [
"from __future__ import print_function\nimport mxnet as mx\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom dataset.iterator import DetTestImageIter\nimport cv2\n\nclass ImageDetector(object):\n\t\"\"\"\n\tSSD detector which hold a detection network and wraps detection API\n\n\tParameters:\n\t----------\n\tsymbol : mx.Symbol\n\t\tdetection network Symbol\n\tmodel_prefix : str\n\t\tname prefix of trained model\n\tepoch : int\n\t\tload epoch of trained model\n\tdata_shape : int\n\t\tinput data resize shape\n\tmean_pixels : tuple of float\n\t\t(mean_r, mean_g, mean_b)\n\tbatch_size : int\n\t\trun detection with batch size\n\tctx : mx.ctx\n\t\tdevice to use, if None, use mx.cpu() as default context\n\t\"\"\"\n\tdef __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \\\n\t\t\t\t\tclasses, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None):\n\t\tself.ctx = ctx\n\t\tif self.ctx is None:\n\t\t\tself.ctx = mx.cpu()\n\t\tload_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)\n\t\tif symbol is None:\n\t\t\tsymbol = load_symbol\n\t\tself.mod = mx.mod.Module(symbol, label_names=None, context=ctx)\n\t\tself.data_shape = data_shape\n\t\tself.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])\n\t\tself.mod.set_params(args, auxs)\n\t\tself.data_shape = data_shape\n\t\tself.mean_pixels = mean_pixels\n\t\tself.classes = classes\n\t\tself.colors = []\n\t\tself.fill_random_colors_int()\n\t\tself.thresh = thresh\n\t\tself.plot_confidence = plot_confidence\n\n\tdef fill_random_colors(self):\n\t\timport random\n\t\tfor i in range(len(self.classes)):\n\t\t\tself.colors.append((random.random(), random.random(), random.random()))\n\n\t\t#print(self.colors)\n\n\tdef fill_random_colors_int(self):\n\t\timport random\n\t\tfor i in range(len(self.classes)):\n\t\t\tself.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n\n\t\t#print(self.colors)\n\n\n\tdef detect(self, det_iter, show_timer=False):\n\t\t\"\"\"\n\t\tdetect all images in iterator\n\n\t\tParameters:\n\t\t----------\n\t\tdet_iter : DetIter\n\t\t\titerator for all testing images\n\t\tshow_timer : Boolean\n\t\t\twhether to print out detection exec time\n\n\t\tReturns:\n\t\t----------\n\t\tlist of detection results\n\t\t\"\"\"\n\t\tnum_images = det_iter._size\n\t\tresult = []\n\t\tdetections = []\n\t\t#if not isinstance(det_iter, mx.io.PrefetchingIter):\n\t\t#\tdet_iter = mx.io.PrefetchingIter(det_iter)\n\t\tstart = timer()\n\t\tfor pred, _, _ in self.mod.iter_predict(det_iter):\n\t\t\tdetections.append(pred[0].asnumpy())\n\t\ttime_elapsed = timer() - start\n\t\tif show_timer:\n\t\t\tprint(\"Detection time for {} images: {:.4f} sec\".format(num_images, time_elapsed))\n\t\tfor output in detections:\n\t\t\tfor i in range(output.shape[0]):\n\t\t\t\tdet = output[i, :, :]\n\t\t\t\tres = det[np.where(det[:, 0] >= 0)[0]]\n\t\t\t\tresult.append(res)\n\t\tresized_img = det_iter.current_data()\n\t\treturn result, resized_img\n\n\tdef im_detect(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for detecting multiple images\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str\n\t\t\timage path or list of image paths\n\t\troot_dir : str\n\t\t\tdirectory of input images, optional if image path already\n\t\t\thas full directory information\n\t\textension : str\n\t\t\timage extension, eg. 
\".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\t\tlist of detection results in format [det0, det1...], det is in\n\t\tformat np.array([id, score, xmin, ymin, xmax, ymax]...)\n\t\t\"\"\"\n\t\tim_list = [img]\n\t\ttest_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels)\n\t\treturn self.detect(test_iter, show_timer)\n\n\tdef plot_rects(self, img, dets):\n\t\timg_shape = img.shape\n\t\tfor i in range(dets.shape[0]):\n\t\t\tcls_id = int(dets[i, 0])\n\t\t\tif cls_id >= 0:\n\t\t\t\tscore = dets[i, 1]\n\t\t\t\t#print('Score is {}, class {}'.format(score, cls_id))\n\t\t\t\tif score > self.thresh:\n\t\t\t\t\txmin = int(dets[i, 2] * img_shape[1])\n\t\t\t\t\tymin = int(dets[i, 3] * img_shape[0])\n\t\t\t\t\txmax = int(dets[i, 4] * img_shape[1])\n\t\t\t\t\tymax = int(dets[i, 5] * img_shape[0])\n\n\t\t\t\t\tcv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)\n\n\t\t\t\t\tclass_name = self.classes[cls_id]\n\t\t\t\t\tcv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)\n\t\t\t\t\t#print('Class id = {}, Score = {}, Country = {}, rect = ({}, {}, {}, {})'.format(cls_id, score, class_name, xmin, ymin, xmax, ymax))\n\n\tdef detect_and_visualize_image(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for im_detect and visualize_detection\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str or str\n\t\timage path or list of image paths\n\t\troot_dir : str or None\n\t\tdirectory of input images, optional if image path already\n\t\thas full directory information\n\t\textension : str or None\n\t\timage extension, eg. \".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\n\t\t\"\"\"\n\t\tdets, resized_img = self.im_detect(img, show_timer=show_timer)\n\t\tresized_img = resized_img.asnumpy()\n\t\tresized_img /= 255.0\n\t\tfor k, det in enumerate(dets):\n\t\t\tself.plot_rects(resized_img, det)\n\t\treturn resized_img\n\n\tdef scale_and_plot_rects(self, img, dets):\n\t\timg_shape = img.shape\n\t\tfor i in range(dets.shape[0]):\n\t\t\tcls_id = int(dets[i, 0])\n\t\t\tif cls_id >= 0:\n\t\t\t\tscore = dets[i, 1]\n\t\t\t\t#print('Score is {}, class {}'.format(score, cls_id))\n\t\t\t\tif score > self.thresh:\n\t\t\t\t\txmin = int(dets[i, 2] * img_shape[1])\n\t\t\t\t\tymin = int(dets[i, 3] * img_shape[0])\n\t\t\t\t\txmax = int(dets[i, 4] * img_shape[1])\n\t\t\t\t\tymax = int(dets[i, 5] * img_shape[0])\n\n\t\t\t\t\tcv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)\n\n\t\t\t\t\tclass_name = self.classes[cls_id]\n\t\t\t\t\tcv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3)\n\t\t\t\t\tif self.plot_confidence:\n\t\t\t\t\t\tscore_color = (0, 255, 0) if score > 0.5 else (255, 0, 0)\n\t\t\t\t\t\tcv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1)\n\t\t\t\t\t\n\n\tdef detect_and_layover_image(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for im_detect and visualize_detection\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str or str\n\t\timage path or list of image paths\n\t\troot_dir : str or None\n\t\tdirectory of input images, optional if image path already\n\t\thas full directory information\n\t\textension : str or None\n\t\timage extension, eg. \".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\n\t\t\"\"\"\n\t\tdets, _ = self.im_detect(img, show_timer=show_timer)\n\t\tfor k, det in enumerate(dets):\n\t\t\tself.scale_and_plot_rects(img, det)\n\t\treturn img\n"
] | [
[
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
richardtjornhammar/graphtastic | [
"1e64d408ffb3e09d5ad068986c847032d5cfdcbd"
] | [
"src/graphtastic/clustering.py"
] | [
"\"\"\"\nCopyright 2022 RICHARD TJÖRNHAMMAR\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport typing\nimport sys\n\ntry :\n from numba import jit\n bUseNumba = True\nexcept ImportError :\n print ( \"ImportError:\",\" NUMBA. WILL NOT USE IT\")\n bUseNumba = False\nexcept OSError:\n print ( \"OSError:\",\" NUMBA. WILL NOT USE IT\")\n bUseNumba = False\n\n# THE FOLLOWING KMEANS ALGORITHM IS THE AUTHOR OWN LOCAL VERSION\nif bUseNumba :\n @jit(nopython=True)\n def seeded_kmeans( dat:np.array, cent:np.array ) :\n #\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2345\n # AGAIN CONSIDER USING THE C++ VERSION SINCE IT IS ALOT FASTER\n # HERE WE SPEED IT UP USING NUMBA IF THE USER HAS IT INSTALLED AS A MODULE\n #\n NN , MM = np.shape ( dat )\n KK , LL = np.shape ( cent )\n if not LL == MM :\n print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )\n\n labels = [ int(z) for z in np.zeros(NN) ]\n w = labels\n counts = np.zeros(KK)\n tmp_ce = np.zeros(KK*MM).reshape(KK,MM)\n old_error , error , TOL = 0. , 1. , 1.0E-10\n while abs ( error - old_error ) > TOL :\n old_error = error\n error = 0.\n counts = counts * 0.\n tmp_ce = tmp_ce * 0.\n # START BC\n for h in range ( NN ) :\n min_distance = 1.0E30\n for i in range ( KK ) :\n distance = np.sum( ( dat[h]-cent[i] )**2 )\n if distance < min_distance :\n labels[h] = i\n min_distance = distance\n tmp_ce[labels[h]] += dat[ h ]\n counts[labels[h]] += 1.0\n error += min_distance\n # END BC\n for i in range ( KK ) :\n if counts[i]>0:\n cent[i] = tmp_ce[i]/counts[i]\n centroids = cent\n return ( labels , centroids )\nelse :\n def seeded_kmeans( dat:np.array, cent:np.array ) :\n #\n # SLOW SLUGGISH KMEANS WITH A DUBBLE FOR LOOP\n # IN PYTHON! WOW! SUCH SPEED!\n #\n NN , MM = np.shape ( dat )\n KK , LL = np.shape ( cent )\n if not LL == MM :\n print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )\n\n labels = [ int(z) for z in np.zeros(NN) ]\n w = labels\n counts = np.zeros(KK)\n tmp_ce = np.zeros(KK*MM).reshape(KK,MM)\n old_error , error , TOL = 0. , 1. , 1.0E-10\n while abs ( error - old_error ) > TOL :\n old_error = error\n error = 0.\n counts = counts * 0.\n tmp_ce = tmp_ce * 0.\n # START BC\n for h in range ( NN ) :\n min_distance = 1.0E30\n for i in range ( KK ) :\n distance = np.sum( ( dat[h]-cent[i] )**2 )\n if distance < min_distance :\n labels[h] = i\n min_distance = distance\n tmp_ce[labels[h]] += dat[ h ]\n counts[labels[h]] += 1.0\n error += min_distance\n # END BC\n for i in range ( KK ) :\n if counts[i]>0:\n cent[i] = tmp_ce[i]/counts[i]\n centroids = cent\n return ( labels , centroids )\n\n\nif bUseNumba :\n @jit(nopython=True)\n def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :\n description = \"\"\" This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). 
For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems.\"\"\"\n if bVerbose :\n print ( \"CONNECTIVITY CLUSTERING OF \", np.shape(B), \" MATRIX\" )\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2277\n # CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS\n # A LOT FASTER\n # FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n nr_sq,mr_sq = np.shape(B)\n if nr_sq != mr_sq :\n print ( 'ERROR: FAILED' )\n N = mr_sq\n res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0\n res .append(0)\n for i in range(N) :\n nvisi.append(i+1)\n res.append(0); res.append(0)\n ndx.append(i)\n\n res = res[1:]\n nvisi = nvisi[1:]\n ndx = ndx[1:]\n while ( len(ndx)>0 ) :\n i = ndx[-1] ; ndx = ndx[:-1]\n NN = []\n if ( nvisi[i]>0 ) :\n C-=1\n for j in range(N) :\n if ( B[i,j]<=val ) :\n NN.append(j)\n while ( len(NN)>0 ) :\n # back pop_back\n k = NN[-1]; NN = NN[:-1]\n nvisi[k] = C\n for j in range(N):\n if ( B[j,k]<=val ) :\n for q in range(N) :\n if ( nvisi[q] == j+1 ) :\n NN.append(q)\n if bVerbose : # VERBOSE\n print ( \"INFO \"+str(-1*C) +\" clusters\" )\n Nc = [ 0 for i in range(-1*C) ]\n for q in range(N) :\n res[ q*2+1 ] = q;\n res[ q*2 ] = nvisi[q]-C;\n Nc [res[q*2]]+= 1;\n if bVerbose :\n print ( \" \"+str(res[q*2])+\" \"+str(res[2*q+1]) )\n if bVerbose :\n for i in range(-1*C) :\n print( \"CLUSTER \" +str(i)+ \" HAS \" + str(Nc[i]) + \" ELEMENTS\")\n return ( Nc , np.array(res[:-1]).reshape(-1,2) )\nelse :\n def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :\n description=\"\"\"\nThis is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). 
For a small distanc>\n \"\"\"\n if bVerbose :\n print ( \"CONNECTIVITY CLUSTERING OF \", np.shape(B), \" MATRIX\" )\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2277\n # CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS\n # A LOT FASTER\n # FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #\n nr_sq,mr_sq = np.shape(B)\n if nr_sq != mr_sq :\n print ( 'ERROR' )\n return ( -1 )\n N = mr_sq\n res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0\n res .append(0)\n for i in range(N) :\n nvisi.append(i+1)\n res.append(0); res.append(0)\n ndx.append(i)\n while ( len(ndx)>0 ) :\n i = ndx[-1] ; ndx = ndx[:-1]\n NN = []\n if ( nvisi[i]>0 ) :\n C-=1\n for j in range(N) :\n if ( B[i,j]<=val ) :\n NN.append(j)\n while ( len(NN)>0 ) :\n # back pop_back\n k = NN[-1]; NN = NN[:-1]\n nvisi[k] = C\n for j in range(N):\n if ( B[j,k]<=val ) :\n for q in range(N) :\n if ( nvisi[q] == j+1 ) :\n NN.append(q)\n if bVerbose : # VERBOSE\n print ( \"INFO \"+str(-1*C) +\" clusters\" )\n Nc = [ 0 for i in range(-1*C) ]\n for q in range(N) :\n res[ q*2+1 ] = q;\n res[ q*2 ] = nvisi[q]-C;\n Nc [res[q*2]]+= 1;\n if bVerbose :\n print ( \" \"+str(res[q*2])+\" \"+str(res[2*q+1]) )\n if bVerbose:\n for i in range(-1*C) :\n print( \"CLUSTER \" +str(i)+ \" HAS \" + str(Nc[i]) + \" ELEMENTS\")\n return ( Nc , np.array(res[:-1]).reshape(-1,2) )\n\nif bUseNumba :\n @jit(nopython=True)\n def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :\n #\n # AN ALTERNATIVE METHOD\n # DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY\n # CLUSTERING MODULE (in src/impetuous/clustering.py )\n # OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n # THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS\n # WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER\n #\n if len ( distm.shape ) < 2 :\n print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )\n\n def b2i ( a:list ) -> list :\n return ( [ i for b,i in zip(a,range(len(a))) if b ] )\n def f2i ( a:list,alf:float ) -> list :\n return ( b2i( a<=alf ) )\n\n L = []\n for a in distm :\n bAdd = True\n ids = set( f2i(a,alpha) )\n for i in range(len(L)) :\n if len( L[i]&ids ) >= n_connections :\n L[i] = L[i] | ids\n bAdd = False\n break\n if bAdd and len(ids) >= n_connections :\n L .append( ids )\n return ( L )\nelse :\n def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :\n #\n # AN ALTERNATIVE METHOD\n # DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY\n # CLUSTERING MODULE (in src/impetuous/clustering.py )\n # OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # as of commit https://github.com/richardtjornhammar/RichTools/commit/76201bb07687017ae16a4e57cb1ed9fd8c394f18 2016\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n # THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS\n # WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER\n #\n if len ( distm.shape ) < 2 :\n print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )\n\n def b2i ( a:list ) -> list :\n return ( [ i for b,i in zip(a,range(len(a))) if b ] )\n def f2i ( a:list,alf:float ) -> 
list :\n return ( b2i( a<=alf ) )\n\n L = []\n for a in distm :\n bAdd = True\n ids = set( f2i(a,alpha) )\n for i in range(len(L)) :\n if len( L[i]&ids ) >= n_connections :\n L[i] = L[i] | ids\n bAdd = False\n break\n if bAdd and len(ids) >= n_connections :\n L .append( ids )\n return ( L )\n\ndef dbscan ( coordinates:np.array = None , distance_matrix:np.array = None ,\n eps:float = None, minPts:int = None , bVerbose:bool = False ) -> dict :\n\n def absolute_coordinates_to_distance_matrix ( Q:np.array , power:int=2 , bInvPow:bool=False ) -> np.array :\n # UNUSED FALLBACK\n DP = np.array( [ np.sum((np.array(p)-np.array(q))**power) for p in Q for q in Q] ).reshape(np.shape(Q)[0],np.shape(Q)[0])\n if bInvPow :\n DP = DP**(1.0/power)\n return ( DP )\n\n if bVerbose :\n print ( \"THIS IMPLEMENTATION FOR DBSCAN\" )\n print ( \"ASSESSMENT OF NOISE DIFFERS FROM\" )\n print ( \"THE IMPLEMENTATION FOUND IN SKLEARN\" )\n print ( \"ASSUMES LINEAR DISTANCES, NOT SQUARED\" )\n #\n # FOR A DESCRIPTION OF THE CONNECTIVITY READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #from impetuous.clustering import absolute_coordinates_to_distance_matrix\n #from impetuous.clustering import connectivity\n\n import operator\n if not operator.xor( coordinates is None , distance_matrix is None ) :\n print ( \"ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX\" )\n print ( \"dbscan FAILED\" )\n print ( \"DATA MATRICES NEEDS TO BE SPECIFIED WITH \\\" distance_matrix = ... \\\" \" )\n exit(1)\n\n if distance_matrix is None :\n from graphtastic.fit import absolute_coordinates_to_distance_matrix\n distance_matrix_ = absolute_coordinates_to_distance_matrix ( coordinates )\n eps = eps**2.0\n else :\n distance_matrix_ = distance_matrix\n\n isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts\n i_ = 0\n for ib in isNoise :\n if ib :\n distance_matrix_ [ i_] = ( 1+eps )*10.0\n distance_matrix_.T[i_] = ( 1+eps )*10.0\n distance_matrix_[i_][i_] = 0.\n i_ = i_+1\n clustercontent , clustercontacts = connectivity ( distance_matrix_ , eps )\n return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )\n\ndef reformat_dbscan_results ( results:dict ) -> dict :\n if True :\n clusters = {}\n for icontent in range(len(results['cluster content'])) :\n content = results[ 'cluster content' ][ icontent ]\n for c in results [ 'clusterid-particleid' ] :\n if c[0] == icontent :\n if results[ 'is noise' ][c[1]] :\n icontent=-1\n if icontent in clusters:\n clusters[ icontent ] .append( c[1] )\n else :\n clusters[ icontent ] = [ c[1] ]\n return ( clusters )\n"
] | [
[
"numpy.array",
"numpy.shape",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TochkaAI/Paddle | [
"f249a5f05f0f5832279244d88c8cb4eaaad1fbd4",
"f249a5f05f0f5832279244d88c8cb4eaaad1fbd4",
"f249a5f05f0f5832279244d88c8cb4eaaad1fbd4"
] | [
"python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py",
"python/paddle/fluid/tests/unittests/npu/test_truncated_gaussian_random_op_npu.py",
"python/paddle/fluid/tests/unittests/test_imperative_basic.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\nfrom paddle.fluid.core import AnalysisConfig\n\n\nclass TensorRTSubgraphPassActivationTest(InferencePassTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n\n def setUp(self):\n self.setUpTensorRTParam()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 6, 64, 64], dtype=\"float32\")\n act_out = self.append_act(data)\n out = fluid.layers.batch_norm(act_out, is_test=True)\n self.feeds = {\n \"data\": np.random.random([1, 6, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [out]\n\n def append_act(self, x):\n return fluid.layers.relu(x)\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n if os.path.exists(self.path + \"_opt_cache\"):\n shutil.rmtree(self.path + \"_opt_cache\")\n if self.trt_parameters.precision == AnalysisConfig.Precision.Float32:\n self.check_output_with_option(use_gpu)\n else:\n self.check_output_with_option(use_gpu, 1e-3)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.leaky_relu(x)\n\n\nclass TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.relu6(x)\n\n\nclass TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.softmax(x)\n\n\nclass TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.sigmoid(x)\n\n\nclass TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_swish(x)\n\n\nclass TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_sigmoid(x)\n\n\nclass TensorRTSubgraphPassHardSwishPluginTest(\n TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)\n\n\nclass TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.clip(x, 0, 1)\n\n\nclass TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.tanh(x)\n\n\nclass TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = 
TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassSwishFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassDynamicSwishFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='all')\n\n\nclass TensorRTSubgraphPassPreluChannelTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='channel')\n\n\nclass TensorRTSubgraphPassPreluElementTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='element')\n\n\nclass TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16DynamicTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16DynamicSerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n 
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.op import Operator\nfrom paddle.fluid.executor import Executor\n\npaddle.enable_static()\nSEED = 2021\n\n\[email protected](not paddle.is_compiled_with_npu(),\n \"core is not compiled with NPU\")\nclass TestTruncatedNormal(unittest.TestCase):\n def _test(self, run_npu=True):\n main_prog = paddle.static.Program()\n startup_prog = paddle.static.Program()\n scope = paddle.fluid.core.Scope()\n\n main_prog.random_seed = SEED\n startup_prog.random_seed = SEED\n np.random.seed(SEED)\n paddle.seed(SEED)\n\n with fluid.scope_guard(scope):\n with paddle.static.program_guard(main_prog, startup_prog):\n weight_attr = paddle.framework.ParamAttr(\n name=\"linear_weight\",\n initializer=paddle.nn.initializer.TruncatedNormal(\n mean=0.0, std=2.0))\n linear = paddle.nn.Linear(\n 2, 2, weight_attr=weight_attr, bias_attr=False)\n\n if run_npu:\n place = paddle.NPUPlace(0)\n else:\n place = paddle.CPUPlace()\n\n exe = paddle.static.Executor(place)\n w = exe.run(startup_prog, fetch_list=['linear_weight'])\n return w\n\n def test_npu(self):\n cpu_w = self._test(False)\n npu_w = self._test(True)\n\n self.assertTrue(np.allclose(npu_w, cpu_w))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport contextlib\nimport unittest\nimport numpy as np\n\nimport paddle.fluid as fluid\nfrom paddle.fluid import core\nfrom paddle.fluid import Linear\nfrom paddle.fluid.layer_helper import LayerHelper\nfrom test_imperative_base import new_program_scope\nimport paddle.fluid.dygraph_utils as dygraph_utils\nfrom paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper\nimport paddle\n\n\nclass MyLayer(fluid.Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n\n def forward(self, inputs):\n x = fluid.layers.relu(inputs)\n self._x_for_debug = x\n x = fluid.layers.elementwise_mul(x, x)\n x = fluid.layers.reduce_sum(x)\n return [x]\n\n\nclass MLP(fluid.Layer):\n def __init__(self, input_size):\n super(MLP, self).__init__()\n self._linear1 = None\n self._linear1 = Linear(\n input_size,\n 3,\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.1)),\n bias_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.1)))\n self._linear2 = Linear(\n 3,\n 4,\n param_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.1)),\n bias_attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.1)))\n\n def forward(self, inputs):\n x = self._linear1(inputs)\n x = self._linear2(x)\n x = fluid.layers.reduce_sum(x)\n return x\n\n\nclass SimpleRNNCell(fluid.Layer):\n def __init__(self, step_input_size, hidden_size, output_size, param_attr):\n super(SimpleRNNCell, self).__init__()\n self.step_input_size = step_input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self._dtype = core.VarDesc.VarType.FP32\n self.param_attr = param_attr\n\n i2h_param_shape = [self.step_input_size, self.hidden_size]\n h2h_param_shape = [self.hidden_size, self.hidden_size]\n h2o_param_shape = [self.output_size, self.hidden_size]\n self._i2h_w = None\n self._i2h_w = self.create_parameter(\n attr=self.param_attr,\n shape=i2h_param_shape,\n dtype=self._dtype,\n is_bias=False)\n self._h2h_w = self.create_parameter(\n attr=self.param_attr,\n shape=h2h_param_shape,\n dtype=self._dtype,\n is_bias=False)\n self._h2o_w = self.create_parameter(\n attr=self.param_attr,\n shape=h2o_param_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input, pre_hidden):\n tmp_i2h = self.create_variable(dtype=self._dtype)\n tmp_h2h = self.create_variable(dtype=self._dtype)\n hidden = self.create_variable(dtype=self._dtype)\n out = self.create_variable(dtype=self._dtype)\n softmax_out = self.create_variable(dtype=self._dtype)\n reduce_out = self.create_variable(dtype=self._dtype)\n self._helper.append_op(\n type=\"mul\",\n inputs={\"X\": input,\n \"Y\": self._i2h_w},\n outputs={\"Out\": tmp_i2h},\n attrs={\"x_num_col_dims\": 1,\n \"y_num_col_dims\": 1})\n\n self._helper.append_op(\n type=\"mul\",\n inputs={\"X\": pre_hidden,\n \"Y\": self._h2h_w},\n outputs={\"Out\": tmp_h2h},\n attrs={\"x_num_col_dims\": 1,\n 
\"y_num_col_dims\": 1})\n\n self._helper.append_op(\n type=\"elementwise_add\",\n inputs={'X': tmp_h2h,\n 'Y': tmp_i2h},\n outputs={'Out': hidden},\n attrs={'axis': -1,\n 'use_mkldnn': False})\n hidden = self._helper.append_activation(hidden, act='tanh')\n\n self._helper.append_op(\n type=\"mul\",\n inputs={\"X\": hidden,\n \"Y\": self._h2o_w},\n outputs={\"Out\": out},\n attrs={\"x_num_col_dims\": 1,\n \"y_num_col_dims\": 1})\n\n self._helper.append_op(\n type=\"softmax\",\n inputs={\"X\": out},\n outputs={\"Out\": softmax_out},\n attrs={\"use_cudnn\": False})\n\n self._helper.append_op(\n type='reduce_sum',\n inputs={'X': softmax_out},\n outputs={'Out': reduce_out},\n attrs={'keep_dim': False,\n 'reduce_all': True})\n\n return reduce_out, hidden\n\n\nclass SimpleRNN(fluid.Layer):\n def __init__(self):\n super(SimpleRNN, self).__init__()\n self.seq_len = 4\n self._cell = SimpleRNNCell(\n 3,\n 3,\n 3,\n fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))\n\n def forward(self, inputs):\n outs = list()\n pre_hiddens = list()\n\n init_hidden = self.create_parameter(\n attr=fluid.ParamAttr(\n initializer=fluid.initializer.Constant(value=0.1)),\n shape=[1, 3],\n dtype='float32',\n is_bias=False)\n pre_hidden = init_hidden\n for i in range(self.seq_len):\n input = fluid.layers.slice(\n inputs, axes=[1], starts=[i], ends=[i + 1])\n input = fluid.layers.reshape(input, shape=[1, 3])\n out_softmax, pre_hidden = self._cell(input, pre_hidden)\n outs.append(out_softmax)\n\n return outs, pre_hiddens\n\n\nclass TestImperative(unittest.TestCase):\n def test_functional_dygraph_context(self):\n self.assertFalse(fluid.dygraph.enabled())\n fluid.enable_dygraph()\n self.assertTrue(fluid.dygraph.enabled())\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n var_inp = fluid.dygraph.base.to_variable(np_inp)\n mlp = MLP(input_size=2)\n out = mlp(var_inp)\n dy_out1 = out.numpy()\n out.backward()\n dy_grad1 = mlp._linear1.weight.gradient()\n fluid.disable_dygraph()\n self.assertFalse(fluid.dygraph.enabled())\n with fluid.dygraph.guard():\n self.assertTrue(fluid.dygraph.enabled())\n var_inp = fluid.dygraph.base.to_variable(np_inp)\n mlp = MLP(input_size=2)\n out = mlp(var_inp)\n dy_out2 = out.numpy()\n out.backward()\n dy_grad2 = mlp._linear1.weight.gradient()\n self.assertFalse(fluid.dygraph.enabled())\n self.assertTrue(np.array_equal(dy_out1, dy_out2))\n self.assertTrue(np.array_equal(dy_grad1, dy_grad2))\n\n def test_functional_paddle_imperative_dygraph_context(self):\n self.assertFalse(paddle.in_dynamic_mode())\n paddle.disable_static()\n self.assertTrue(paddle.in_dynamic_mode())\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n var_inp = paddle.to_tensor(np_inp)\n mlp = MLP(input_size=2)\n out = mlp(var_inp)\n dy_out1 = out.numpy()\n out.backward()\n dy_grad1 = mlp._linear1.weight.gradient()\n paddle.enable_static()\n self.assertFalse(paddle.in_dynamic_mode())\n paddle.disable_static()\n self.assertTrue(paddle.in_dynamic_mode())\n var_inp = paddle.to_tensor(np_inp)\n mlp = MLP(input_size=2)\n out = mlp(var_inp)\n dy_out2 = out.numpy()\n out.backward()\n dy_grad2 = mlp._linear1.weight.gradient()\n paddle.enable_static()\n self.assertFalse(paddle.in_dynamic_mode())\n self.assertTrue(np.array_equal(dy_out1, dy_out2))\n self.assertTrue(np.array_equal(dy_grad1, dy_grad2))\n\n def test_isinstance(self):\n var = fluid.layers.data(shape=[1], name='x', dtype='float32')\n self.assertTrue(isinstance(var, fluid.Variable))\n with fluid.dygraph.guard():\n var_base = 
fluid.dygraph.base.to_variable(np.array([3, 4, 5]))\n self.assertTrue(isinstance(var_base, core.VarBase))\n self.assertTrue(isinstance(var_base, fluid.Variable))\n\n def test_create_VarBase(self):\n x = np.ones([2, 2], np.float32)\n y = np.zeros([3, 3], np.float32)\n t = fluid.Tensor()\n t.set(x, fluid.CPUPlace())\n with fluid.dygraph.guard():\n tmp = fluid.core.VarBase(value=x, place=fluid.core.CPUPlace())\n tmp2 = fluid.core.VarBase(y, fluid.core.CPUPlace())\n tmp3 = fluid.dygraph.base.to_variable(x)\n tmp4 = fluid.core.VarBase(y)\n tmp5 = fluid.core.VarBase(value=x)\n tmp6 = fluid.core.VarBase(t)\n\n self.assertTrue(np.array_equal(x, tmp.numpy()))\n self.assertTrue(np.array_equal(y, tmp2.numpy()))\n self.assertTrue(np.array_equal(x, tmp3.numpy()))\n self.assertTrue(np.array_equal(y, tmp4.numpy()))\n self.assertTrue(np.array_equal(x, tmp5.numpy()))\n self.assertTrue(np.array_equal(x, tmp6.numpy()))\n\n def test_no_grad_guard(self):\n data = np.array([[2, 3], [4, 5]]).astype('float32')\n with fluid.dygraph.guard():\n l0 = fluid.Linear(2, 2)\n self.assertTrue(l0.weight._grad_ivar() is None)\n l1 = fluid.Linear(2, 2)\n with fluid.dygraph.no_grad():\n self.assertTrue(l1.weight.stop_gradient is False)\n tmp = l1.weight * 2\n self.assertTrue(tmp.stop_gradient)\n x = fluid.dygraph.to_variable(data)\n y = l0(x) + tmp\n o = l1(y)\n o.backward()\n\n self.assertTrue(tmp._grad_ivar() is None)\n self.assertTrue(l0.weight._grad_ivar() is not None)\n\n def test_paddle_imperative_no_grad_guard(self):\n data = np.array([[2, 3], [4, 5]]).astype('float32')\n with fluid.dygraph.guard():\n l0 = fluid.Linear(2, 2)\n self.assertTrue(l0.weight._grad_ivar() is None)\n l1 = fluid.Linear(2, 2)\n with paddle.no_grad():\n self.assertTrue(l1.weight.stop_gradient is False)\n tmp = l1.weight * 2\n self.assertTrue(tmp.stop_gradient)\n x = fluid.dygraph.to_variable(data)\n y = l0(x) + tmp\n o = l1(y)\n o.backward()\n\n self.assertTrue(tmp._grad_ivar() is None)\n self.assertTrue(l0.weight._grad_ivar() is not None)\n\n def test_paddle_imperative_set_grad_enabled(self):\n data = np.array([[2, 3], [4, 5]]).astype('float32')\n with fluid.dygraph.guard():\n l0 = fluid.Linear(2, 2)\n self.assertTrue(l0.weight._grad_ivar() is None)\n l1 = fluid.Linear(2, 2)\n with paddle.set_grad_enabled(False):\n self.assertTrue(l1.weight.stop_gradient is False)\n tmp = l1.weight * 2\n with paddle.set_grad_enabled(True):\n tmp2 = l1.weight * 2\n self.assertTrue(tmp.stop_gradient)\n self.assertTrue(tmp2.stop_gradient is False)\n x = fluid.dygraph.to_variable(data)\n y = l0(x) + tmp2\n o = l1(y)\n o.backward()\n\n self.assertTrue(tmp._grad_ivar() is None)\n self.assertTrue(tmp2._grad_ivar() is not None)\n self.assertTrue(l0.weight._grad_ivar() is not None)\n\n def test_sum_op(self):\n x = np.ones([2, 2], np.float32)\n with fluid.dygraph.guard():\n inputs = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient = False\n inputs.append(tmp)\n ret = fluid.layers.sums(inputs)\n loss = fluid.layers.reduce_sum(ret)\n loss.backward()\n with fluid.dygraph.guard():\n inputs2 = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient = False\n inputs2.append(tmp)\n ret2 = fluid.layers.sums(inputs2)\n loss2 = fluid.layers.reduce_sum(ret2)\n fluid.set_flags({'FLAGS_sort_sum_gradient': True})\n loss2.backward()\n\n self.assertTrue(np.allclose(ret.numpy(), x * 10))\n self.assertTrue(np.allclose(inputs[0].gradient(), x))\n self.assertTrue(np.allclose(ret2.numpy(), x * 10))\n a = 
inputs2[0].gradient()\n self.assertTrue(np.allclose(inputs2[0].gradient(), x))\n\n def test_empty_var(self):\n with fluid.dygraph.guard():\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(\n name=\"X\", shape=[-1, 23, 48], dtype='float32')\n try:\n new_variable.numpy()\n except Exception as e:\n assert type(e) == ValueError\n\n try:\n new_variable.backward()\n except Exception as e:\n assert type(e) == core.EnforceNotMet\n\n try:\n new_variable.clear_gradient()\n except Exception as e:\n assert type(e) == core.EnforceNotMet\n\n def test_empty_grad(self):\n with fluid.dygraph.guard():\n x = np.ones([2, 2], np.float32)\n new_var = fluid.dygraph.base.to_variable(x)\n try:\n new_var.gradient()\n except Exception as e:\n assert type(e) == ValueError\n\n try:\n new_var.clear_gradient()\n except Exception as e:\n assert type(e) == core.EnforceNotMet\n\n with fluid.dygraph.guard():\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(\n name=\"X\", shape=[-1, 23, 48], dtype='float32')\n try:\n new_variable.gradient()\n except Exception as e:\n assert type(e) == ValueError\n\n def test_set_persistable(self):\n with fluid.dygraph.guard():\n x = np.ones([2, 2], np.float32)\n new_var = fluid.dygraph.base.to_variable(x)\n self.assertFalse(new_var.persistable)\n new_var.persistable = True\n self.assertTrue(new_var.persistable)\n\n def test_layer(self):\n with fluid.dygraph.guard():\n cl = core.Layer()\n cl.forward([])\n l = fluid.Layer(\"l\")\n self.assertRaises(NotImplementedError, l.forward, [])\n\n def test_layer_in_out(self):\n np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)\n with fluid.dygraph.guard():\n var_inp = fluid.dygraph.base.to_variable(np_inp)\n var_inp.stop_gradient = False\n l = MyLayer()\n x = l(var_inp)[0]\n self.assertIsNotNone(x)\n dy_out = x.numpy()\n x.backward()\n dy_grad = l._x_for_debug.gradient()\n\n with fluid.dygraph.guard():\n var_inp2 = fluid.dygraph.base.to_variable(np_inp)\n var_inp2.stop_gradient = False\n l2 = MyLayer()\n x2 = l2(var_inp2)[0]\n self.assertIsNotNone(x2)\n dy_out2 = x2.numpy()\n fluid.set_flags({'FLAGS_sort_sum_gradient': True})\n x2.backward()\n dy_grad2 = l2._x_for_debug.gradient()\n\n with new_program_scope():\n inp = fluid.layers.data(\n name=\"inp\", shape=[3], append_batch_size=False)\n l = MyLayer()\n x = l(inp)[0]\n param_grads = fluid.backward.append_backward(\n x, parameter_list=[l._x_for_debug.name])[0]\n exe = fluid.Executor(fluid.CPUPlace(\n ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))\n\n static_out, static_grad = exe.run(\n feed={inp.name: np_inp},\n fetch_list=[x.name, param_grads[1].name])\n\n self.assertTrue(np.allclose(dy_out, static_out))\n self.assertTrue(np.allclose(dy_grad, static_grad))\n self.assertTrue(np.allclose(dy_out2, static_out))\n self.assertTrue(np.allclose(dy_grad2, static_grad))\n\n def test_mlp(self):\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n with fluid.dygraph.guard():\n var_inp = fluid.dygraph.base.to_variable(np_inp)\n mlp = MLP(input_size=2)\n out = mlp(var_inp)\n dy_out = out.numpy()\n out.backward()\n dy_grad = mlp._linear1.weight.gradient()\n\n with fluid.dygraph.guard():\n var_inp2 = fluid.dygraph.base.to_variable(np_inp)\n mlp2 = MLP(input_size=2)\n out2 = mlp2(var_inp2)\n dy_out2 = out2.numpy()\n fluid.set_flags({'FLAGS_sort_sum_gradient': True})\n out2.backward()\n dy_grad2 = mlp2._linear1.weight.gradient()\n\n with new_program_scope():\n 
inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n mlp = MLP(input_size=2)\n out = mlp(inp)\n param_grads = fluid.backward.append_backward(\n out, parameter_list=[mlp._linear1.weight.name])[0]\n exe = fluid.Executor(fluid.CPUPlace(\n ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))\n exe.run(fluid.default_startup_program())\n\n static_out, static_grad = exe.run(\n feed={inp.name: np_inp},\n fetch_list=[out.name, param_grads[1].name])\n\n self.assertTrue(np.allclose(dy_out, static_out))\n self.assertTrue(np.allclose(dy_grad, static_grad))\n self.assertTrue(np.allclose(dy_out2, static_out))\n self.assertTrue(np.allclose(dy_grad2, static_grad))\n\n params = mlp.parameters(True)\n self.assertEqual(\"linear_0.w_0\", params[0].name)\n self.assertEqual(\"linear_0.b_0\", params[1].name)\n self.assertEqual(\"linear_1.w_0\", params[2].name)\n self.assertEqual(\"linear_1.b_0\", params[3].name)\n self.assertEqual(len(params), 4)\n\n sublayers = mlp.sublayers()\n self.assertEqual(mlp._linear1, sublayers[0])\n self.assertEqual(mlp._linear2, sublayers[1])\n self.assertEqual(len(sublayers), 2)\n\n def test_gradient_accumulation(self):\n def test_single_api(sort_sum_gradient):\n fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})\n x = paddle.to_tensor(5., stop_gradient=False)\n for i in range(10):\n y = paddle.pow(x, 4.0)\n y.backward()\n self.assertEqual(x.grad.numpy(), (i + 1) * 500)\n x.clear_gradient()\n self.assertEqual(x.grad.numpy(), 0.)\n for i in range(10):\n y = paddle.pow(x, 4.0)\n y.backward()\n self.assertEqual(x.grad.numpy(), (i + 1) * 500)\n x.clear_grad()\n self.assertEqual(x.grad.numpy(), 0.)\n\n def test_simple_net(sort_sum_gradient):\n fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})\n x = paddle.to_tensor(5., stop_gradient=False)\n y = paddle.to_tensor(2., stop_gradient=False)\n z = paddle.to_tensor(3., stop_gradient=False)\n\n def fun(x, y, z):\n loss1 = x * x * y\n loss2 = x * z\n loss1.backward(retain_graph=True)\n loss2.backward(retain_graph=True)\n self.assertTrue(np.array_equal(x.grad.numpy(), [23.]))\n self.assertTrue(np.array_equal(y.grad.numpy(), [25.]))\n self.assertTrue(np.array_equal(z.grad.numpy(), [5.]))\n x.clear_grad()\n y.clear_grad()\n z.clear_grad()\n\n dx = paddle.grad([loss1], x, create_graph=True)[0]\n loss = loss1 + loss2 + dx\n # loss = x*x*y + x*z + 2*x*y\n return loss\n\n loss = fun(x, y, z)\n loss.backward(retain_graph=True)\n # x.grad = 2*x*y + z + 2*y = 27 \n self.assertTrue(np.array_equal(x.grad.numpy(), [27]))\n\n loss.backward(retain_graph=True)\n self.assertTrue(np.array_equal(x.grad.numpy(), [54]))\n\n loss.backward()\n self.assertTrue(np.array_equal(x.grad.numpy(), [81]))\n\n with self.assertRaises(RuntimeError):\n loss.backward()\n\n loss1 = x * x * y\n loss2 = x * z\n dx = paddle.grad([loss1], x, create_graph=True)[0]\n loss = loss1 + loss2 + dx\n loss.backward()\n self.assertTrue(np.array_equal(dx.grad.numpy(), [1]))\n self.assertTrue(np.array_equal(x.grad.numpy(), [108]))\n\n def test_mlp(sort_sum_gradient):\n fluid.set_flags({'FLAGS_sort_sum_gradient': sort_sum_gradient})\n input_size = 5\n paddle.seed(1)\n mlp1 = MLP(input_size=input_size)\n # generate the gradient of each step\n mlp2 = MLP(input_size=input_size)\n\n expected_weight1_grad = 0.\n expected_bias1_grad = 0.\n expected_weight2_grad = 0.\n expected_bias2_grad = 0.\n\n for batch_id in range(100):\n x = paddle.uniform([10, input_size])\n detach_x = x.detach()\n clear_loss = mlp2(detach_x)\n clear_loss.backward()\n 
expected_weight1_grad = (\n expected_weight1_grad + mlp2._linear1.weight.grad.numpy())\n expected_bias1_grad = (\n expected_bias1_grad + mlp2._linear1.bias.grad.numpy())\n expected_weight2_grad = (\n expected_weight2_grad + mlp2._linear2.weight.grad.numpy())\n expected_bias2_grad = (\n expected_bias2_grad + mlp2._linear2.bias.grad.numpy())\n\n loss = mlp1(x)\n loss.backward()\n\n self.assertTrue(np.array_equal(loss.grad.numpy(), [1]))\n self.assertTrue(\n np.allclose(mlp1._linear1.weight.grad.numpy(),\n expected_weight1_grad))\n self.assertTrue(\n np.allclose(mlp1._linear1.bias.grad.numpy(),\n expected_bias1_grad))\n self.assertTrue(\n np.allclose(mlp1._linear2.weight.grad.numpy(),\n expected_weight2_grad))\n self.assertTrue(\n np.allclose(mlp1._linear2.bias.grad.numpy(),\n expected_bias2_grad))\n\n mlp2.clear_gradients()\n self.assertTrue(np.array_equal(clear_loss.grad.numpy(), [1]))\n if ((batch_id + 1) % 10) == 0:\n mlp1.clear_gradients()\n expected_weight1_grad = 0.\n expected_bias1_grad = 0.\n expected_weight2_grad = 0.\n expected_bias2_grad = 0.\n\n with fluid.dygraph.guard():\n test_single_api(False)\n test_single_api(True)\n test_simple_net(False)\n test_simple_net(True)\n test_mlp(False)\n test_mlp(True)\n\n def test_dygraph_vs_static(self):\n np_inp1 = np.random.rand(4, 3, 3)\n np_inp2 = np.random.rand(4, 3, 3)\n\n # dynamic graph\n with fluid.dygraph.guard():\n inp1 = fluid.dygraph.to_variable(np_inp1)\n inp2 = fluid.dygraph.to_variable(np_inp2)\n if np.sum(np_inp1) < np.sum(np_inp2):\n x = fluid.layers.elementwise_add(inp1, inp2)\n else:\n x = fluid.layers.elementwise_sub(inp1, inp2)\n dygraph_result = x.numpy()\n\n # static graph\n with new_program_scope():\n inp_data1 = fluid.layers.data(\n name='inp1', shape=[3, 3], dtype=np.float32)\n inp_data2 = fluid.layers.data(\n name='inp2', shape=[3, 3], dtype=np.float32)\n\n a = fluid.layers.expand(\n fluid.layers.reshape(\n fluid.layers.reduce_sum(inp_data1), [1, 1]), [4, 1])\n b = fluid.layers.expand(\n fluid.layers.reshape(\n fluid.layers.reduce_sum(inp_data2), [1, 1]), [4, 1])\n cond = fluid.layers.less_than(x=a, y=b)\n\n ie = fluid.layers.IfElse(cond)\n with ie.true_block():\n d1 = ie.input(inp_data1)\n d2 = ie.input(inp_data2)\n d3 = fluid.layers.elementwise_add(d1, d2)\n ie.output(d3)\n\n with ie.false_block():\n d1 = ie.input(inp_data1)\n d2 = ie.input(inp_data2)\n d3 = fluid.layers.elementwise_sub(d1, d2)\n ie.output(d3)\n out = ie()\n\n exe = fluid.Executor(fluid.CPUPlace(\n ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))\n static_result = exe.run(fluid.default_main_program(),\n feed={'inp1': np_inp1,\n 'inp2': np_inp2},\n fetch_list=out)[0]\n self.assertTrue(np.allclose(dygraph_result, static_result))\n\n def test_rnn(self):\n np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0],\n [10.0, 11.0, 12.0]])\n np_inp = np_inp.reshape((1, 4, 3))\n np_inp = np_inp.astype(np.float32)\n with fluid.dygraph.guard():\n var_inp = fluid.dygraph.base.to_variable(np_inp)\n var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])\n simple_rnn = SimpleRNN()\n outs, pre_hiddens = simple_rnn.forward(var_inp)\n dy_out = outs[3].numpy()\n outs[3].backward()\n dy_grad_h2o = simple_rnn._cell._h2o_w.gradient()\n dy_grad_h2h = simple_rnn._cell._h2h_w.gradient()\n dy_grad_i2h = simple_rnn._cell._i2h_w.gradient()\n\n with fluid.dygraph.guard():\n var_inp2 = fluid.dygraph.base.to_variable(np_inp)\n var_inp2 = fluid.layers.reshape(var_inp2, shape=[1, 4, 3])\n simple_rnn2 = SimpleRNN()\n outs2, pre_hiddens2 = 
simple_rnn2.forward(var_inp2)\n dy_out2 = outs2[3].numpy()\n fluid.set_flags({'FLAGS_sort_sum_gradient': True})\n outs2[3].backward()\n dy_grad_h2o2 = simple_rnn2._cell._h2o_w.gradient()\n dy_grad_h2h2 = simple_rnn2._cell._h2h_w.gradient()\n dy_grad_i2h2 = simple_rnn2._cell._i2h_w.gradient()\n\n with new_program_scope():\n inp = fluid.layers.data(\n name=\"inp\", shape=[1, 4, 3], append_batch_size=False)\n simple_rnn = SimpleRNN()\n outs, pre_hiddens = simple_rnn(inp)\n param_grads = fluid.backward.append_backward(outs[3])\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(\n feed={inp.name: np_inp},\n fetch_list=[\n outs[3].name, param_grads[0][1].name,\n param_grads[1][1].name, param_grads[2][1].name\n ])\n\n self.assertTrue(np.allclose(dy_out, static_out))\n self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))\n self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))\n self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))\n self.assertTrue(np.allclose(dy_out2, static_out))\n self.assertTrue(np.allclose(dy_grad_h2o2, static_grad_h2o))\n self.assertTrue(np.allclose(dy_grad_h2h2, static_grad_h2h))\n self.assertTrue(np.allclose(dy_grad_i2h2, static_grad_i2h))\n\n def test_layer_attrs(self):\n layer = fluid.dygraph.Layer(\"test\")\n layer.test_attr = 1\n self.assertFalse(hasattr(layer, \"whatever\"))\n self.assertTrue(hasattr(layer, \"test_attr\"))\n self.assertEqual(layer.test_attr, 1)\n\n my_layer = MyLayer()\n my_layer.w1 = my_layer.create_parameter([3, 3])\n my_layer.add_parameter('w2', None)\n self.assertEqual(len(my_layer.parameters()), 1)\n self.assertRaises(TypeError, my_layer.__setattr__, 'w1', 'str')\n my_layer.w1 = None\n self.assertEqual(len(my_layer.parameters()), 0)\n my_layer.l1 = fluid.dygraph.Linear(3, 3)\n self.assertEqual(len(my_layer.sublayers()), 1)\n self.assertRaises(TypeError, my_layer.__setattr__, 'l1', 'str')\n my_layer.l1 = None\n self.assertEqual(len(my_layer.sublayers()), 0)\n\n\nclass TestDygraphUtils(unittest.TestCase):\n def test_append_activation_in_dygraph_exception(self):\n with new_program_scope():\n np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)\n a = fluid.layers.data(\"a\", [10, 20])\n func = dygraph_utils._append_activation_in_dygraph\n self.assertRaises(AssertionError, func, a, act=\"sigmoid\")\n\n def test_append_activation_in_dygraph1(self):\n a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)\n func = dygraph_utils._append_activation_in_dygraph\n with fluid.dygraph.guard():\n a = fluid.dygraph.to_variable(a_np)\n res1 = func(a, act=\"hard_sigmoid\")\n res2 = fluid.layers.hard_sigmoid(a)\n self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))\n\n def test_append_activation_in_dygraph2(self):\n a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)\n func = dygraph_utils._append_activation_in_dygraph\n with fluid.dygraph.guard():\n a = fluid.dygraph.to_variable(a_np)\n res1 = func(a, act=\"sigmoid\", use_mkldnn=True, use_cudnn=True)\n res2 = fluid.layers.sigmoid(a)\n self.assertTrue(np.allclose(res1.numpy(), res2.numpy()))\n\n def test_append_activation_in_dygraph3(self):\n a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)\n helper = LayerObjectHelper(fluid.unique_name.generate(\"test\"))\n func = helper.append_activation\n with fluid.dygraph.guard():\n a = fluid.dygraph.to_variable(a_np)\n res1 = func(a, act=\"sigmoid\", use_cudnn=True)\n res2 = fluid.layers.sigmoid(a)\n 
self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))\n\n def test_append_activation_in_dygraph_use_mkldnn(self):\n a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)\n helper = LayerHelper(\n fluid.unique_name.generate(\"test\"), act=\"relu\", use_mkldnn=True)\n func = helper.append_activation\n with fluid.dygraph.guard():\n a = fluid.dygraph.to_variable(a_np)\n res1 = func(a)\n res2 = fluid.layers.relu(a)\n self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))\n\n def test_append_activation_in_dygraph_global_use_mkldnn(self):\n a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32)\n helper = LayerHelper(fluid.unique_name.generate(\"test\"), act=\"relu\")\n func = helper.append_activation\n with fluid.dygraph.guard(fluid.core.CPUPlace()):\n a = fluid.dygraph.to_variable(a_np)\n fluid.set_flags({'FLAGS_use_mkldnn': True})\n try:\n res1 = func(a)\n finally:\n fluid.set_flags({'FLAGS_use_mkldnn': False})\n res2 = fluid.layers.relu(a)\n self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))\n\n def test_append_bias_in_dygraph_exception(self):\n with new_program_scope():\n np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32)\n a = fluid.layers.data(\"a\", [10, 20])\n func = dygraph_utils._append_bias_in_dygraph\n self.assertRaises(AssertionError, func, a)\n\n def test_append_bias_in_dygraph(self):\n a_np = np.random.random(size=(10, 20, 30)).astype(np.float32)\n func = dygraph_utils._append_bias_in_dygraph\n with fluid.dygraph.guard():\n a = fluid.dygraph.to_variable(a_np)\n res1 = func(a, bias=a)\n res2 = a + a\n self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))\n\n\nclass TestDygraphGuardWithError(unittest.TestCase):\n def test_without_guard(self):\n with fluid.dygraph.guard():\n x = fluid.dygraph.to_variable(np.zeros([10, 10]))\n with self.assertRaisesRegexp(TypeError,\n \"Please use `with fluid.dygraph.guard()\"):\n y = fluid.layers.matmul(x, x)\n\n\nif __name__ == '__main__':\n paddle.enable_static()\n unittest.main()\n"
] | [
[
"numpy.random.random"
],
[
"numpy.allclose",
"numpy.random.seed"
],
[
"numpy.random.random",
"numpy.allclose",
"numpy.array_equal",
"numpy.ones",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
naveenkambham/big_five_personality_machine_learning | [
"a4d673e7e72287f2448b6a7b2729e5231b4f7ab2",
"a4d673e7e72287f2448b6a7b2729e5231b4f7ab2"
] | [
"UnitTests/test_battery_sensor_features_extractor.py",
"FeatureExtraction/wifi_sensor_features_extractor.py"
] | [
"\"\"\"\nDeveloper : Naveen Kambham\nDescription: Unit testing for battery sensor feature extractor code. Majority of the data extraction code has to be tested visually by looking at the plots distributions.\n\"\"\"\n#Importing the required libraries.\nimport unittest\nimport numpy as np\nfrom FeatureExtraction import battery_sensor_features_extractor\n\n\n\nclass BatterySensorTestCase(unittest.TestCase):\n \"\"\"\n Tests for battery_sensor_features_extractor.py\n \"\"\"\n def test_TakeMostProbableTimeInStudy(self):\n \"\"\"\n to test the most probable time functionality\n :return:\n \"\"\"\n #case 1 multiple values in each day\n result= battery_sensor_features_extractor.TakeMostProbableTimeInStudy([1,1,1,1,2,2,3,3,3,3,3,3,3,3],[1,2,0])\n self.assertEqual(result,3)\n\n # case 2 only one value in a day\n result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(\n [1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [1])\n self.assertEqual(result, 4)\n\n # case 3 only one value in a day and it is not exists in the study times so far seen\n result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(\n [1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [0])\n self.assertEqual(result, 0)\n\n def test_extract(self):\n \"\"\"\n testing the feature extractor code\n :return:\n \"\"\"\n #extracting the features\n df_battery=battery_sensor_features_extractor.extract(r\"/home/naveen/Data/Shed10/Filtered/battery_events.csv\")\n\n # charging should atleast be greater than 0\n self.assertTrue(np.min(df_battery['Battery_Charging_Duration'] >=0))\n self.assertTrue(np.min(df_battery['CharginTimeDaily'] >=0) and np.max(df_battery['CharginTimeDaily'] <=24))\n\n\nif __name__ == '__main__':\n unittest.main()",
"\"\"\"\nDeveloper : Naveen Kambham\nDescription: Code to find campus arrival time, campus departure time, time spent in campus, number of unique WiFi routers visited\naround the city for each day using the known MAC addresses of University and geographic coordinates for each campus WiFirouter.\n\"\"\"\n#Importing the required libraries.\nimport numpy as np\nimport pandas as pd\nimport FeatureExtraction.CommonFunctions.dataprocessing_helper as dataprocessor\n\n\ndef CountDistinctStrings(list):\n return np.count_nonzero(np.unique(list))\n\ndef get_campus_entry_leave_times(file):\n \"\"\"\n method to find the campus entry time, leave time and time on campus\n First time in a day a phone sees a campus router is entry time of the participant/phone and\n last time is leave time. Difference of these two gives time spent on campus.\n :param file path:\n :return dataframe:\n \"\"\"\n #Read the data in to data frame\n df = pd.read_csv(file)\n\n\n #consider only university wifi router address records and split the record_time in to date and time\n df_with_UofS_Wifi = df.loc[df.ssid.isin(['uofs-secure','uofs-public','uofs-guest'])]\n df_with_UofS_Wifi['Date'],df_with_UofS_Wifi['Time'] = zip(*df_with_UofS_Wifi['record_time'].map(lambda x:x.split(' ')))\n\n #Group by Id, Date\n grouped= df_with_UofS_Wifi.groupby(['user_id','Date'])\n\n\n #From the aggreagation get the min, max times i.e campues entry, leave times\n lst_campus_entry_leaving_timings = [(key[0],key[1], min(value['Time']), max(value['Time'])) for (key, value) in grouped.__iter__()]\n\n # create data frame out of three features.\n df = pd.DataFrame(lst_campus_entry_leaving_timings, columns=['ID','Date', 'EntryTime','LeavingTime'])\n df['Time_In_School']= df['EntryTime'] - df['LeavingTime']\n\n return df\n\ndef get_diff_wifi_seen(file):\n \"\"\"\n method to find the different routers a phone seen throguh wifi sensors. Here routers with out hand-shake-connection\n are also recorded by the wifi sensor\n :param file:\n :return df:\n \"\"\"\n\n #read the data in to df and split the record_time in to Date and Time\n df = pd.read_csv(file)\n df['ssid']= df['ssid'].astype(str)\n df['record_time']= df['record_time'].astype(str)\n df['Date'],df['Time'] = zip(*df['record_time'].map(lambda x:x.split(' ')))\n\n #Group by Id, Date\n grouped= df.groupby(['user_id','Date'])\n\n\n #count distinct wifi strings\n lst_wifi_routers_visited_daily = [(key[0],key[1],CountDistinctStrings(value['ssid'])) for (key, value) in grouped.__iter__()]\n df_wifis_seen = pd.DataFrame(lst_wifi_routers_visited_daily, columns=['ID','Date','WifiCountPerDay'])\n\n return df_wifis_seen\n\n\n\ndef extract(path):\n \"\"\"\n method to extrac campus entry time, leave time, time spent in campus and different buildings, wifi routers seen\n \"\"\"\n\n #extracting campus entry leave times, time in school and different wifi router seen in city\n df_campus_entry_leave_times=get_campus_entry_leave_times(path)\n df_diff_wifi_seen= get_diff_wifi_seen(path)\n\n\n #merging the data frames\n df_wifi_features= dataprocessor.merge([df_campus_entry_leave_times, df_diff_wifi_seen], ['ID', 'Date'])\n return df_wifi_features\n\n\n\n\n#stand alone code to test the data\n# df_wifi =main(r\"/home/naveen/Data/Shed10/wifi.csv\")\n# print(len(df_wifi))\n"
] | [
[
"numpy.max",
"numpy.min"
],
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
KOLANICH/qiskit-terra | [
"3947f258ddb31a2b8dd17aff5d2d041d29d74601"
] | [
"qiskit/quantum_info/operators/measures.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\"\"\"\nA collection of useful quantum information functions for operators.\n\"\"\"\n\nimport warnings\nimport numpy as np\nfrom scipy import sparse\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.quantum_info.operators.base_operator import BaseOperator\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel\nfrom qiskit.quantum_info.operators.channel import Choi, SuperOp\nfrom qiskit.quantum_info.states.densitymatrix import DensityMatrix\nfrom qiskit.quantum_info.states.measures import state_fidelity\n\ntry:\n import cvxpy\n _HAS_CVX = True\nexcept ImportError:\n _HAS_CVX = False\n\n\ndef process_fidelity(channel,\n target=None,\n require_cp=True,\n require_tp=False):\n r\"\"\"Return the process fidelity of a noisy quantum channel.\n\n\n The process fidelity :math:`F_{\\text{pro}}(\\mathcal{E}, \\methcal{F})`\n between two quantum channels :math:`\\mathcal{E}, \\mathcal{F}` is given by\n\n .. math:\n F_{\\text{pro}}(\\mathcal{E}, \\mathcal{F})\n = F(\\rho_{\\mathcal{E}}, \\rho_{\\mathcal{F}})\n\n where :math:`F` is the :func:`~qiskit.quantum_info.state_fidelity`,\n :math:`\\rho_{\\mathcal{E}} = \\Lambda_{\\mathcal{E}} / d` is the\n normalized :class:`~qiskit.quantum_info.Choi` matrix for the channel\n :math:`\\mathcal{E}`, and :math:`d` is the input dimension of\n :math:`\\mathcal{E}`.\n\n When the target channel is unitary this is equivalent to\n\n .. 
math::\n F_{\\text{pro}}(\\mathcal{E}, U)\n = \\frac{Tr[S_U^\\dagger S_{\\mathcal{E}}]}{d^2}\n\n where :math:`S_{\\mathcal{E}}, S_{U}` are the\n :class:`~qiskit.quantum_info.SuperOp` matrices for the *input* quantum\n channel :math:`\\mathcal{E}` and *target* unitary :math:`U` respectively,\n and :math:`d` is the input dimension of the channel.\n\n Args:\n channel (Operator or QuantumChannel): input quantum channel.\n target (Operator or QuantumChannel or None): target quantum channel.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The process fidelity :math:`F_{\\text{pro}}`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions.\n QiskitError: if the channel and target are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'process_fidelity', 'channel')\n target = _input_formatter(\n target, Operator, 'process_fidelity', 'target')\n\n if target:\n # Validate dimensions\n if channel.dim != target.dim:\n raise QiskitError(\n 'Input quantum channel and target unitary must have the same '\n 'dimensions ({} != {}).'.format(channel.dim, target.dim))\n\n # Validate complete-positivity and trace-preserving\n for label, chan in [('Input', channel), ('Target', target)]:\n if isinstance(chan, Operator) and (require_cp or require_tp):\n is_unitary = chan.is_unitary()\n # Validate as unitary\n if require_cp and not is_unitary:\n raise QiskitError('{} channel is not completely-positive'.format(label))\n if require_tp and not is_unitary:\n raise QiskitError('{} channel is not trace-preserving'.format(label))\n elif chan is not None:\n # Validate as QuantumChannel\n if require_cp and not chan.is_cp():\n raise QiskitError('{} channel is not completely-positive'.format(label))\n if require_tp and not chan.is_tp():\n raise QiskitError('{} channel is not trace-preserving'.format(label))\n\n if isinstance(target, Operator):\n # Compute fidelity with unitary target by applying the inverse\n # to channel and computing fidelity with the identity\n channel = channel @ target.adjoint()\n target = None\n\n input_dim, _ = channel.dim\n if target is None:\n # Compute process fidelity with identity channel\n if isinstance(channel, Operator):\n # |Tr[U]/dim| ** 2\n fid = np.abs(np.trace(channel.data) / input_dim)**2\n else:\n # Tr[S] / (dim ** 2)\n fid = np.trace(SuperOp(channel).data) / (input_dim**2)\n return float(np.real(fid))\n\n # For comparing two non-unitary channels we compute the state fidelity of\n # the normalized Choi-matrices. This is equivalent to the previous definition\n # when the target is a unitary channel.\n state1 = DensityMatrix(Choi(channel).data / input_dim)\n state2 = DensityMatrix(Choi(target).data / input_dim)\n return state_fidelity(state1, state2, validate=False)\n\n\ndef average_gate_fidelity(channel,\n target=None,\n require_cp=True,\n require_tp=False):\n r\"\"\"Return the average gate fidelity of a noisy quantum channel.\n\n The average gate fidelity :math:`F_{\\text{ave}}` is given by\n\n .. 
math::\n F_{\\text{ave}}(\\mathcal{E}, U)\n &= \\int d\\psi \\langle\\psi|U^\\dagger\n \\mathcal{E}(|\\psi\\rangle\\!\\langle\\psi|)U|\\psi\\rangle \\\\\n &= \\frac{d F_{\\text{pro}}(\\mathcal{E}, U) + 1}{d + 1}\n\n where :math:`F_{\\text{pro}}(\\mathcal{E}, U)` is the\n :meth:`~qiskit.quantum_info.process_fidelity` of the input quantum\n *channel* :math:`\\mathcal{E}` with a *target* unitary :math:`U`, and\n :math:`d` is the dimension of the *channel*.\n\n Args:\n channel (QuantumChannel or Operator): noisy quantum channel.\n target (Operator or None): target unitary operator.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The average gate fidelity :math:`F_{\\text{ave}}`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions,\n or have different input and output dimensions.\n QiskitError: if the channel and target or are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'average_gate_fidelity', 'channel')\n target = _input_formatter(\n target, Operator, 'average_gate_fidelity', 'target')\n\n if target is not None:\n try:\n target = Operator(target)\n except QiskitError:\n raise QiskitError(\n 'Target channel is not a unitary channel. To compare '\n 'two non-unitary channels use the '\n '`qiskit.quantum_info.process_fidelity` function instead.')\n dim, _ = channel.dim\n f_pro = process_fidelity(channel,\n target=target,\n require_cp=require_cp,\n require_tp=require_tp)\n return (dim * f_pro + 1) / (dim + 1)\n\n\ndef gate_error(channel, target=None, require_cp=True, require_tp=False):\n r\"\"\"Return the gate error of a noisy quantum channel.\n\n The gate error :math:`E` is given by the average gate infidelity\n\n .. 
math::\n E(\\mathcal{E}, U) = 1 - F_{\\text{ave}}(\\mathcal{E}, U)\n\n where :math:`F_{\\text{ave}}(\\mathcal{E}, U)` is the\n :meth:`~qiskit.quantum_info.average_gate_fidelity` of the input\n quantum *channel* :math:`\\mathcal{E}` with a *target* unitary\n :math:`U`.\n\n Args:\n channel (QuantumChannel): noisy quantum channel.\n target (Operator or None): target unitary operator.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The average gate error :math:`E`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions,\n or have different input and output dimensions.\n QiskitError: if the channel and target or are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'gate_error', 'channel')\n target = _input_formatter(\n target, Operator, 'gate_error', 'target')\n return 1 - average_gate_fidelity(\n channel, target=target, require_cp=require_cp, require_tp=require_tp)\n\n\ndef diamond_norm(choi, **kwargs):\n r\"\"\"Return the diamond norm of the input quantum channel object.\n\n This function computes the completely-bounded trace-norm (often\n referred to as the diamond-norm) of the input quantum channel object\n using the semidefinite-program from reference [1].\n\n Args:\n choi(Choi or QuantumChannel): a quantum channel object or\n Choi-matrix array.\n kwargs: optional arguments to pass to CVXPY solver.\n\n Returns:\n float: The completely-bounded trace norm\n :math:`\\|\\mathcal{E}\\|_{\\diamond}`.\n\n Raises:\n QiskitError: if CVXPY package cannot be found.\n\n Additional Information:\n The input to this function is typically *not* a CPTP quantum\n channel, but rather the *difference* between two quantum channels\n :math:`\\|\\Delta\\mathcal{E}\\|_\\diamond` where\n :math:`\\Delta\\mathcal{E} = \\mathcal{E}_1 - \\mathcal{E}_2`.\n\n Reference:\n J. Watrous. \"Simpler semidefinite programs for completely bounded\n norms\", arXiv:1207.5726 [quant-ph] (2012).\n\n .. note::\n\n This function requires the optional CVXPY package to be installed.\n Any additional kwargs will be passed to the ``cvxpy.solve``\n function. See the CVXPY documentation for information on available\n SDP solvers.\n \"\"\"\n _cvxpy_check('`diamond_norm`') # Check CVXPY is installed\n\n choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi'))\n\n def cvx_bmat(mat_r, mat_i):\n \"\"\"Block matrix for embedding complex matrix in reals\"\"\"\n return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])\n\n # Dimension of input and output spaces\n dim_in = choi._input_dim\n dim_out = choi._output_dim\n size = dim_in * dim_out\n\n # SDP Variables to convert to real valued problem\n r0_r = cvxpy.Variable((dim_in, dim_in))\n r0_i = cvxpy.Variable((dim_in, dim_in))\n r0 = cvx_bmat(r0_r, r0_i)\n\n r1_r = cvxpy.Variable((dim_in, dim_in))\n r1_i = cvxpy.Variable((dim_in, dim_in))\n r1 = cvx_bmat(r1_r, r1_i)\n\n x_r = cvxpy.Variable((size, size))\n x_i = cvxpy.Variable((size, size))\n iden = sparse.eye(dim_out)\n\n # Watrous uses row-vec convention for his Choi matrix while we use\n # col-vec. 
It turns out row-vec convention is requried for CVXPY too\n # since the cvxpy.kron function must have a constant as its first argument.\n c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r], [x_r.T, cvxpy.kron(iden, r1_r)]])\n c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i], [-x_i.T, cvxpy.kron(iden, r1_i)]])\n c = cvx_bmat(c_r, c_i)\n\n # Convert col-vec convention Choi-matrix to row-vec convention and\n # then take Transpose: Choi_C -> Choi_R.T\n choi_rt = np.transpose(\n np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),\n (3, 2, 1, 0)).reshape(choi.data.shape)\n choi_rt_r = choi_rt.real\n choi_rt_i = choi_rt.imag\n\n # Constraints\n cons = [\n r0 >> 0, r0_r == r0_r.T, r0_i == - r0_i.T, cvxpy.trace(r0_r) == 1,\n r1 >> 0, r1_r == r1_r.T, r1_i == - r1_i.T, cvxpy.trace(r1_r) == 1,\n c >> 0\n ]\n\n # Objective function\n obj = cvxpy.Maximize(cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))\n prob = cvxpy.Problem(obj, cons)\n sol = prob.solve(**kwargs)\n return sol\n\n\ndef _cvxpy_check(name):\n \"\"\"Check that a supported CVXPY version is installed\"\"\"\n # Check if CVXPY package is installed\n if not _HAS_CVX:\n raise QiskitError(\n 'CVXPY backage is requried for {}. Install'\n ' with `pip install cvxpy` to use.'.format(name))\n # Check CVXPY version\n version = cvxpy.__version__\n if version[0] != '1':\n raise ImportError(\n 'Incompatible CVXPY version {} found.'\n ' Install version >=1.0.'.format(version))\n\n\n# pylint: disable=too-many-return-statements\ndef _input_formatter(obj, fallback_class, func_name, arg_name):\n \"\"\"Formatting function for input conversion\"\"\"\n # Empty input\n if obj is None:\n return obj\n\n # Channel-like input\n if isinstance(obj, QuantumChannel):\n return obj\n if hasattr(obj, 'to_quantumchannel'):\n return obj.to_quantumchannel()\n if hasattr(obj, 'to_channel'):\n return obj.to_channel()\n\n # Unitary-like input\n if isinstance(obj, (Gate, BaseOperator)):\n return Operator(obj)\n if hasattr(obj, 'to_operator'):\n return obj.to_operator()\n\n warnings.warn(\n 'Passing in a list or Numpy array to `{}` `{}` argument is '\n 'deprecated as of 0.17.0 since the matrix representation cannot be inferred '\n 'unambiguously. Use a Gate or BaseOperator subclass (eg. Operator, '\n 'SuperOp, Choi) object instead.'.format(func_name, arg_name),\n DeprecationWarning)\n warnings.warn(\n 'Treating array input as a {} object'.format(fallback_class.__name__))\n return fallback_class(obj)\n"
] | [
[
"scipy.sparse.eye",
"numpy.real",
"numpy.trace",
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
noenfugler/jesse | [
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8"
] | [
"jesse/indicators/supersmoother.py",
"jesse/indicators/sinwma.py",
"jesse/indicators/damiani_volatmeter.py",
"jesse/indicators/alligator.py"
] | [
"from typing import Union\n\nimport numpy as np\nfrom numba import njit\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\n\ndef supersmoother(candles: np.ndarray, period: int = 14, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n Super Smoother Filter 2pole Butterworth\n This indicator was described by John F. Ehlers\n\n :param candles: np.ndarray\n :param period: int - default=14\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n # Accept normal array too.\n if len(candles.shape) == 1:\n source = candles\n else:\n source = get_candle_source(candles, source_type=source_type)\n\n res = supersmoother_fast(source, period)\n\n return res if sequential else res[-1]\n\n\n@njit\ndef supersmoother_fast(source, period):\n a = np.exp(-1.414 * np.pi / period)\n b = 2 * a * np.cos(1.414 * np.pi / period)\n newseries = np.copy(source)\n for i in range(2, source.shape[0]):\n newseries[i] = (1 + a ** 2 - b) / 2 * (source[i] + source[i - 1]) \\\n + b * newseries[i - 1] - a ** 2 * newseries[i - 2]\n return newseries\n",
"from typing import Union\n\nimport numpy as np\nfrom numpy.lib.stride_tricks import sliding_window_view\n\nfrom jesse.helpers import get_candle_source, slice_candles, same_length\n\n\ndef sinwma(candles: np.ndarray, period: int = 14, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n Sine Weighted Moving Average (SINWMA)\n\n :param candles: np.ndarray\n :param period: int - default: 14\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n sines = np.array([np.sin((i + 1) * np.pi / (period + 1)) for i in range(0, period)])\n w = sines / sines.sum()\n swv = sliding_window_view(source, window_shape=period)\n res = np.average(swv, weights=w, axis=-1)\n\n return same_length(candles, res) if sequential else res[-1]\n",
"from collections import namedtuple\n\nimport numpy as np\nimport talib\nfrom numba import njit\n\nfrom jesse.helpers import get_candle_source\nfrom jesse.helpers import slice_candles\n\nDamianiVolatmeter = namedtuple('DamianiVolatmeter', ['vol', 'anti'])\n\n\ndef damiani_volatmeter(candles: np.ndarray, vis_atr: int = 13, vis_std: int = 20, sed_atr: int = 40, sed_std: int = 100,\n threshold: float = 1.4, source_type: str = \"close\",\n sequential: bool = False) -> DamianiVolatmeter:\n \"\"\"\n Damiani Volatmeter\n\n :param candles: np.ndarray\n :param vis_atr: int - default=13\n :param vis_std: int - default=20\n :param sed_atr: int - default=40\n :param sed_std: int - default=100\n :param threshold: float - default=1.4\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n atrvis = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=vis_atr)\n atrsed = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=sed_atr)\n\n vol, t = damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std, threshold)\n\n if sequential:\n return DamianiVolatmeter(vol, t)\n else:\n return DamianiVolatmeter(vol[-1], t[-1])\n\n\n@njit\ndef damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std,\n threshold): # Function is compiled to machine code when called the first time\n lag_s = 0.5\n\n vol = np.full_like(source, 0)\n t = np.full_like(source, 0)\n for i in range(source.shape[0]):\n if not (i < sed_std):\n vol[i] = atrvis[i] / atrsed[i] + lag_s * (vol[i - 1] - vol[i - 3])\n anti_thres = np.std(source[i - vis_std:i]) / np.std(source[i - sed_std:i])\n t[i] = threshold - anti_thres\n return vol, t\n",
"from collections import namedtuple\n\nimport numpy as np\n\nfrom jesse.helpers import get_candle_source, np_shift, slice_candles\n\nAG = namedtuple('AG', ['jaw', 'teeth', 'lips'])\n\n\ndef alligator(candles: np.ndarray, source_type: str = \"close\", sequential: bool = False) -> AG:\n \"\"\"\n Alligator\n\n :param candles: np.ndarray\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: AG(jaw, teeth, lips)\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)\n teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)\n lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)\n\n if sequential:\n return AG(jaw, teeth, lips)\n else:\n return AG(jaw[-1], teeth[-1], lips[-1])\n\n\ndef numpy_ewma(data: np.ndarray, window: int):\n \"\"\"\n\n :param data:\n :param window:\n :return:\n \"\"\"\n alpha = 1 / window\n scale = 1 / (1 - alpha)\n n = data.shape[0]\n scale_arr = (1 - alpha) ** (-1 * np.arange(n))\n weights = (1 - alpha) ** np.arange(n)\n pw0 = (1 - alpha) ** (n - 1)\n mult = data * pw0 * scale_arr\n cumsums = mult.cumsum()\n out = cumsums * scale_arr[::-1] / weights.cumsum()\n\n return out\n"
] | [
[
"numpy.copy",
"numpy.exp",
"numpy.cos"
],
[
"numpy.lib.stride_tricks.sliding_window_view",
"numpy.average",
"numpy.sin"
],
[
"numpy.full_like",
"numpy.std"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WeilerP/cellrank | [
"c8c2b9f6bd2448861fb414435aee7620ca5a0bad",
"c8c2b9f6bd2448861fb414435aee7620ca5a0bad",
"c8c2b9f6bd2448861fb414435aee7620ca5a0bad",
"c8c2b9f6bd2448861fb414435aee7620ca5a0bad"
] | [
"cellrank/pl/_circular_projection.py",
"examples/other/plot_model.py",
"cellrank/tl/kernels/_cytotrace_kernel.py",
"cellrank/tl/estimators/_base_estimator.py"
] | [
"from typing import Any, Tuple, Union, Mapping, Callable, Optional, Sequence\nfrom typing_extensions import Literal\n\nfrom enum import auto\nfrom types import MappingProxyType\nfrom pathlib import Path\n\nimport scvelo as scv\nfrom anndata import AnnData\nfrom cellrank import logging as logg\nfrom cellrank.tl import Lineage\nfrom cellrank._key import Key\nfrom scanpy._utils import deprecated_arg_names\nfrom cellrank.tl._enum import ModeEnum\nfrom cellrank.ul._docs import d\nfrom cellrank.pl._utils import _held_karp\nfrom cellrank.tl._utils import save_fig, _unique_order_preserving\nfrom cellrank.ul._utils import _check_collection\nfrom cellrank.tl._lineage import PrimingDegree\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import pairwise_distances\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm, LinearSegmentedColormap\nfrom matplotlib.collections import LineCollection\n\n\nclass LineageOrder(ModeEnum): # noqa: D101\n DEFAULT = auto()\n OPTIMAL = auto()\n\n\nclass LabelRot(ModeEnum): # noqa: D101\n DEFAULT = auto()\n BEST = auto()\n\n\nMetric_T = Union[str, Callable, np.ndarray, pd.DataFrame]\n_N = 200\n\n\ndef _get_distances(data: Union[np.ndarray, Lineage], metric: Metric_T) -> np.ndarray:\n if isinstance(data, Lineage):\n data = data.X\n\n if isinstance(metric, str) or callable(metric):\n metric = pairwise_distances(data.T, metric=metric)\n elif isinstance(metric, (pd.DataFrame, np.ndarray)):\n shape = (data.shape[1], data.shape[1])\n if metric.shape != shape:\n raise ValueError(\n f\"Expected an `numpy.array` or `pandas.DataFrame` of shape `{shape}`, found `{metric.shape}`.\"\n )\n else:\n raise TypeError(\n f\"Expected either metric defined by `str`, `callable` or a pairwise distance matrix of type\"\n f\" `numpy.ndarray` or `pandas.DataFrame`, found `{type(metric).__name__}`.\"\n )\n\n return np.asarray(metric, dtype=np.float64)\n\n\ndef _get_optimal_order(data: Lineage, metric: Metric_T) -> Tuple[float, np.ndarray]:\n \"\"\"Solve the TSP using dynamic programming.\"\"\"\n return _held_karp(_get_distances(data, metric))\n\n\[email protected]\n@deprecated_arg_names({\"labeldistance\": \"label_distance\", \"labelrot\": \"label_rot\"})\ndef circular_projection(\n adata: AnnData,\n keys: Union[str, Sequence[str]],\n backward: bool = False,\n lineages: Optional[Union[str, Sequence[str]]] = None,\n early_cells: Optional[Union[Mapping[str, Sequence[str]], Sequence[str]]] = None,\n lineage_order: Optional[Literal[\"default\", \"optimal\"]] = None,\n metric: Union[str, Callable, np.ndarray, pd.DataFrame] = \"correlation\",\n normalize_by_mean: bool = True,\n ncols: int = 4,\n space: float = 0.25,\n use_raw: bool = False,\n text_kwargs: Mapping[str, Any] = MappingProxyType({}),\n label_distance: float = 1.25,\n label_rot: Union[Literal[\"default\", \"best\"], float] = \"best\",\n show_edges: bool = True,\n key_added: Optional[str] = None,\n figsize: Optional[Tuple[float, float]] = None,\n dpi: Optional[int] = None,\n save: Optional[Union[str, Path]] = None,\n **kwargs: Any,\n):\n r\"\"\"\n Plot absorption probabilities on a circular embedding as in :cite:`velten:17`.\n\n Parameters\n ----------\n %(adata)s\n keys\n Keys in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names`. Additional keys are:\n\n - `'kl_divergence'` - as in :cite:`velten:17`, computes KL-divergence between the fate probabilities\n of a cell and the average fate probabilities. 
See ``early_cells`` for more information.\n - `'entropy'` - as in :cite:`setty:19`, computes entropy over a cells fate probabilities.\n\n %(backward)s\n lineages\n Lineages to plot. If `None`, plot all lineages.\n early_cells\n Cell ids or a mask marking early cells used to define the average fate probabilities. If `None`, use all cells.\n Only used when `'kl_divergence'` is in ``keys``. If a :class:`dict`, key specifies a cluster key in\n :attr:`anndata.AnnData.obs` and the values specify cluster labels containing early cells.\n lineage_order\n Can be one of the following:\n\n - `None` - it will determined automatically, based on the number of lineages.\n - `'optimal'` - order lineages optimally by solving the Travelling salesman problem (TSP).\n Recommended for <= `20` lineages.\n - `'default'` - use the order as specified by ``lineages``.\n\n metric\n Metric to use when constructing pairwise distance matrix when ``lineage_order = 'optimal'``. For available\n options, see :func:`sklearn.metrics.pairwise_distances`.\n normalize_by_mean\n If `True`, normalize each lineage by its mean probability, as done in :cite:`velten:17`.\n ncols\n Number of columns when plotting multiple ``keys``.\n space\n Horizontal and vertical space between for :func:`matplotlib.pyplot.subplots_adjust`.\n use_raw\n Whether to access :attr:`anndata.AnnData.raw` when there are ``keys`` in :attr:`anndata.AnnData.var_names`.\n text_kwargs\n Keyword arguments for :func:`matplotlib.pyplot.text`.\n label_distance\n Distance at which the lineage labels will be drawn.\n label_rot\n How to rotate the labels. Valid options are:\n\n - `'best'` - rotate labels so that they are easily readable.\n - `'default'` - use :mod:`matplotlib`'s default.\n - `None` - same as `'default'`.\n\n If a :class:`float`, all labels will be rotated by this many degrees.\n show_edges\n Whether to show the edges surrounding the simplex.\n key_added\n Key in :attr:`anndata.AnnData.obsm` where to add the circular embedding. 
If `None`, it will be set to\n `'X_fate_simplex_{fwd,bwd}'`, based on ``backward``.\n %(plotting)s\n kwargs\n Keyword arguments for :func:`scvelo.pl.scatter`.\n\n Returns\n -------\n %(just_plots)s\n Also updates ``adata`` with the following fields:\n\n - :attr:`anndata.AnnData.obsm` ``['{key_added}']`` - the circular projection.\n - :attr:`anndata.AnnData.obs` ``['to_{initial,terminal}_states_{method}']`` - the priming degree,\n if a method is present in ``keys``.\n \"\"\"\n if label_distance is not None and label_distance < 0:\n raise ValueError(\n f\"Expected `label_distance` to be positive, found `{label_distance}`.\"\n )\n\n if label_rot is None:\n label_rot = LabelRot.DEFAULT\n label_rot = LabelRot(label_rot)\n\n suffix = \"bwd\" if backward else \"fwd\"\n if key_added is None:\n key_added = \"X_fate_simplex_\" + suffix\n\n if isinstance(keys, str):\n keys = (keys,)\n\n keys = _unique_order_preserving(keys)\n keys_ = _check_collection(\n adata, keys, \"obs\", key_name=\"Observation\", raise_exc=False\n ) + _check_collection(\n adata, keys, \"var_names\", key_name=\"Gene\", raise_exc=False, use_raw=use_raw\n )\n haystack = set(PrimingDegree)\n keys = keys_ + [k for k in keys if k in haystack]\n keys = _unique_order_preserving(keys)\n\n if not len(keys):\n raise ValueError(\"No valid keys have been selected.\")\n\n lineage_key = Key.obsm.abs_probs(backward)\n if lineage_key not in adata.obsm:\n raise KeyError(f\"Lineages key `{lineage_key!r}` not found in `adata.obsm`.\")\n\n probs: Lineage = adata.obsm[lineage_key]\n\n if isinstance(lineages, str):\n lineages = (lineages,)\n elif lineages is None:\n lineages = probs.names\n\n probs = adata.obsm[lineage_key][lineages]\n n_lin = probs.shape[1]\n if n_lin < 3:\n raise ValueError(f\"Expected at least `3` lineages, found `{n_lin}`.\")\n\n X = probs.X.copy()\n if normalize_by_mean:\n X /= np.mean(X, axis=0)[None, :]\n X /= X.sum(1)[:, None]\n # this happens when cells for sel. 
lineages sum to 1 (or when the lineage average is 0, which is unlikely)\n X = np.nan_to_num(X, nan=1.0 / n_lin, copy=False)\n\n if lineage_order is None:\n lineage_order = (\n LineageOrder.OPTIMAL if 3 < n_lin <= 20 else LineageOrder.DEFAULT\n )\n logg.debug(f\"Set ordering to `{lineage_order}`\")\n lineage_order = LineageOrder(lineage_order)\n\n if lineage_order == LineageOrder.OPTIMAL:\n logg.info(f\"Solving TSP for `{n_lin}` states\")\n _, order = _get_optimal_order(X, metric=metric)\n else:\n order = np.arange(n_lin)\n\n probs = probs[:, order]\n X = X[:, order]\n\n angle_vec = np.linspace(0, 2 * np.pi, n_lin, endpoint=False)\n angle_vec_sin = np.cos(angle_vec)\n angle_vec_cos = np.sin(angle_vec)\n\n x = np.sum(X * angle_vec_sin, axis=1)\n y = np.sum(X * angle_vec_cos, axis=1)\n adata.obsm[key_added] = np.c_[x, y]\n\n nrows = int(np.ceil(len(keys) / ncols))\n fig, ax = plt.subplots(\n nrows=nrows,\n ncols=ncols,\n figsize=(ncols * 5, nrows * 5) if figsize is None else figsize,\n dpi=dpi,\n )\n\n fig.subplots_adjust(wspace=space, hspace=space)\n axes = np.ravel([ax])\n\n text_kwargs = dict(text_kwargs)\n text_kwargs[\"ha\"] = \"center\"\n text_kwargs[\"va\"] = \"center\"\n\n _i = 0\n for _i, (k, ax) in enumerate(zip(keys, axes)):\n\n set_lognorm, colorbar = False, kwargs.pop(\"colorbar\", True)\n try:\n _ = PrimingDegree(k)\n logg.debug(f\"Calculating priming degree using `method={k}`\")\n val = probs.priming_degree(method=k, early_cells=early_cells)\n k = f\"{lineage_key}_{k}\"\n adata.obs[k] = val\n except ValueError:\n pass\n\n scv.pl.scatter(\n adata,\n basis=key_added,\n color=k,\n show=False,\n ax=ax,\n use_raw=use_raw,\n norm=LogNorm() if set_lognorm else None,\n colorbar=colorbar,\n **kwargs,\n )\n if colorbar and set_lognorm:\n cbar = ax.collections[0].colorbar\n cax = cbar.locator.axis\n ticks = cax.minor.locator.tick_values(cbar.vmin, cbar.vmax)\n ticks = [ticks[0], ticks[len(ticks) // 2 + 1], ticks[-1]]\n cbar.set_ticks(ticks)\n cbar.set_ticklabels([f\"{t:.2f}\" for t in ticks])\n cbar.update_ticks()\n\n patches, texts = ax.pie(\n np.ones_like(angle_vec),\n labeldistance=label_distance,\n rotatelabels=True,\n labels=probs.names[::-1],\n startangle=-360 / len(angle_vec) / 2,\n counterclock=False,\n textprops=text_kwargs,\n )\n\n for patch in patches:\n patch.set_visible(False)\n\n # clockwise\n for color, text in zip(probs.colors[::-1], texts):\n if isinstance(label_rot, (int, float)):\n text.set_rotation(label_rot)\n elif label_rot == LabelRot.BEST:\n rot = text.get_rotation()\n text.set_rotation(rot + 90 + (1 - rot // 180) * 180)\n elif label_rot != LabelRot.DEFAULT:\n raise NotImplementedError(\n f\"Label rotation `{label_rot}` is not yet implemented.\"\n )\n text.set_color(color)\n\n if not show_edges:\n continue\n\n for i, color in enumerate(probs.colors):\n next = (i + 1) % n_lin\n x = 1.04 * np.linspace(angle_vec_sin[i], angle_vec_sin[next], _N)\n y = 1.04 * np.linspace(angle_vec_cos[i], angle_vec_cos[next], _N)\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n cmap = LinearSegmentedColormap.from_list(\n \"abs_prob_cmap\", [color, probs.colors[next]], N=_N\n )\n lc = LineCollection(segments, cmap=cmap, zorder=-1)\n lc.set_array(np.linspace(0, 1, _N))\n lc.set_linewidth(2)\n ax.add_collection(lc)\n\n for j in range(_i + 1, len(axes)):\n axes[j].remove()\n\n if save is not None:\n save_fig(fig, save)\n",
"\"\"\"\nFit models and plot gene trends\n-------------------------------\n\nThis example shows how to prepare, fit and plot various models to gene expression trends.\n\nWe will focus mostly on Generalized Additive Models (`GAMs <https://en.wikipedia.org/wiki/Generalized_additive_model>`_)\nand show how to do this for :mod:`sklearn` estimators towards the end. GAMs are flexible models that are well suited to\nmodel non-linear gene trends as they often appear in single-cell data. Further, they have the advantage that it is\nrelatively straightforward to derive a confidence interval around the main trend.\n\"\"\"\n\nimport cellrank as cr\n\nfrom sklearn.svm import SVR\n\nadata = cr.datasets.pancreas_preprocessed(\"../example.h5ad\")\nadata\n\n# %%\n# First, we need to compute a set of terminal states and we have to estimate fate probabilities towards these. The fate\n# probabilities will be used as weights when fitting the model - they determine how much each cell contributes to each\n# lineage.\ncr.tl.terminal_states(\n adata,\n cluster_key=\"clusters\",\n weight_connectivities=0.2,\n softmax_scale=4,\n show_progress_bar=False,\n)\ncr.tl.lineages(adata)\n\n# %%\n# Models in :mod:`cellrank.ul.models` follow similar patterns as :mod:`sklearn` models.\n# We begin by initializing and preparing the model for fitting. :meth:`cellrank.ul.models.BaseModel.prepare` requires\n# only the gene name and the lineage name and must be called before :meth:`cellrank.ul.models.BaseModel.fit`. It also\n# includes various useful parameters, such as ``time_range`` or ``weight_threshold``, which determine the start and\n# end pseudotime and the minimum required threshold for lineage probabilities, respectively.\nmodel = cr.ul.models.GAM(adata)\nmodel.prepare(\n gene=\"Pak3\",\n lineage=\"Alpha\",\n time_key=\"dpt_pseudotime\",\n data_key=\"Ms\",\n n_test_points=100,\n)\n# %%\n# CellRank allows any pseudotime from ``adata.obs`` to be passed via the ``time_key``, e.g. Diffusion Pseudotime (DPT)\n# :cite:`haghverdi:16`, :mod:`scvelo`’s latent time :cite:`bergen:20`, Palantir’s pseudotime :cite:`setty:19`, etc.\n#\n# Further, CellRank accepts imputed gene expression values stored in ``adata.layers`` via the ``data_key``, i.e. you\n# can pass MAGIC :cite:`vandijk:18` imputed data, :func:`scvelo.pp.moments` :cite:`bergen:20` (used below) or any other\n# form of imputation.\n\n# %%\n# Once the model has been prepared, it is ready for fitting and prediction.\ny_hat = model.fit().predict()\ny_hat\n\n# %%\n# Optionally, we can also get the confidence interval. Models which don't have a method to compute it, such as\n# :class:`cellrank.ul.models.SKLearnModel` wrapper for some :mod:`sklearn` estimators, can use the default the\n# confidence interval :meth:`cellrank.ul.models.BaseModel.default_confidence_interval`.\nconf_int = model.confidence_interval()\nconf_int[:5]\n\n# %%\n# After the prediction and optionally the confidence interval calculation, we can plot the smoothed gene expression.\n#\n# Cells in this plot have been colored by their fate probability of reaching the `Alpha` terminal state. We\n# include these probabilities as weights in the loss function when fitting the model. 
This allows us to weight each\n# cell by its relative contribution to the lineage, without needing to subset cells for each lineage.\nmodel.plot(conf_int=True)\n\n# %%\n# Lastly, wrapping :mod:`sklearn` estimators is fairly simple, we just pass the instance\n# to :class:`cellrank.ul.models.SKLearnModel`.\nsvr = SVR()\nmodel = cr.ul.models.SKLearnModel(adata, model=svr)\nmodel\n",
"from typing import Any\nfrom typing_extensions import Literal\n\nfrom enum import auto\n\nfrom anndata import AnnData\nfrom cellrank import logging as logg\nfrom cellrank._key import Key\nfrom cellrank.tl._enum import ModeEnum\nfrom cellrank.ul._docs import d, inject_docs\nfrom cellrank.tl._utils import _correlation_test_helper\nfrom cellrank.tl.kernels._pseudotime_kernel import PseudotimeKernel\n\nimport numpy as np\nfrom scipy.stats import gmean, hmean\nfrom scipy.sparse import issparse\n\n\nclass CytoTRACEAggregation(ModeEnum): # noqa: D101\n MEAN = auto()\n MEDIAN = auto()\n GMEAN = auto()\n HMEAN = auto()\n\n\[email protected]\nclass CytoTRACEKernel(PseudotimeKernel):\n \"\"\"\n Kernel which computes directed transition probabilities based on a KNN graph and the CytoTRACE score \\\n :cite:`gulati:20`.\n\n The KNN graph contains information about the (undirected) connectivities among cells, reflecting their similarity.\n CytoTRACE can be used to estimate cellular plasticity and in turn, a pseudotemporal ordering of cells from more\n plastic to less plastic states.\n This kernel internally uses the :class:`cellrank.tl.kernels.PseudotimeKernel` to direct the KNN graph\n on the basis of the CytoTRACE-derived pseudotime.\n\n %(density_correction)s\n\n Parameters\n ----------\n %(adata)s\n %(backward)s\n %(cytotrace.parameters)s\n\n %(cond_num)s\n check_connectivity\n Check whether the underlying KNN graph is connected.\n kwargs\n Keyword arguments for :class:`cellrank.tl.kernels.PseudotimeKernel`.\n\n Example\n -------\n Workflow::\n\n # import packages and load data\n import scvelo as scv\n import cellrank as cr\n adata = cr.datasets.pancreas()\n\n # standard pre-processing\n sc.pp.filter_genes(adata, min_cells=10)\n sc.pp.normalize_total(adata)\n sc.pp.log1p(adata)\n sc.pp.highly_variable_genes(adata)\n\n # CytoTRACE by default uses imputed data - a simple way to compute KNN-imputed data is to use scVelo's moments\n # function. 
However, note that this function expects `spliced` counts because it's designed for RNA velocity,\n # so we're using a simple hack here:\n if 'spliced' not in adata.layers or 'unspliced' not in adata.layers:\n adata.layers['spliced'] = adata.X\n adata.layers['unspliced'] = adata.X\n\n # compute KNN-imputation using scVelo's moments function\n scv.pp.moments(adata)\n\n # import and initialize the CytoTRACE kernel, compute transition matrix - done!\n from cellrank.tl.kernels import CytoTRACEKernel\n ctk = CytoTRACEKernel(adata).compute_transition_matrix()\n \"\"\"\n\n def __init__(\n self,\n adata: AnnData,\n backward: bool = False,\n layer: str = \"Ms\",\n aggregation: Literal[\n \"mean\", \"median\", \"hmean\", \"gmean\"\n ] = CytoTRACEAggregation.MEAN,\n use_raw: bool = False,\n compute_cond_num: bool = False,\n check_connectivity: bool = False,\n **kwargs: Any,\n ):\n super().__init__(\n adata,\n backward=backward,\n time_key=Key.cytotrace(\"pseudotime\"),\n compute_cond_num=compute_cond_num,\n check_connectivity=check_connectivity,\n layer=layer,\n aggregation=aggregation,\n use_raw=use_raw,\n **kwargs,\n )\n self._time_key = Key.cytotrace(\"pseudotime\") # quirk or PT kernel\n\n def _read_from_adata(\n self,\n time_key: str,\n layer: str = \"Ms\",\n aggregation: Literal[\n \"mean\", \"median\", \"hmean\", \"gmean\"\n ] = CytoTRACEAggregation.MEAN,\n use_raw: bool = True,\n **kwargs: Any,\n ) -> None:\n self.compute_cytotrace(layer=layer, aggregation=aggregation, use_raw=use_raw)\n\n super()._read_from_adata(time_key=time_key, **kwargs)\n\n @d.get_sections(base=\"cytotrace\", sections=[\"Parameters\"])\n @inject_docs(ct=CytoTRACEAggregation)\n def compute_cytotrace(\n self,\n layer: str = \"Ms\",\n aggregation: Literal[\n \"mean\", \"median\", \"hmean\", \"gmean\"\n ] = CytoTRACEAggregation.MEAN,\n use_raw: bool = False,\n ) -> None:\n \"\"\"\n Re-implementation of the CytoTRACE algorithm :cite:`gulati:20` to estimate cellular plasticity.\n\n Computes the number of genes expressed per cell and ranks genes according to their correlation with this\n measure. Next, it selects to top-correlating genes and aggregates their (imputed) expression to obtain\n the CytoTRACE score. A high score stands for high differentiation potential (naive, plastic cells) and\n a low score stands for low differentiation potential (mature, differentiation cells).\n\n Parameters\n ----------\n layer\n Key in :attr:`anndata.AnnData.layers` or `'X'` for :attr:`anndata.AnnData.X`\n from where to get the expression.\n aggregation\n How to aggregate expression of the top-correlating genes. Valid options are:\n\n - `{ct.MEAN!r}` - arithmetic mean.\n - `{ct.MEDIAN!r}` - median.\n - `{ct.HMEAN!r}` - harmonic mean.\n - `{ct.GMEAN!r}` - geometric mean.\n\n use_raw\n Whether to use the :attr:`anndata.AnnData.raw` to compute the number of genes expressed per cell\n (#genes/cell) and the correlation of gene expression across cells with #genes/cell.\n\n Returns\n -------\n Nothing, just modifies :attr:`anndata.AnnData.obs` with the following keys:\n\n - `'ct_score'` - the normalized CytoTRACE score.\n - `'ct_pseudotime'` - associated pseudotime, essentially `1 - CytoTRACE score`.\n - `'ct_num_exp_genes'` - the number of genes expressed per cell, basis of the CytoTRACE score.\n\n It also modifies :attr:`anndata.AnnData.var` with the following keys:\n\n - `'ct_gene_corr'` - the correlation as specified above.\n - `'ct_correlates'` - indication of the genes used to compute the CytoTRACE score, i.e. 
the ones that\n correlated best with `'num_exp_genes'`.\n\n Notes\n -----\n This will not exactly reproduce the results of the original CytoTRACE algorithm :cite:`gulati:20` because we\n allow for any normalization and imputation techniques whereas CytoTRACE has built-in specific methods for that.\n \"\"\"\n # check use_raw\n aggregation = CytoTRACEAggregation(aggregation)\n if use_raw and self.adata.raw is None:\n logg.warning(\"`adata.raw` is `None`. Setting `use_raw=False`\")\n use_raw = False\n if use_raw and self.adata.raw.n_vars != self.adata.n_vars:\n logg.warning(\n f\"`adata.raw` has different number of genes ({self.adata.raw.n_vars}) \"\n f\"than `adata` ({self.adata.n_vars}). Setting `use_raw=False`\"\n )\n use_raw = False\n\n adata_mraw = self.adata.raw if use_raw else self.adata\n if layer != \"X\" and layer not in self.adata.layers:\n raise KeyError(\n f\"Unable to find `{layer!r}` in `adata.layers`. \"\n f\"Valid option are: `{sorted({'X'} | set(self.adata.layers.keys()))}`.\"\n )\n\n msg = f\"Computing CytoTRACE score with `{self.adata.n_vars}` genes\"\n if self.adata.n_vars < 10000:\n msg += \". Consider using more than `10000` genes\"\n start = logg.info(msg)\n\n # compute number of expressed genes per cell\n logg.debug(\n f\"Computing number of genes expressed per cell with `use_raw={use_raw}`\"\n )\n num_exp_genes = np.array((adata_mraw.X > 0).sum(axis=1)).reshape(-1)\n self.adata.obs[Key.cytotrace(\"num_exp_genes\")] = num_exp_genes\n\n # fmt: off\n # compute correlation with all genes\n logg.debug(\"Correlating all genes with number of genes expressed per cell\")\n gene_corr, _, _, _ = _correlation_test_helper(adata_mraw.X.T, num_exp_genes[:, None])\n\n # annotate the top 200 genes in terms of correlation\n logg.debug(\"Finding the top `200` most correlated genes\")\n self.adata.var[Key.cytotrace(\"gene_corr\")] = gene_corr\n top_200 = self.adata.var.sort_values(by=Key.cytotrace(\"gene_corr\"), ascending=False).index[:200]\n self.adata.var[Key.cytotrace(\"correlates\")] = False\n self.adata.var.loc[top_200, Key.cytotrace(\"correlates\")] = True\n\n # compute mean/median over top 200 genes, aggregate over genes and shift to [0, 1] range\n logg.debug(f\"Aggregating imputed gene expression using aggregation `{aggregation}` in layer `{layer}`\")\n corr_mask = self.adata.var[Key.cytotrace(\"correlates\")]\n imputed_exp = self.adata[:, corr_mask].X if layer == \"X\" else self.adata[:, corr_mask].layers[layer]\n if issparse(imputed_exp):\n imputed_exp = imputed_exp.A\n\n # aggregate across the top 200 genes\n if aggregation == CytoTRACEAggregation.MEAN:\n cytotrace_score = np.mean(imputed_exp, axis=1)\n elif aggregation == CytoTRACEAggregation.MEDIAN:\n cytotrace_score = np.median(imputed_exp, axis=1)\n elif aggregation == CytoTRACEAggregation.GMEAN:\n cytotrace_score = gmean(imputed_exp, axis=1)\n elif aggregation == CytoTRACEAggregation.HMEAN:\n cytotrace_score = hmean(imputed_exp, axis=1)\n else:\n raise NotImplementedError(f\"Aggregation method `{aggregation}` is not yet implemented.\")\n # fmt: on\n\n # scale to 0-1 range\n cytotrace_score -= np.min(cytotrace_score)\n cytotrace_score /= np.max(cytotrace_score)\n self.adata.obs[Key.cytotrace(\"score\")] = cytotrace_score\n self.adata.obs[Key.cytotrace(\"pseudotime\")] = 1 - cytotrace_score\n\n self.adata.uns[Key.cytotrace(\"params\")] = {\n \"aggregation\": aggregation,\n \"layer\": layer,\n \"use_raw\": use_raw,\n }\n\n logg.info(\n f\"Adding `adata.obs[{Key.cytotrace('score')!r}]`\\n\"\n f\" 
`adata.obs[{Key.cytotrace('pseudotime')!r}]`\\n\"\n f\" `adata.obs[{Key.cytotrace('num_exp_genes')!r}]`\\n\"\n f\" `adata.var[{Key.cytotrace('gene_corr')!r}]`\\n\"\n f\" `adata.var[{Key.cytotrace('correlates')!r}]`\\n\"\n f\" `adata.uns[{Key.cytotrace('params')!r}]`\\n\"\n f\" Finish\",\n time=start,\n )\n",
"from typing import Any, Dict, List, Tuple, Union, Mapping, Callable, Optional, Sequence\nfrom typing_extensions import Literal\n\nfrom abc import ABC, abstractmethod\nfrom copy import copy as copy_\nfrom copy import deepcopy\nfrom inspect import Parameter, signature, getmembers, currentframe\nfrom contextlib import contextmanager\n\nfrom anndata import AnnData\nfrom cellrank._key import Key\nfrom cellrank.ul._docs import d\nfrom cellrank.tl._mixins import IOMixin, KernelMixin, AnnDataMixin\nfrom cellrank.tl.kernels import PrecomputedKernel\nfrom cellrank.tl._lineage import Lineage\nfrom cellrank.tl.kernels._base_kernel import Kernel, KernelExpression\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import spmatrix, csr_matrix\n\nAttr_t = (\n Literal[\"X\", \"raw\", \"layers\", \"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"],\n)\n\n\[email protected]_sections(base=\"base_estimator\", sections=[\"Parameters\"])\nclass BaseEstimator(IOMixin, KernelMixin, AnnDataMixin, ABC):\n \"\"\"\n Base class for all estimators.\n\n Parameters\n ----------\n obj\n Can be one of the following:\n\n - :class:`cellrank.tl.kernels.Kernel` - kernel object.\n - :class:`anndata.AnnData` - annotated data object containing transition matrix in\n :attr:`anndata.AnnData.obsp`.\n - :class:`numpy.ndarray` - row-normalized sparse transition matrix.\n - :class:`scipy.sparse.spmatrix` - row-normalized sparse transition matrix.\n obsp_key\n Key in :attr:`anndata.AnnData.obsp` where the transition matrix is stored.\n Only used when ``obj`` is an :class:`anndata.AnnData` object.\n \"\"\"\n\n def __init__(\n self,\n obj: Union[AnnData, np.ndarray, spmatrix, KernelExpression],\n obsp_key: Optional[str] = None,\n ):\n if isinstance(obj, Kernel):\n if obj._transition_matrix is None:\n raise ValueError(\n \"Compute transition matrix first as `.compute_transition_matrix()`.\"\n )\n kernel = obj\n elif isinstance(obj, KernelExpression):\n # this will fail if not all kernels have transition matrix computed\n kernel = obj.compute_transition_matrix()\n elif isinstance(obj, (np.ndarray, spmatrix)):\n kernel = PrecomputedKernel(obj)\n elif isinstance(obj, AnnData):\n if obsp_key is None:\n raise ValueError(\n \"Specify `obsp_key=...` when supplying an `AnnData` object.\"\n )\n elif obsp_key not in obj.obsp:\n raise KeyError(\n f\"Unable to find transition matrix in `adata.obsp[{obsp_key!r}]`.\"\n )\n kernel = PrecomputedKernel(obsp_key, adata=obj)\n else:\n raise TypeError(\n f\"Expected an object of type `KernelExpression`, `numpy.ndarray`, `scipy.sparse.spmatrix` \"\n f\"or `anndata.AnnData`, got `{type(obj).__name__}`.\"\n )\n\n super().__init__(kernel=kernel)\n\n self._params: Dict[str, Any] = {}\n self._shadow_adata = AnnData(\n X=csr_matrix(self.adata.shape, dtype=self.adata.X.dtype),\n obs=self.adata.obs[[]].copy(),\n var=self.adata.var[[]].copy(),\n raw=None if self.adata.raw is None else self.adata.raw.to_adata(),\n )\n\n def __init_subclass__(cls, **kwargs: Any):\n super().__init_subclass__()\n\n def _set(\n self,\n attr: Optional[str] = None,\n obj: Optional[Union[pd.DataFrame, Mapping[str, Any]]] = None,\n key: Optional[str] = None,\n value: Optional[\n Union[np.ndarray, pd.Series, pd.DataFrame, Lineage, AnnData, Dict[str, Any]]\n ] = None,\n copy: bool = True,\n shadow_only: bool = False,\n ) -> None:\n \"\"\"\n Set an attribute and optionally update ``obj[{key}]``.\n\n Parameters\n ----------\n attr\n Attribute to set. Only updated when we're not in the shadow. 
If `None`, don't update anything.\n See :attr:`_in_shadow` and ``obj`` for more information.\n obj\n Object which to update with ``value`` alongside the ``attr`.\n Usually, an attribute of :attr:`adata` is passed here.\n key\n Key in ``obj`` to update with ``value``. Only used when ``obj != None``.\n value\n Value to set. If `None` and ``key != None``, it removes the values under ``obj[key]``, if present.\n copy\n Whether to copy the ``value`` before setting it in ``obj``.\n shadow_only\n Whether or not to update the ``obj`` if we are not in the shadow.\n\n Returns\n -------\n Nothing, just optionally updates ``attr`` and/or ``obj[{key}]``.\n\n Raises\n ------\n AttributeError\n If ``attr`` doesn't exist.\n \"\"\"\n if not self._in_shadow:\n if attr is not None:\n if not hasattr(self, attr):\n raise AttributeError(attr)\n setattr(self, attr, value)\n if shadow_only:\n return\n\n if obj is None:\n return\n\n if key is not None:\n if value is None:\n try:\n del obj[key]\n except KeyError:\n pass\n else:\n obj[key] = copy_(value) if copy else value\n\n def _get(\n self,\n attr: str,\n obj: Union[pd.DataFrame, Mapping[str, Any]],\n key: str,\n where: Optional[Literal[\"obs\", \"obsm\", \"var\", \"varm\", \"uns\"]] = None,\n dtype: Optional[Union[type, Tuple[type, ...]]] = None,\n copy: bool = True,\n allow_missing: bool = False,\n ) -> None:\n \"\"\"\n Get data from an object and set an attribute.\n\n Parameters\n ----------\n attr\n Attribute to set.\n obj\n Object from which to extract the data.\n key\n Key in ``obj`` where the data is stored.\n where\n Attribute of :attr:`_shadow_adata` where to save the extracted data. If `None`, don't update it.\n dtype\n Valid type(s) of the extracted data.\n copy\n Copy the data before setting the ``attr``.\n allow_missing\n Whether or not to allow ``key`` to be missing in ``obj``.\n\n Returns\n -------\n Nothing, just updates ``attr`` with the extracted values and optionally :attr:`_shadow_adata`.\n\n Raises\n ------\n AttributeError\n If ``attr`` doesn't exist.\n TypeError\n If ``dtype != None`` and the extracted values are not instances of ``dtype``.\n KeyError\n If ``allow_missing = False`` and ``key`` was not found in ``obj``.\n \"\"\"\n if not hasattr(self, attr):\n raise AttributeError(attr)\n\n try:\n data = obj[key]\n if dtype is not None and not isinstance(data, dtype):\n raise TypeError(\n f\"Expected `.{attr}` to be of type `{dtype}`, found `{type(data).__name__}`.\"\n )\n if copy:\n data = copy_(data)\n setattr(self, attr, data)\n if where is not None:\n getattr(self._shadow_adata, where)[key] = data\n except KeyError:\n if not allow_missing:\n raise\n\n @property\n @contextmanager\n def _shadow(self) -> None:\n \"\"\"\n Temporarily set :attr:`adata` to :attr:`_shadow_adata`.\n\n Used to construct the serialization object in :meth:`to_adata`.\n \"\"\"\n if self._in_shadow:\n yield\n else:\n adata = self.adata\n try:\n self.adata = self._shadow_adata\n yield\n finally:\n self.adata = adata\n\n @property\n def _in_shadow(self) -> bool:\n \"\"\"Return `True` if :attr:`adata` is :attr:`_shadow_adata`.\"\"\"\n return self.adata is self._shadow_adata\n\n def _create_params(\n self,\n locs: Optional[Mapping[str, Any]] = None,\n func: Optional[Callable] = None,\n remove: Sequence[str] = (),\n ) -> Dict[str, Any]:\n \"\"\"\n Create parameters of interest from a function call.\n\n Parameters\n ----------\n locs\n Environment from which to get the parameters. If `None`, get the caller's environment.\n func\n Function of interest. 
If `None`, use the caller.\n remove\n Keys in ``locs`` which should not be included in the result.\n\n Returns\n -------\n The parameters as a :class:`dict`.\n\n Notes\n -----\n *args/**kwargs are always ignored and the values in ``locs`` are not copied.\n \"\"\"\n frame = currentframe()\n try:\n if locs is None:\n locs = frame.f_back.f_locals\n if func is None or True:\n name = frame.f_back.f_code.co_name\n func = dict(getmembers(self)).get(name, None)\n if not callable(func):\n raise TypeError(\n f\"Expected `func` to be `callable`, found `{type(func).__name__}`.\"\n )\n\n params = {}\n for name, param in signature(func).parameters.items():\n if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):\n continue\n if name in remove:\n continue\n if name in locs:\n params[name] = locs[name]\n\n return params\n except AttributeError:\n # frame can be None\n return {}\n except TypeError:\n return {}\n finally:\n del frame\n\n def _read_params(self, key: str) -> Dict[str, Any]:\n \"\"\"\n Read ``key`` from estimator params in :attr:`adata`.\n\n Usually called in :meth:`_read_adata` during :meth:`from_adata`.\n \"\"\"\n ekey = Key.uns.estimator(self.backward) + \"_params\"\n return dict(self.adata.uns.get(ekey, {}).get(key, {}))\n\n @d.dedent\n def to_adata(\n self,\n keep: Union[Literal[\"all\"], Sequence[Attr_t]] = (\"X\", \"raw\"),\n *,\n copy: Union[bool, Sequence[Attr_t]] = True,\n ) -> AnnData:\n \"\"\"\n %(to_adata.full_desc)s\n\n Parameters\n ----------\n keep\n Which attributes to keep from the underlying :attr:`adata`. Valid options are:\n\n - `'all'` - keep all attributes specified in the signature.\n - :class:`typing.Sequence` - keep only subset of these attributes.\n - :class:`dict` - the keys correspond the attribute names and values to a subset of keys\n which to keep from this attribute. If the values are specified either as `True` or `'all'`,\n everything from this attribute will be kept.\n copy\n Whether to copy the data. Can be specified on per-attribute basis. Useful for attributes that store arrays.\n Attributes not specified here will not be copied.\n\n Returns\n -------\n %(adata)s\n \"\"\" # noqa: D400\n\n def handle_attribute(attr: Attr_t, keys: List[str], *, copy: bool) -> None:\n try:\n if attr == \"X\":\n adata.X = deepcopy(self.adata.X) if copy else self.adata.X\n return\n if attr == \"raw\":\n adata.raw = (\n self.adata.raw.to_adata()\n if self.adata.raw is not None\n else None\n )\n return\n\n old = getattr(self.adata, attr)\n new = getattr(adata, attr)\n if keys == [\"all\"]:\n keys = list(old.keys())\n\n # fmt: off\n if isinstance(new, pd.DataFrame):\n old = old[keys]\n # avoid duplicates\n old = old[old.columns.difference(new.columns)]\n setattr(adata, attr, pd.merge(new, old, how=\"inner\", left_index=True, right_index=True, copy=copy))\n elif isinstance(new, Mapping):\n old = {k: old[k] for k in keys}\n # old has preference, since it's user supplied\n setattr(adata, attr, {**new, **(deepcopy(old) if copy else old)})\n else:\n raise TypeError(f\"Expected `adata.{attr}` to be either `Mapping` or `pandas. 
DataFrame`, \"\n f\"found `{type(new).__name__}`.\")\n # fmt: on\n except KeyError:\n missing = sorted(k for k in keys if k not in old)\n raise KeyError(\n f\"Unable to find key(s) `{missing}` in `adata.{attr}`.\"\n ) from None\n\n adata = self._shadow_adata.copy()\n _adata = self.adata\n try:\n # kernel and estimator share the adata\n self.adata = adata\n self.kernel.write_to_adata()\n finally:\n self.adata = _adata\n key = Key.uns.estimator(self.backward) + \"_params\"\n adata.uns[key] = deepcopy(self.params)\n\n # fmt: off\n if isinstance(keep, str):\n if keep == 'all':\n keep = [\"X\", \"raw\", \"layers\", \"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"uns\"]\n else:\n keep = [keep]\n if not isinstance(keep, Mapping):\n keep = {attr: True for attr in keep}\n if isinstance(copy, bool):\n copy = {attr: copy for attr in keep}\n elif isinstance(copy, str):\n copy = [copy]\n if not isinstance(copy, Mapping):\n copy = {attr: True for attr in copy}\n # fmt: on\n\n for attr, keys in keep.items():\n if keys is True:\n keys = [\"all\"]\n elif isinstance(keys, str):\n keys = [keys]\n if keys is False or not len(keys):\n continue\n handle_attribute(attr, keys=list(keys), copy=copy.get(attr, False))\n\n return adata\n\n @classmethod\n @d.dedent\n def from_adata(cls, adata: AnnData, obsp_key: str) -> \"BaseEstimator\":\n \"\"\"\n %(from_adata.full_desc)s\n\n Parameters\n ----------\n %(adata)s\n obsp_key\n Key in :attr:`anndata.AnnData.obsp` where the transition matrix is stored.\n\n Returns\n -------\n %(from_adata.returns)s\n \"\"\" # noqa: D400\n return super().from_adata(adata, obsp_key=obsp_key)\n\n @d.dedent\n def copy(self, *, deep: bool = False) -> \"BaseEstimator\":\n \"\"\"\n Return a copy of self.\n\n Parameters\n ----------\n deep\n Whether to return a deep copy or not. If `True`, this also copies the :attr:`adata`.\n\n Returns\n -------\n A copy of self.\n \"\"\"\n k = deepcopy(self.kernel) if deep else copy_(self.kernel)\n res = type(self)(k)\n for k, v in self.__dict__.items():\n if isinstance(v, Mapping):\n res.__dict__[k] = deepcopy(v)\n elif k != \"_kernel\":\n res.__dict__[k] = deepcopy(v) if deep else copy_(v)\n\n return res\n\n def __copy__(self) -> \"BaseEstimator\":\n return self.copy(deep=False)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}[n={len(self)}, kernel={repr(self.kernel)}]\"\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}[n={len(self)}, kernel={str(self.kernel)}]\"\n\n @property\n def params(self) -> Dict[str, Any]:\n \"\"\"Estimator parameters.\"\"\"\n return self._params\n\n @abstractmethod\n def fit(self, *args: Any, **kwargs: Any) -> \"BaseEstimator\":\n \"\"\"\n Fit an estimator.\n\n Parameters\n ----------\n args\n Positional arguments.\n kwargs\n Keyword arguments.\n\n Returns\n -------\n Self.\n \"\"\"\n\n @abstractmethod\n def predict(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n Run a prediction.\n\n Parameters\n ----------\n args\n Positional arguments.\n kwargs\n Keyword arguments.\n\n Returns\n -------\n Nothing.\n \"\"\"\n"
] | [
[
"sklearn.metrics.pairwise_distances",
"numpy.ones_like",
"matplotlib.colors.LogNorm",
"numpy.linspace",
"numpy.asarray",
"numpy.arange",
"matplotlib.collections.LineCollection",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.sin",
"numpy.nan_to_num",
"numpy.concatenate",
"numpy.mean",
"numpy.ravel",
"numpy.array",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.sum"
],
[
"sklearn.svm.SVR"
],
[
"scipy.stats.gmean",
"scipy.sparse.issparse",
"numpy.min",
"numpy.median",
"scipy.stats.hmean",
"numpy.max",
"numpy.mean"
],
[
"pandas.merge",
"scipy.sparse.csr_matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
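The cellrank code cell in the row above defines an abstract `BaseEstimator` that wraps a kernel or a precomputed, row-normalized transition matrix and can serialize its state into an `AnnData` object. The following is a minimal, illustrative sketch of how such an estimator might be subclassed and driven; the `ToyEstimator` class, the random data, the `"T_fwd"` key and the import path are assumptions made here, and only the constructor, `_create_params`, `params` and `to_adata` come from the class definition shown in that cell.

import numpy as np
from anndata import AnnData
# assumed import path; the module above is internal to cellrank
from cellrank.tl.estimators._base_estimator import BaseEstimator


class ToyEstimator(BaseEstimator):
    # concrete subclass supplying the two abstract methods
    def fit(self, n: int = 2) -> "ToyEstimator":
        # record the parameters of this call via the helper shown above
        self._params["fit"] = self._create_params()
        return self

    def predict(self) -> None:
        return None


rng = np.random.default_rng(0)
adata = AnnData(rng.random((50, 10)).astype(np.float32))

# a row-stochastic transition matrix stored in .obsp, as the constructor expects
T = rng.random((50, 50))
adata.obsp["T_fwd"] = T / T.sum(1, keepdims=True)

est = ToyEstimator(adata, obsp_key="T_fwd").fit(n=3)
print(est.params)                      # {'fit': {'n': 3}}

# serialize the estimator state (plus the kernel's transition matrix) into AnnData
bdata = est.to_adata(keep=("X", "raw"))

According to the docstrings in the same cell, `from_adata(bdata, obsp_key=...)` is the inverse of `to_adata` and would rebuild the estimator from such an object.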
blnm/RSE | [
"6a3f0dd858ea4b6dafcfb1d97bb979e101d9911c"
] | [
"RAdam.py"
] | [
"import tensorflow as tf\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import clip_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import resource_variable_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.training import optimizer\r\n\r\n__all__ = ['RAdamOptimizer']\r\n\r\n\r\nclass RAdamOptimizer(optimizer.Optimizer):\r\n \"\"\"RAdam optimizer.\r\n\r\n According to the paper\r\n [On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).\r\n \"\"\"\r\n\r\n def __init__(self,\r\n learning_rate=0.001,\r\n beta1=0.9,\r\n beta2=0.999,\r\n epsilon=1e-7,\r\n L2_decay=0.,\r\n amsgrad=False,\r\n total_steps=0,\r\n warmup_proportion=0.1,\r\n min_lr=0.,\r\n use_locking=False,\r\n name=\"RAdam\",\r\n decay_vars=None,\r\n L1_decay=0.0,\r\n clip_gradients=False, clip_multiplier=3.0, clip_epsilon=1e-2):\r\n r\"\"\"Construct a new Adam optimizer.\r\n\r\n Args:\r\n learning_rate: A Tensor or a floating point value. The learning rate.\r\n beta1: A float value or a constant float tensor. The exponential decay\r\n rate for the 1st moment estimates.\r\n beta2: A float value or a constant float tensor. The exponential decay\r\n rate for the 2nd moment estimates.\r\n epsilon: A small constant for numerical stability. This epsilon is\r\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\r\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\r\n L2_decay: A floating point value. Weight decay for each param.\r\n amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from\r\n the paper \"On the Convergence of Adam and beyond\".\r\n total_steps: An integer. Total number of training steps.\r\n Enable warmup by setting a positive value.\r\n warmup_proportion: A floating point value. The proportion of increasing steps.\r\n min_lr: A floating point value. Minimum learning rate after warmup.\r\n name: Optional name for the operations created when applying gradients.\r\n Defaults to \"Adam\". @compatibility(eager) When eager execution is\r\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\r\n a callable that takes no arguments and returns the actual value to use.\r\n This can be useful for changing these values across different\r\n invocations of optimizer functions. @end_compatibility\r\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\r\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\r\n gradients by value, `decay` is included for backward compatibility to\r\n allow time inverse decay of learning rate. 
`lr` is included for backward\r\n compatibility, recommended to use `learning_rate` instead.\r\n \"\"\"\r\n super(RAdamOptimizer, self).__init__(use_locking, name)\r\n self._lr = learning_rate\r\n self._beta1 = beta1\r\n self._beta2 = beta2\r\n self._epsilon = epsilon\r\n self._weight_decay = L2_decay\r\n self._L1_decay = L1_decay\r\n self._amsgrad = amsgrad\r\n self._total_steps = float(total_steps)\r\n self._warmup_proportion = warmup_proportion\r\n self._min_lr = min_lr\r\n self._initial_weight_decay = L2_decay\r\n self._initial_total_steps = total_steps\r\n self.clip_multiplier = clip_multiplier\r\n self.clip_epsilon = clip_epsilon\r\n self.clip_gradients = clip_gradients\r\n self.clip_multiplier_t = ops.convert_to_tensor(self.clip_multiplier, name=\"clip_multiplier\")\r\n self.clip_epsilon_t = ops.convert_to_tensor(self.clip_epsilon, name=\"clip_epsilon\")\r\n\r\n self._lr_t = None\r\n self._step_t = None\r\n self._beta1_t = None\r\n self._beta2_t = None\r\n self._epsilon_t = None\r\n self._weight_decay_t = None\r\n self._total_steps_t = None\r\n self._warmup_proportion_t = None\r\n self._min_lr_t = None\r\n self.reg_vars = set(decay_vars) if decay_vars is not None else set()\r\n\r\n def _get_beta_accumulators(self):\r\n with ops.init_scope():\r\n if context.executing_eagerly():\r\n graph = None\r\n else:\r\n graph = ops.get_default_graph()\r\n return (self._get_non_slot_variable(\"step\", graph=graph),\r\n self._get_non_slot_variable(\"beta1_power\", graph=graph),\r\n self._get_non_slot_variable(\"beta2_power\", graph=graph))\r\n\r\n def _create_slots_internal(self, var_list):\r\n first_var = min(var_list, key=lambda x: x.name)\r\n self._create_non_slot_variable(initial_value=1.0, name=\"step\", colocate_with=first_var)\r\n self._create_non_slot_variable(initial_value=self._beta1, name=\"beta1_power\", colocate_with=first_var)\r\n self._create_non_slot_variable(initial_value=self._beta2, name=\"beta2_power\", colocate_with=first_var)\r\n for v in var_list:\r\n self._zeros_slot(v, \"m\", self._name)\r\n self._zeros_slot(v, \"v\", self._name)\r\n if self._amsgrad:\r\n self._zeros_slot(v, \"vhat\", self._name)\r\n\r\n def _prepare(self):\r\n lr = self._call_if_callable(self._lr)\r\n beta1 = self._call_if_callable(self._beta1)\r\n beta2 = self._call_if_callable(self._beta2)\r\n epsilon = self._call_if_callable(self._epsilon)\r\n weight_decay = self._call_if_callable(self._weight_decay)\r\n total_steps = self._call_if_callable(self._total_steps)\r\n warmup_proportion = self._call_if_callable(self._warmup_proportion)\r\n min_lr = self._call_if_callable(self._min_lr)\r\n\r\n self._lr_t = ops.convert_to_tensor(lr, name=\"learning_rate\")\r\n self._beta1_t = ops.convert_to_tensor(beta1, name=\"beta1\")\r\n self._beta2_t = ops.convert_to_tensor(beta2, name=\"beta2\")\r\n self._epsilon_t = ops.convert_to_tensor(epsilon, name=\"epsilon\")\r\n self._weight_decay_t = ops.convert_to_tensor(weight_decay, name=\"weight_decay\")\r\n self._total_steps_t = ops.convert_to_tensor(total_steps, name=\"total_steps\")\r\n self._warmup_proportion_t = ops.convert_to_tensor(warmup_proportion, name=\"warmup_proportion\")\r\n self._min_lr_t = ops.convert_to_tensor(min_lr, name=\"min_lr\")\r\n\r\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\r\n tvars = list(zip(*grads_and_vars))[1]\r\n self._create_slots_internal(tvars)\r\n\r\n return super().apply_gradients(grads_and_vars, global_step, name)\r\n\r\n def _apply_dense(self, grad, var):\r\n return self._resource_apply_dense(grad, 
var)\r\n\r\n def _resource_apply_dense(self, grad, var):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\r\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\r\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\r\n\r\n if self._initial_total_steps > 0:\r\n total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)\r\n warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)\r\n min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)\r\n warmup_steps = total_steps * warmup_proportion\r\n decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)\r\n decay_rate = (min_lr - lr_t) / decay_steps\r\n lr_t = tf.where(\r\n step <= warmup_steps,\r\n lr_t * (step / warmup_steps),\r\n lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),\r\n )\r\n\r\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\r\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\r\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\r\n\r\n v = self.get_slot(var, \"v\")\r\n\r\n if self.clip_gradients:\r\n clipVal = math_ops.sqrt(\r\n tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t\r\n grad = clip_ops.clip_by_norm(grad, clipVal)\r\n\r\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\r\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\r\n\r\n m = self.get_slot(var, \"m\")\r\n\r\n v_t = state_ops.assign(v, beta2_t * v + (1.0 - beta2_t) * math_ops.square(grad), use_locking=self._use_locking)\r\n v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t\r\n grad_corr = grad / v_corr_t\r\n\r\n m_t = state_ops.assign(m, beta1_t * m + (1.0 - beta1_t) * grad_corr, use_locking=self._use_locking)\r\n m_corr_t = m_t / (1.0 - beta1_power)\r\n\r\n r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *\r\n (sma_t - 2.0) / (sma_inf - 2.0) *\r\n sma_inf / sma_t)\r\n\r\n var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t, m_corr_t)\r\n\r\n if var in self.reg_vars:\r\n if self._initial_weight_decay > 0.0:\r\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\r\n if self._L1_decay > 0.0:\r\n var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)\r\n\r\n with tf.control_dependencies([var_t]):\r\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\r\n\r\n updates = [var_update, m_t, v_t]\r\n return control_flow_ops.group(*updates)\r\n\r\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\r\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\r\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\r\n\r\n if self._initial_total_steps > 0:\r\n total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)\r\n warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)\r\n min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)\r\n warmup_steps = total_steps * warmup_proportion\r\n decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)\r\n decay_rate = (min_lr - lr_t) / decay_steps\r\n lr_t = tf.where(\r\n step <= warmup_steps,\r\n lr_t * (step / warmup_steps),\r\n lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),\r\n )\r\n\r\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\r\n 
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\r\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\r\n v = self.get_slot(var, \"v\")\r\n\r\n if self.clip_gradients:\r\n clipVal = math_ops.sqrt(\r\n tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t\r\n grad = clip_ops.clip_by_norm(grad, clipVal)\r\n\r\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\r\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\r\n\r\n m = self.get_slot(var, \"m\")\r\n m_scaled_g_values = grad * (1 - beta1_t)\r\n m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)\r\n with ops.control_dependencies([m_t]):\r\n m_t = scatter_add(m, indices, m_scaled_g_values)\r\n m_corr_t = m_t / (1.0 - beta1_power)\r\n\r\n v_scaled_g_values = (grad * grad) * (1 - beta2_t)\r\n v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)\r\n with ops.control_dependencies([v_t]):\r\n v_t = scatter_add(v, indices, v_scaled_g_values)\r\n if self._amsgrad:\r\n vhat = self.get_slot(var, 'vhat')\r\n vhat_t = state_ops.assign(vhat, math_ops.maximum(vhat, v_t), use_locking=self._use_locking)\r\n v_corr_t = math_ops.sqrt(vhat_t / (1.0 - beta2_power)) + epsilon_t\r\n else:\r\n v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t\r\n\r\n r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *\r\n (sma_t - 2.0) / (sma_inf - 2.0) *\r\n sma_inf / sma_t)\r\n\r\n var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t / v_corr_t, m_corr_t)\r\n\r\n if var in self.reg_vars:\r\n if self._initial_weight_decay > 0.0:\r\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\r\n if self._L1_decay > 0.0:\r\n var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)\r\n\r\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\r\n\r\n updates = [var_update, m_t, v_t]\r\n if self._amsgrad:\r\n updates.append(vhat_t)\r\n return control_flow_ops.group(*updates)\r\n\r\n def _apply_sparse(self, grad, var):\r\n return self._apply_sparse_shared(\r\n grad.values,\r\n var,\r\n grad.indices,\r\n lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))\r\n\r\n def _resource_scatter_add(self, x, i, v):\r\n with ops.control_dependencies([resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\r\n return x.value()\r\n\r\n def _resource_apply_sparse(self, grad, var, indices):\r\n return self._apply_sparse_shared(grad, var, indices, self._resource_scatter_add)\r\n\r\n def _finish(self, update_ops, name_scope):\r\n with ops.control_dependencies(update_ops):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n with ops.colocate_with(beta1_power):\r\n update_step = step.assign(step + 1.0, use_locking=self._use_locking)\r\n update_beta1 = beta1_power.assign(beta1_power * self._beta1_t, use_locking=self._use_locking)\r\n update_beta2 = beta2_power.assign(beta2_power * self._beta2_t, use_locking=self._use_locking)\r\n return control_flow_ops.group(*update_ops + [update_step, update_beta1, update_beta2], name=name_scope)\r\n"
] | [
[
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.where",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.state_ops.scatter_add",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_add",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.math_ops.sign",
"tensorflow.python.ops.math_ops.maximum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.7",
"1.10",
"1.12"
]
}
] |
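The RAdam.py cell in the row above implements the rectified Adam update (SMA-based rectification term `r_t`) with optional warmup/decay scheduling, weight decay and gradient clipping on top of `tf.train.Optimizer`. Below is a minimal graph-mode sketch, targeting the TF 1.x versions listed for this row, of how the optimizer could be driven on a toy least-squares problem; the data, loss and the `from RAdam import ...` line are assumptions, while the constructor arguments are taken directly from the signature shown above.

import numpy as np
import tensorflow as tf
from RAdam import RAdamOptimizer   # assumes RAdam.py is on the path

# toy regression problem: recover w such that y = x @ w
x = tf.constant(np.random.rand(128, 4).astype(np.float32))
y = tf.reduce_sum(x, axis=1, keepdims=True)

w = tf.Variable(tf.zeros([4, 1]), name="w")
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

opt = RAdamOptimizer(
    learning_rate=1e-3,
    total_steps=1000,        # a positive value enables the warmup/decay schedule
    warmup_proportion=0.1,
    L2_decay=1e-4,
    decay_vars=[w],          # only variables in decay_vars are weight-decayed
    clip_gradients=True,
)
train_op = opt.minimize(loss)  # minimize() is inherited from tf.train.Optimizer

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op)
    print(sess.run(loss))

Note that `apply_gradients` creates the `m`/`v` slot variables lazily, so the global variable initializer has to be built and run after `minimize()` has constructed the training op, as in the sketch.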
rhshadrach/pandas | [
"777c0f90c6067c636fcd76ce003a8fbfcc311d7b"
] | [
"pandas/core/generic.py"
] | [
"import collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Hashable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import config\n\nfrom pandas._libs import Timestamp, iNaT, lib\nfrom pandas._typing import (\n Axis,\n FilePathOrBuffer,\n FrameOrSeries,\n JSONSerializable,\n Label,\n Level,\n Renamer,\n)\nfrom pandas.compat import set_function_name\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_fillna_kwargs,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float,\n is_integer,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_re_compilable,\n is_scalar,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.indexes.api import (\n Index,\n InvalidIndexError,\n MultiIndex,\n RangeIndex,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.missing import find_valid_index\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\nif TYPE_CHECKING:\n from pandas.core.resample import Resampler\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs: Dict[str, str] = dict()\n_shared_doc_kwargs = dict(\n axes=\"keywords for axes\",\n klass=\"Series/DataFrame\",\n axes_single_arg=\"int or labels for object\",\n args_transpose=\"axes to permute (int or label for object)\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\",\n)\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError(\n f\"cannot replace {to_replace} with method {method} on a \"\n f\"{type(self).__name__}\"\n )\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, 
limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nbool_t = bool # Need alias because NDFrame has def bool:\n\n\nclass NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : bool, default False\n \"\"\"\n\n _internal_names: List[str] = [\n \"_data\",\n \"_cacher\",\n \"_item_cache\",\n \"_cache\",\n \"_is_copy\",\n \"_subtyp\",\n \"_name\",\n \"_index\",\n \"_default_kind\",\n \"_default_fill_value\",\n \"_metadata\",\n \"__array_struct__\",\n \"__array_interface__\",\n ]\n _internal_names_set: Set[str] = set(_internal_names)\n _accessors: Set[str] = set()\n _deprecations: FrozenSet[str] = frozenset([\"get_values\"])\n _metadata: List[str] = []\n _is_copy = None\n _data: BlockManager\n _attrs: Dict[Optional[Hashable], Any]\n _typ: str\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data: BlockManager,\n copy: bool = False,\n attrs: Optional[Mapping[Optional[Hashable], Any]] = None,\n ):\n # copy kwarg is retained for mypy compat, is not used\n\n object.__setattr__(self, \"_is_copy\", None)\n object.__setattr__(self, \"_data\", data)\n object.__setattr__(self, \"_item_cache\", {})\n if attrs is None:\n attrs = {}\n else:\n attrs = dict(attrs)\n object.__setattr__(self, \"_attrs\", attrs)\n\n @classmethod\n def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(\n axe, axis=cls._get_block_manager_axis(a), copy=False\n )\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def attrs(self) -> Dict[Optional[Hashable], Any]:\n \"\"\"\n Dictionary of global attributes on this object.\n\n .. 
warning::\n\n attrs is experimental and may change without warning.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:\n self._attrs = dict(value)\n\n @classmethod\n def _validate_dtype(cls, dtype):\n \"\"\" validate the passed dtype \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == \"V\":\n raise NotImplementedError(\n \"compound dtypes are not implemented \"\n f\"in the {cls.__name__} constructor\"\n )\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:\n \"\"\"\n Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"\n Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"\n Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n _AXIS_ALIASES = {\"rows\": 0}\n _AXIS_IALIASES = {0: \"rows\"}\n _stat_axis_number = 0\n _stat_axis_name = \"index\"\n _ix = None\n _AXIS_ORDERS: List[str]\n _AXIS_NUMBERS: Dict[str, int]\n _AXIS_NAMES: Dict[int, str]\n _AXIS_REVERSED: bool\n _info_axis_number: int\n _info_axis_name: str\n _AXIS_LEN: int\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @classmethod\n def _construct_axes_from_arguments(\n cls, args, kwargs, require_all: bool = False, sentinel=None\n ):\n \"\"\"\n Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n # construct the args\n args = list(args)\n for a in cls._AXIS_ORDERS:\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError as err:\n if require_all:\n raise TypeError(\n \"not enough/duplicate arguments specified!\"\n ) from err\n\n axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if isinstance(axis, str):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis 
to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = f\"{prefix}level_{i}\"\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self) -> Dict[str, ABCSeries]:\n from pandas.core.computation.parsing import clean_column_name\n\n d: Dict[str, ABCSeries] = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n\n return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}\n\n def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:\n \"\"\"\n Return the special character free column resolvers of a dataframe.\n\n Column names with special characters are 'cleaned up' so that they can\n be referred to by backtick quoting.\n Used in :meth:`DataFrame.eval`.\n \"\"\"\n from pandas.core.computation.parsing import clean_column_name\n\n if isinstance(self, ABCSeries):\n return {clean_column_name(self.name): self}\n\n return {\n clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)\n }\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. 
Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for%(extended_summary_sub)s row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : %(axes_single_arg)s, default 0\n The axis to update. The value 0 identifies the rows%(axis_description_sub)s.\n\n inplace : bool, default False\n Whether to return a new %(klass)s instance.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of type %(klass)s if inplace=False, None otherwise.\n\n See Also\n --------\n %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.\n \"\"\"\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis: int, labels: Index) -> None:\n labels = ensure_index(labels)\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the level(s) is removed:\n\n * 0 or 'index': remove level(s) in column.\n * 1 or 'columns': remove level(s) in row.\n\n Returns\n -------\n DataFrame\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self: FrameOrSeries, item) -> FrameOrSeries:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)\n return self.iloc[\n tuple(\n 0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes)\n )\n ]\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(\n self: FrameOrSeries,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n if mapper is None and index is None and columns is None:\n raise TypeError(\"must pass an index to rename\")\n\n if index is not None or columns is not None:\n if axis is not None:\n raise TypeError(\n \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n )\n elif mapper is not None:\n raise TypeError(\n \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n )\n else:\n # use the mapper argument\n if axis and self._get_axis_number(axis) == 1:\n columns = mapper\n else:\n index = mapper\n\n result = self if inplace else self.copy(deep=copy)\n\n for axis_no, replacements in enumerate((index, columns)):\n if replacements is None:\n continue\n\n ax = self._get_axis(axis_no)\n baxis = self._get_block_manager_axis(axis_no)\n f = com.get_rename_function(replacements)\n\n if level is not None:\n level = ax._get_level_number(level)\n\n # GH 13473\n if not callable(replacements):\n indexer = ax.get_indexer_for(replacements)\n if errors == \"raise\" and len(indexer[indexer == -1]):\n missing_labels = [\n label\n for index, label in enumerate(replacements)\n if indexer[index] == -1\n ]\n raise KeyError(f\"{missing_labels} not found in axis\")\n\n result._data = result._data.rename_axis(\n f, axis=baxis, copy=copy, level=level\n )\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n return None\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature(\"mapper\", [(\"copy\", True), (\"inplace\", False)])\n def rename_axis(self, mapper=lib.no_default, **kwargs):\n \"\"\"\n 
Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=lib.no_default\n )\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n axis = kwargs.pop(\"axis\", 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename_axis() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if mapper is not lib.no_default:\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (\n is_list_like(mapper) and not is_dict_like(mapper)\n )\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n raise ValueError(\"Use `.rename` to alter labels with a mapper.\")\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is lib.no_default:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com.get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis, inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other) -> bool:\n return all(\n self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS\n )\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n testing.assert_series_equal : Raises an AssertionError if left and\n right are not equal. Provides an easy interface to ignore\n inequality in dtypes, indexes and precision among others.\n testing.assert_frame_equal : Like assert_series_equal, but targets\n DataFrames.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.neg(values)\n else:\n raise TypeError(f\"Unary negative expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = values\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.pos(values)\n else:\n raise TypeError(f\"Unary plus expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n if not self.size:\n # inv fails with 0 len\n return self\n\n new_data = self._data.apply(operator.invert)\n result = self._constructor(new_data).__finalize__(self)\n return result\n\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n\n Returns\n -------\n bool\n Same single boolean value converted to bool type.\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\n \"bool cannot act on a non-boolean single element \"\n f\"{type(self).__name__}\"\n )\n\n self.__nonzero__()\n\n def __abs__(self: FrameOrSeries) -> FrameOrSeries:\n return self.abs()\n\n def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. 
All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n return (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and not self._is_label_reference(key, axis=axis)\n )\n\n def _is_label_reference(self, key, axis=0) -> bool_t:\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (\n key is not None\n and is_hashable(key)\n and any(key in self.axes[ax] for ax in other_axes)\n )\n\n def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(\n key, axis=axis\n )\n\n def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns).\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and any(key in self.axes[ax] for ax in other_axes)\n ):\n\n # Build an informative and grammatical warning\n level_article, level_type = (\n (\"an\", \"index\") if axis == 0 else (\"a\", \"column\")\n )\n\n label_article, label_type = (\n (\"a\", \"column\") if axis == 0 else (\"an\", \"index\")\n )\n\n msg = (\n f\"'{key}' is both {level_article} {level_type} level and \"\n f\"{label_article} {label_type} label, which is ambiguous.\"\n )\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:\n \"\"\"\n 
Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):\n multi_message = (\n \"\\n\"\n \"For a multi-index, the label must be a \"\n \"tuple with elements corresponding to each level.\"\n )\n else:\n multi_message = \"\"\n\n label_axis_name = \"column\" if axis == 0 else \"index\"\n raise ValueError(\n (\n f\"The {label_axis_name} label '{key}' \"\n f\"is not unique.{multi_message}\"\n )\n )\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis: int = 0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [\n k for k in keys if not self._is_label_or_level_reference(k, axis=axis)\n ]\n\n if invalid_keys:\n raise ValueError(\n (\n \"The following keys are not valid labels or \"\n f\"levels for axis {axis}: {invalid_keys}\"\n )\n )\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified 
levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError(\n f\"{repr(type(self).__name__)} objects are mutable, \"\n f\"thus they cannot be hashed\"\n )\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n \"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"\n Get the 'info axis' (see Indexing for more).\n\n This is index for Series, columns for DataFrame.\n\n Returns\n -------\n Index\n Info axis.\n \"\"\"\n return self._info_axis\n\n def items(self):\n \"\"\"\n Iterate over (label, values) on info axis\n\n This is index for Series and columns for DataFrame.\n\n Returns\n -------\n Generator\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n @Appender(items.__doc__)\n def iteritems(self):\n return self.items()\n\n def __len__(self) -> int:\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key) -> bool_t:\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self) -> bool_t:\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna : Return series without null values.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None) -> np.ndarray:\n return np.asarray(self._values, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n result = lib.item_from_zerodim(result)\n if is_scalar(result):\n # e.g. 
we get here with np.ptp(series)\n # ptp also requires the item_from_zerodim\n return result\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self) -> Dict[str, Any]:\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(\n _data=self._data,\n _typ=self._typ,\n _metadata=self._metadata,\n attrs=self.attrs,\n **meta,\n )\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get(\"_typ\")\n if typ is not None:\n attrs = state.get(\"_attrs\", {})\n object.__setattr__(self, \"_attrs\", attrs)\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n elif len(state) == 2:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n # string representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = f\"[{','.join(map(pprint_thing, self))}]\"\n return f\"{type(self).__name__}({prepr})\"\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option(\"display.latex.repr\"):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option(\"display.max_rows\"))\n payload = json.loads(\n data.to_json(orient=\"table\"), object_pairs_hook=collections.OrderedDict\n )\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs[\n \"to_markdown\"\n ] = \"\"\"\n Print %(klass)s in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n %(klass)s in Markdown-friendly format.\n \"\"\"\n\n _shared_docs[\n \"to_excel\"\n ] = \"\"\"\n Write %(klass)s to an Excel sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. 
To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... 
df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n ExcelWriter can also be used to append to an existing Excel file:\n\n >>> with pd.ExcelWriter('output.xlsx',\n ... mode='a') as writer: # doctest: +SKIP\n ... df.to_excel(writer, sheet_name='Sheet_name_3')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n freeze_panes=None,\n ) -> None:\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n df,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_json(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n orient: Optional[str] = None,\n date_format: Optional[str] = None,\n double_precision: int = 10,\n force_ascii: bool_t = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool_t = False,\n compression: Optional[str] = \"infer\",\n index: bool_t = True,\n indent: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : str or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : str\n Indication of expected JSON string format.\n\n * Series:\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}.\n\n * DataFrame:\n\n - default is 'columns'\n - allowed values are: {'split', 'records', 'index', 'columns',\n 'values', 'table'}.\n\n * The format of the JSON string:\n\n - 'split' : dict like {'index' -> [index], 'columns' -> [columns],\n 'data' -> [values]}\n - 'records' : list like [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n\n Describing the data, where data component is like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : str, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. 
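As an illustrative sketch (the timestamp value below is arbitrary, not from the original docstring), the epoch output shrinks or grows with the chosen unit:

>>> import pandas as pd
>>> df = pd.DataFrame({'t': [pd.Timestamp('2019-01-01 12:00:00')]})
>>> df.to_json(date_unit='ms')  # the default
'{"t":{"0":1546344000000}}'
>>> df.to_json(date_unit='s')
'{"t":{"0":1546344000}}'
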
One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n indent : int, optional\n Length of whitespace used to indent each record.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting json format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_json : Convert a JSON string to pandas object.\n\n Notes\n -----\n The behavior of ``indent=0`` varies from the stdlib, which does not\n indent the output but does insert newlines. Currently, ``indent=0``\n and the default ``indent=None`` are equivalent in pandas, though this\n may change in a future release.\n\n Examples\n --------\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n from pandas.io import json\n\n if date_format is None and orient == \"table\":\n date_format = \"iso\"\n elif date_format is None:\n date_format = \"epoch\"\n\n config.is_nonnegative_int(indent)\n indent = indent or 0\n\n return json.to_json(\n path_or_buf=path_or_buf,\n obj=self,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n 
force_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n lines=lines,\n compression=compression,\n index=index,\n indent=indent,\n )\n\n def to_hdf(\n self,\n path_or_buf,\n key: str,\n mode: str = \"a\",\n complevel: Optional[int] = None,\n complib: Optional[str] = None,\n append: bool_t = False,\n format: Optional[str] = None,\n index: bool_t = True,\n min_itemsize: Optional[Union[int, Dict[str, int]]] = None,\n nan_rep=None,\n dropna: Optional[bool_t] = None,\n data_columns: Optional[List[str]] = None,\n errors: str = \"strict\",\n encoding: str = \"UTF-8\",\n ) -> None:\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n append : bool, default False\n For Table formats, append the input data to the existing.\n format : {'fixed', 'table', None}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n - If None, pd.get_option('io.hdf.default_format') is checked,\n followed by fallback to \"fixed\"\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n encoding : str, default \"UTF-8\"\n min_itemsize : dict or int, optional\n Map column names to minimum string sizes for columns.\n nan_rep : Any, optional\n How to represent null values as str.\n Not allowed with append=True.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
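As a rough sketch (hypothetical file name; requires the optional PyTables dependency), a column listed in ``data_columns`` can then be queried directly on disk:

>>> import pandas as pd
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
>>> df.to_hdf('data.h5', key='df', mode='w',
...           format='table', data_columns=['B'])  # doctest: +SKIP
>>> pd.read_hdf('data.h5', 'df', where='B > 4')  # doctest: +SKIP
   A  B
1  2  5
2  3  6
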
See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n\n pytables.to_hdf(\n path_or_buf,\n key,\n self,\n mode=mode,\n complevel=complevel,\n complib=complib,\n append=append,\n format=format,\n index=index,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n dropna=dropna,\n data_columns=data_columns,\n errors=errors,\n encoding=encoding,\n )\n\n def to_sql(\n self,\n name: str,\n con,\n schema=None,\n if_exists: str = \"fail\",\n index: bool_t = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n ) -> None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects. The user\n is responsible for engine disposal and connection closure for the SQLAlchemy\n connectable See `here \\\n <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.\n\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. 
If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n\n sql.to_sql(\n self,\n name,\n con,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n def to_pickle(\n self,\n path,\n compression: Optional[str] = \"infer\",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n ) -> None:\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values are 0, 1, 2, 3, 4. 
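For illustration only (paths are hypothetical), an older protocol can be pinned for consumers on older Python versions, independently of the compression setting:

>>> import pandas as pd
>>> import pickle
>>> df = pd.DataFrame({"foo": range(3)})
>>> df.to_pickle("./frame.pkl", protocol=4)  # doctest: +SKIP
>>> df.to_pickle("./frame.pkl.gz", compression="gzip",
...              protocol=pickle.HIGHEST_PROTOCOL)  # doctest: +SKIP
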
A negative value for the protocol\n parameter is equivalent to setting its value to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html.\n .. versionadded:: 0.21.0.\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n\n to_pickle(self, path, compression=compression, protocol=protocol)\n\n def to_clipboard(\n self, excel: bool_t = True, sep: Optional[str] = None, **kwargs\n ) -> None:\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n Produce output in a csv format for easy pasting into excel.\n\n - True, use the provided separator for csv pasting.\n - False, write a string representation of the object to the clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n \"\"\"\n from pandas.io import clipboards\n\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <https://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 
'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot',\n ... 'falcon', 'parrot'],\n ... 'speed': [350, 18, 361, 15]})\n >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])\n\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n xarray = import_optional_dependency(\"xarray\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n else:\n return xarray.Dataset.from_dataframe(self)\n\n @Substitution(returns=fmt.return_docstring)\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n caption=None,\n label=None,\n ):\n r\"\"\"\n Render object to a LaTeX tabular, longtable, or nested table/tabular.\n\n Requires ``\\usepackage{booktabs}``. The output can be copy/pasted\n into a main LaTeX document or read from an external file\n with ``\\input{table.tex}``.\n\n .. versionchanged:: 0.20.2\n Added to Series.\n\n .. versionchanged:: 1.0.0\n Added caption and label arguments.\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function or str, optional, default None\n Formatter for floating point numbers. For example\n ``float_format=\"%%.2f\"`` and ``float_format=\"{:0.2f}\".format`` will\n both result in 0.1234 being formatted as 0.12.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. 
By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n caption : str, optional\n The LaTeX caption to be placed inside ``\\caption{}`` in the output.\n\n .. versionadded:: 1.0.0\n\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n This is used with ``\\ref{}`` in the main ``.tex`` file.\n\n .. versionadded:: 1.0.0\n %(returns)s\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n )\n return formatter.to_latex(\n buf=buf,\n column_format=column_format,\n longtable=longtable,\n encoding=encoding,\n multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow,\n caption=caption,\n label=label,\n )\n\n def to_csv(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Label]] = None,\n header: Union[bool_t, List[str]] = True,\n index: bool_t = True,\n index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,\n mode: str = \"w\",\n encoding: Optional[str] = None,\n compression: Optional[Union[str, Mapping[str, str]]] = \"infer\",\n quoting: Optional[int] = None,\n quotechar: str = '\"',\n line_terminator: Optional[str] = None,\n chunksize: Optional[int] = None,\n date_format: Optional[str] = None,\n doublequote: bool_t = True,\n escapechar: Optional[str] = None,\n decimal: Optional[str] = \".\",\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string. If a file object is passed it should be opened with\n `newline=''`, disabling universal newlines.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. 
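A small sketch of the difference (column and index names here are arbitrary):

>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2]}, index=pd.Index(['a', 'b'], name='key'))
>>> df.to_csv()
'key,x\na,1\nb,2\n'
>>> df.to_csv(index_label='id')
'id,x\na,1\nb,2\n'
>>> df.to_csv(index_label=False)
'x\na,1\nb,2\n'
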
Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n compression : str or dict, default 'infer'\n If str, represents compression mode. If dict, value at 'method' is\n the compression mode. Compression mode may be any of the following\n possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If\n compression mode is 'infer' and `path_or_buf` is path-like, then\n detect compression mode from the following extensions: '.gz',\n '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given\n and mode is 'zip' or inferred as 'zip', other entries passed as\n additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other entries as additional compression options if\n compression mode is 'zip'.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write DataFrame to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n\n Create 'out.zip' containing 'out.csv'\n\n >>> compression_opts = dict(method='zip',\n ... archive_name='out.csv') # doctest: +SKIP\n >>> df.to_csv('out.zip', index=False,\n ... 
compression=compression_opts) # doctest: +SKIP\n \"\"\"\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.csvs import CSVFormatter\n\n formatter = CSVFormatter(\n df,\n path_or_buf,\n line_terminator=line_terminator,\n sep=sep,\n encoding=encoding,\n compression=compression,\n quoting=quoting,\n na_rep=na_rep,\n float_format=float_format,\n cols=columns,\n header=header,\n index=index,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal,\n )\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n return None\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"\n Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self) -> None:\n \"\"\"\n Reset the cacher.\n \"\"\"\n if hasattr(self, \"_cacher\"):\n del self._cacher\n\n def _maybe_cache_changed(self, item, value) -> None:\n \"\"\"\n The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self) -> bool_t:\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _maybe_update_cacher(\n self, clear: bool_t = False, verify_is_copy: bool_t = True\n ) -> None:\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : bool, default False\n Clear the item cache.\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n # Note: we need to call ref._maybe_cache_changed even in the\n # case where it will raise. (Uh, not clear why)\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except AssertionError:\n # ref._data.setitem can raise\n # AssertionError because of shape mismatch\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t=\"referant\")\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self) -> None:\n self._item_cache.clear()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def take(\n self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. 
``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n is_copy : bool\n Before pandas 1.0, ``is_copy=False`` can be specified to ensure\n that the return value is an actual copy. Starting with pandas 1.0,\n ``take`` always returns a copy, and the keyword is therefore\n deprecated.\n\n .. deprecated:: 1.0.0\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. \"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n\n nv.validate_take(tuple(), kwargs)\n\n self._consolidate_inplace()\n\n new_data = self._data.take(\n indices, axis=self._get_block_manager_axis(axis), verify=True\n )\n return self._constructor(new_data).__finalize__(self)\n\n def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n result = self.take(indices=indices, axis=axis)\n # Maybe set copy if we didn't actually change the index.\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n return result\n\n def xs(self, key, axis=0, level=None, drop_level: bool_t = True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. 
Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)\n\n # create the tuple of the indexer\n _indexer = [slice(None)] * self.ndim\n _indexer[axis] = loc\n indexer = tuple(_indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n (inds,) = loc.nonzero()\n return self._take_with_is_copy(inds, axis=axis)\n else:\n return self._take_with_is_copy(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n # In this case loc should be an integer\n if self.ndim == 1:\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n return self._values[loc]\n\n new_values = self._data.fast_xs(loc)\n\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[loc],\n dtype=new_values.dtype,\n )\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs: Callable = xs\n\n def __getitem__(self, 
item):\n raise AbstractMethodError(self)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:\n \"\"\"\n Construct a slice of this container.\n\n Slicing with this method is *always* positional.\n \"\"\"\n assert isinstance(slobj, slice), type(slobj)\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value) -> None:\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref, copy: bool_t = True) -> None:\n if not copy:\n self._is_copy = None\n else:\n assert ref is not None\n self._is_copy = weakref.ref(ref)\n\n def _check_is_chained_assignment_possible(self) -> bool_t:\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t=\"referant\", force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t=\"referant\")\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t=\"setting\", force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : int, default 4\n the level to show of the stack when the error is output\n t : str, the type of setting error\n force : bool, default False\n If True, then force showing an error.\n\n validate if we are doing a setitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. 
However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n # return early if the check is not needed\n if not (force or self._is_copy):\n return\n\n value = config.get_option(\"mode.chained_assignment\")\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n if self._is_copy is not None and not isinstance(self._is_copy, str):\n r = self._is_copy()\n if not gc.get_referents(r) or r.shape == self.shape:\n self._is_copy = None\n return\n\n # a custom message\n if isinstance(self._is_copy, str):\n t = self._is_copy\n\n elif t == \"referant\":\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n else:\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n if value == \"raise\":\n raise com.SettingWithCopyError(t)\n elif value == \"warn\":\n warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)\n\n def __delitem__(self, key) -> None:\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if self.ndim == 2 and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key,)\n for col in self.columns:\n if isinstance(col, tuple) and col[: len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (ex: DataFrame column).\n\n Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n @property\n def _is_view(self) -> bool_t:\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def reindex_like(\n self: FrameOrSeries,\n other,\n method: Optional[str] = None,\n copy: bool_t = True,\n limit=None,\n tolerance=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(\n axes=self._AXIS_ORDERS,\n method=method,\n copy=copy,\n limit=limit,\n tolerance=tolerance,\n )\n\n return self.reindex(**d)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace: bool_t = False,\n errors: str = \"raise\",\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', 'index' or 'columns'\"\n )\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(\n self: FrameOrSeries, labels, axis, level=None, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == \"raise\" and indexer.all():\n raise KeyError(f\"{labels} not found in axis\")\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == \"raise\" and labels_missing:\n raise KeyError(f\"{labels} not found in axis\")\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, \"_data\", result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self: 
FrameOrSeries, prefix: str) -> FrameOrSeries:\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{prefix}{}\".format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{}{suffix}\".format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool_t = False,\n ):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise AbstractMethodError(self)\n\n def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:\n \"\"\"\n Conform %(klass)s to new index with optional filling logic.\n\n Places NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data.\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: Propagate last valid observation forward to next\n valid.\n * backfill / bfill: Use next valid observation to fill gap.\n * nearest: Use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... 
index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
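For instance, such a pre-existing ``NaN`` can still be handled in a separate, second step, e.g. by chaining ``fillna`` after the reindex (the fill value ``0`` below is purely illustrative):\n\n >>> df2.reindex(date_index2, method='bfill').fillna(0)\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 0.0\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 0.0\n\n 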
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop(\"method\", None))\n level = kwargs.pop(\"level\", None)\n copy = kwargs.pop(\"copy\", True)\n limit = kwargs.pop(\"limit\", None)\n tolerance = kwargs.pop(\"tolerance\", None)\n fill_value = kwargs.pop(\"fill_value\", None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError(\n \"reindex() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(\n self._get_axis(axis).identical(ax)\n for axis, ax in axes.items()\n if ax is not None\n ):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n return self._reindex_multi(axes, copy, fill_value)\n\n # perform the reindex on the axes\n return self._reindex_axes(\n axes, level, limit, tolerance, method, fill_value, copy\n ).__finalize__(self)\n\n def _reindex_axes(\n self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy\n ) -> FrameOrSeries:\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(\n labels, level=level, limit=limit, tolerance=tolerance, method=method\n )\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers(\n {axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy,\n allow_dups=False,\n )\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level) -> bool_t:\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return (\n (com.count_not_none(*axes.values()) == self._AXIS_LEN)\n and method is None\n and level is None\n and not self._is_mixed_type\n )\n\n def _reindex_multi(self, axes, copy, fill_value):\n raise AbstractMethodError(self)\n\n def _reindex_with_indexers(\n self: FrameOrSeries,\n reindexers,\n fill_value=None,\n copy: bool_t = False,\n allow_dups: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"allow_dups indicates an internal call here \"\"\"\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(\n index,\n indexer,\n axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy,\n )\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(\n self: FrameOrSeries,\n items=None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Subset the dataframe rows or 
columns according to the specified index labels.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : str\n Keep labels from axis for which \"like in label == True\".\n regex : str (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n The axis to filter on, expressed either as an index (int)\n or axis name (str). By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \"\n \"are mutually exclusive\"\n )\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(**{name: [r for r in items if r in labels]})\n elif like:\n\n def f(x):\n return like in ensure_str(x)\n\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n\n def f(x):\n return matcher.search(ensure_str(x)) is not None\n\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n For negative values of `n`, this function returns all rows except\n the last `n` rows, equivalent to ``df[:-n]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n\n For negative values of `n`\n\n >>> df.head(-3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n \"\"\"\n return self.iloc[:n]\n\n def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3)\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(\n self: FrameOrSeries,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Allow or disallow sampling of the same row more than once.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n Axis to sample. Accepts axis number or name. 
Default is stat axis\n for given data type (0 for Series and DataFrames).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Notes\n -----\n If `frac` > 1, `replacement` should be set to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n An upsample sample of the ``DataFrame`` with replacement:\n Note that `replace` parameter has to be `True` for `frac` parameter > 1.\n\n >>> df.sample(frac=2, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n falcon 2 2 10\n falcon 2 2 10\n fish 0 0 8\n dog 4 0 2\n fish 0 0 8\n dog 4 0 2\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, ABCSeries):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, str):\n if isinstance(self, ABCDataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError as err:\n raise KeyError(\n \"String passed to weights not a valid column\"\n ) from err\n else:\n raise ValueError(\n \"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\"\n )\n else:\n raise ValueError(\n \"Strings cannot be passed as weights \"\n \"when sampling from a Series.\"\n )\n\n weights = pd.Series(weights, dtype=\"float64\")\n\n if len(weights) != axis_length:\n raise ValueError(\n \"Weights and axis to be sampled must be of same length\"\n )\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif frac is not None and frac > 1 and not replace:\n raise ValueError(\n \"Replace has to be set to `True` when \"\n 
\"upsampling the population `frac` > 1.\"\n )\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError(\"Please enter a value for `frac` OR `n`, not both\")\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\n \"A negative number of rows requested. Please provide positive value.\"\n )\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis)\n\n _shared_docs[\n \"pipe\"\n ] = r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n Function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n Positional arguments passed into ``func``.\n kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\"\n\n @Appender(_shared_docs[\"pipe\"] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com.pipe(self, func, *args, **kwargs)\n\n _shared_docs[\"aggregate\"] = dedent(\n \"\"\"\n Aggregate using one or more operations over the specified axis.\n %(versionadded)s\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n %(see_also)s\n Notes\n -----\n `agg` is an alias for `aggregate`. 
Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n %(examples)s\"\"\"\n )\n\n _shared_docs[\n \"transform\"\n ] = \"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values.\n\n Produced %(klass)s will have same axis length as self.\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(\n self: FrameOrSeries, other, method=None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in other.attrs:\n self.attrs[name] = other.attrs[name]\n # For subclasses using _metadata.\n for name in self._metadata:\n assert isinstance(name, str)\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name: str):\n \"\"\"\n After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (\n name in self._internal_names_set\n or name in self._metadata\n or name in self._accessors\n ):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name: str, value) -> None:\n \"\"\"\n After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\n \"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2,\n )\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\"\n add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {\n c\n for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n return super()._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"\n Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self) -> None:\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace: bool_t = False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : bool, default False\n If False return new object, otherwise modify existing object.\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value) -> bool_t:\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n if is_float(value) and np.isnan(value):\n return True\n\n raise TypeError(\n \"Cannot do inplace boolean setting on \"\n \"mixed-types with a non np.nan value\"\n )\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # 
Internal Interface Methods\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self) -> np.ndarray:\n \"\"\"internal implementation\"\"\"\n return self.values\n\n def _internal_get_values(self) -> np.ndarray:\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n \"\"\"\n return self.values\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 
'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n\n return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)\n\n def _to_dict_of_blocks(self, copy: bool_t = True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {\n k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()\n }\n\n def astype(\n self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n Create a DataFrame:\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n Cast all columns to int32:\n\n >>> df.astype('int32').dtypes\n col1 int32\n col2 int32\n dtype: object\n\n Cast col1 to int32 using a dictionary:\n\n >>> df.astype({'col1': 'int32'}).dtypes\n col1 int32\n col2 int64\n dtype: object\n\n Create a series:\n\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1, 2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. 
Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors)\n\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n results = []\n for col_name, col in self.items():\n if col_name in dtype:\n results.append(\n col.astype(dtype=dtype[col_name], copy=copy, errors=errors)\n )\n else:\n results.append(col.copy() if copy else col)\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = [\n self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns))\n ]\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series or DataFrame\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. 
Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n return self.copy(deep=deep)\n\n def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n def _convert(\n self: FrameOrSeries,\n datetime: bool_t = False,\n numeric: bool_t = False,\n timedelta: bool_t = False,\n coerce: bool_t = False,\n copy: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : bool, default False\n If True, convert to date where possible.\n numeric : bool, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : bool, default False\n If True, convert to timedelta where possible.\n coerce : bool, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT).\n copy : bool, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(coerce, \"coerce\")\n validate_bool_kwarg(copy, \"copy\")\n return self._constructor(\n self._data.convert(\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n ).__finalize__(self)\n\n def infer_objects(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. 
versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n convert_dtypes : Convert argument to best possible dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(\n datetime=True, numeric=False, timedelta=True, coerce=False, copy=True\n )\n ).__finalize__(self)\n\n def convert_dtypes(\n self: FrameOrSeries,\n infer_objects: bool_t = True,\n convert_string: bool_t = True,\n convert_integer: bool_t = True,\n convert_boolean: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n infer_objects : bool, default True\n Whether object dtypes should be converted to the best possible types.\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n\n Returns\n -------\n Series or DataFrame\n Copy of input object with new dtype.\n\n See Also\n --------\n infer_objects : Infer dtypes of objects.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n\n Notes\n -----\n By default, ``convert_dtypes`` will attempt to convert a Series (or each\n Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options\n ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is\n possible to turn off individual conversions to ``StringDtype``, the integer\n extension types or ``BooleanDtype``, respectively.\n\n For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference\n rules as during normal Series/DataFrame construction. Then, if possible,\n convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension\n type, otherwise leave as ``object``.\n\n If the dtype is integer, convert to an appropriate integer extension type.\n\n If the dtype is numeric, and consists of all integers, convert to an\n appropriate integer extension type.\n\n In the future, as new dtypes are added that support ``pd.NA``, the results\n of this method will change to support those new dtypes.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... \"a\": pd.Series([1, 2, 3], dtype=np.dtype(\"int32\")),\n ... \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=np.dtype(\"O\")),\n ... \"c\": pd.Series([True, False, np.nan], dtype=np.dtype(\"O\")),\n ... \"d\": pd.Series([\"h\", \"i\", np.nan], dtype=np.dtype(\"O\")),\n ... \"e\": pd.Series([10, np.nan, 20], dtype=np.dtype(\"float\")),\n ... \"f\": pd.Series([np.nan, 100.5, 200], dtype=np.dtype(\"float\")),\n ... }\n ... 
)\n\n Start with a DataFrame with default dtypes.\n\n >>> df\n a b c d e f\n 0 1 x True h 10.0 NaN\n 1 2 y False i NaN 100.5\n 2 3 z NaN NaN 20.0 200.0\n\n >>> df.dtypes\n a int32\n b object\n c object\n d object\n e float64\n f float64\n dtype: object\n\n Convert the DataFrame to use best possible dtypes.\n\n >>> dfn = df.convert_dtypes()\n >>> dfn\n a b c d e f\n 0 1 x True h 10 NaN\n 1 2 y False i <NA> 100.5\n 2 3 z <NA> <NA> 20 200.0\n\n >>> dfn.dtypes\n a Int32\n b string\n c boolean\n d string\n e Int64\n f float64\n dtype: object\n\n Start with a Series of strings and missing data represented by ``np.nan``.\n\n >>> s = pd.Series([\"a\", \"b\", np.nan])\n >>> s\n 0 a\n 1 b\n 2 NaN\n dtype: object\n\n Obtain a Series with dtype ``StringDtype``.\n\n >>> s.convert_dtypes()\n 0 a\n 1 b\n 2 <NA>\n dtype: string\n \"\"\"\n if self.ndim == 1:\n return self._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n else:\n results = [\n col._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n for col_name, col in self.items()\n ]\n result = pd.concat(results, axis=1, copy=False)\n return result\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n @doc(**_shared_doc_kwargs)\n def fillna(\n self: FrameOrSeries,\n value=None,\n method=None,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : {axes_single_arg}\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n new_data = self._data.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n coerce=True,\n downcast=downcast,\n )\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n value = create_series_with_explicit_dtype(\n value, dtype_if_empty=object\n )\n elif not is_list_like(value):\n pass\n else:\n raise TypeError(\n '\"value\" parameter must be a scalar, dict '\n \"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError(\n \"Currently only can fill \"\n \"with dict/Series column \"\n \"by column\"\n )\n\n result = self if inplace else self.copy()\n for k, v in value.items():\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n elif isinstance(value, ABCDataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(f\"invalid fill value with a {type(value)}\")\n\n if inplace:\n self._update_inplace(new_data)\n return None\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"ffill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n def bfill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n 
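# --- A minimal sketch (assumed data, not taken from the surrounding docstrings)
# --- of the ``fillna`` code paths implemented above: scalar fill, per-column
# --- dict fill, and the ffill/bfill shortcuts.
import numpy as np
import pandas as pd

df_na = pd.DataFrame({"A": [np.nan, 3.0, np.nan], "B": [2.0, np.nan, 4.0]})  # assumed

print(df_na.fillna(0))                    # scalar: every NaN becomes 0
print(df_na.fillna({"A": 0, "B": -1}))    # dict: filled column by column
print(df_na.fillna(method="ffill"))       # propagate last valid value forward
print(df_na.bfill())                      # shorthand for fillna(method="bfill")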
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"bfill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n _shared_docs[\n \"replace\"\n ] = \"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. 
a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n\n TypeError\n * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\"\n\n @Appender(_shared_docs[\"replace\"] % _shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n if not (\n is_scalar(to_replace)\n or isinstance(to_replace, pd.Series)\n or is_re_compilable(to_replace)\n or is_list_like(to_replace)\n ):\n raise TypeError(\n \"Expecting 'to_replace' to be either a scalar, array-like, \"\n \"dict or None, got invalid type \"\n f\"{repr(type(to_replace).__name__)}\"\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, ABCDataFrame):\n return self.apply(\n _single_replace, args=(to_replace, method, inplace, limit)\n )\n return _single_replace(self, to_replace, method, inplace, limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError(\n 'If \"to_replace\" and \"value\" are both None '\n 'and \"to_replace\" is not a list, then '\n \"regex must be a mapping\"\n )\n to_replace = regex\n regex = True\n\n items = list(to_replace.items())\n keys, values = zip(*items) if items 
else ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\n \"If a nested mapping is passed, all values \"\n \"of the top level mapping must be mappings\"\n )\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n keys, values = list(zip(*v.items())) or ([], [])\n\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(\n to_replace, value, inplace=inplace, limit=limit, regex=regex\n )\n else:\n\n # need a non-zero len on all axes\n if not self.size:\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in to_replace.items():\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursively\n res[c] = res[c].replace(\n to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex,\n )\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in to_replace.items() if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(\n to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert,\n )\n else:\n raise TypeError(\"value argument must be scalar, dict, or Series\")\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError(\n f\"Replacement lists must match in length. \"\n f\"Expecting {len(to_replace)} got {len(value)} \"\n )\n\n new_data = self._data.replace_list(\n src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex,\n )\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n elif to_replace is None:\n if not (\n is_re_compilable(regex)\n or is_list_like(regex)\n or is_dict_like(regex)\n ):\n raise TypeError(\n f\"'regex' must be a string or a compiled regular expression \"\n f\"or a list or dict of strings or regular expressions, \"\n f\"you passed a {repr(type(regex).__name__)}\"\n )\n return self.replace(\n regex, value, inplace=inplace, limit=limit, regex=True\n )\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in value.items():\n if k in self:\n new_data = new_data.replace(\n to_replace=to_replace,\n value=v,\n filter=[k],\n inplace=inplace,\n regex=regex,\n )\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n else:\n raise TypeError(\n f'Invalid \"to_replace\" type: {repr(type(to_replace).__name__)}'\n )\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"interpolate\"\n ] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. 
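# --- A small, assumed example of the ``replace`` dispatch implemented above:
# --- scalar, nested-dict, and regex forms on a toy frame (values invented).
import pandas as pd

df_rep = pd.DataFrame({"A": [0, 1, 2], "B": ["bat", "foo", "bait"]})  # assumed data

print(df_rep.replace(0, 5))                                    # scalar -> scalar
print(df_rep.replace({"A": {1: 100}}))                         # nested dict, per column
print(df_rep.replace(to_replace=r"^ba.$", value="new", regex=True))  # regex on strings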
This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 
4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry before it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs[\"interpolate\"] % _shared_doc_kwargs)\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n limit_area=None,\n downcast=None,\n **kwargs,\n ):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = self._get_axis_number(axis)\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if isinstance(_maybe_transposed_self.index, MultiIndex) and method != \"linear\":\n raise ValueError(\n \"Only `method=linear` interpolation is supported on MultiIndexes.\"\n )\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\"object\") == len(\n _maybe_transposed_self.T\n ):\n raise TypeError(\n \"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\"\n )\n\n # create/use the index\n if method == \"linear\":\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n methods = {\"index\", \"values\", \"nearest\", \"time\"}\n is_numeric_or_datetime = (\n is_numeric_dtype(index)\n or is_datetime64_any_dtype(index)\n or is_timedelta64_dtype(index)\n )\n if method not in methods and not is_numeric_or_datetime:\n raise ValueError(\n \"Index column must be numeric or datetime type when \"\n f\"using {method} method other than linear. \"\n \"Try setting a numeric or datetime index column before \"\n \"interpolating.\"\n )\n\n if isna(index).any():\n raise NotImplementedError(\n \"Interpolation with NaNs in the index \"\n \"has not been implemented. 
Try filling \"\n \"those NaNs before interpolating.\"\n )\n data = _maybe_transposed_self._data\n new_data = data.interpolate(\n method=method,\n axis=ax,\n index=index,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, str):\n where = Timestamp(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq)\n\n if where < start:\n if not is_series:\n from pandas import Series\n\n return Series(index=self.columns, name=where, dtype=np.float64)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side=\"right\")\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs[\n \"isna\"\n ] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isna(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isnull(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n _shared_docs[\n \"notna\"\n ] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notna(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notnull(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):\n if (lower is not None and np.any(isna(lower))) or (\n upper is not None and np.any(isna(upper))\n ):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all=\"ignore\"):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == \"le\":\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = self._constructor(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(\n self: FrameOrSeries,\n lower=None,\n upper=None,\n axis=None,\n inplace: bool_t = False,\n *args,\n **kwargs,\n ) -> FrameOrSeries:\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. 
versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(isna(lower)):\n lower = None\n if not is_list_like(upper) and np.any(isna(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if (lower is None or (is_scalar(lower) and is_number(lower))) and (\n upper is None or (is_scalar(upper) and is_number(upper))\n ):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(\n lower, method=self.ge, axis=axis, inplace=inplace\n )\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(\n upper, method=self.le, axis=axis, inplace=inplace\n )\n\n return result\n\n _shared_docs[\n \"groupby\"\n ] = \"\"\"\n Group %(klass)s using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted as a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. 
Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n %(klass)sGroupBy\n Returns a groupby object that contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n \"\"\"\n\n def asfreq(\n self: FrameOrSeries,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool_t = False,\n fill_value=None,\n ) -> FrameOrSeries:\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset or str\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill.\n how : {'start', 'end'}, default end\n For PeriodIndex only (see PeriodIndex.asfreq).\n normalize : bool, default False\n Whether to reset output index to midnight.\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample 
import asfreq\n\n return asfreq(\n self,\n freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n def at_time(\n self: FrameOrSeries, time, asof: bool_t = False, axis=None\n ) -> FrameOrSeries:\n \"\"\"\n Select values at particular time of day (e.g., 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def between_time(\n self: FrameOrSeries,\n start_time,\n end_time,\n include_start: bool_t = True,\n include_end: bool_t = True,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time,\n end_time,\n include_start=include_start,\n include_end=include_end,\n )\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: int = 0,\n on=None,\n level=None,\n ) -> \"Resampler\":\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : DateOffset, Timedelta or str\n The offset string or object representing target conversion.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
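# --- Assumed-data sketch of the DatetimeIndex-based selectors and ``asfreq``
# --- documented above (index and values are invented for illustration).
import pandas as pd

idx = pd.date_range("2018-04-09", periods=4, freq="12H")
ts_example = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)

print(ts_example.at_time("12:00"))                 # rows at exactly 12:00
print(ts_example.between_time("00:00", "12:00"))   # rows inside the time window
print(ts_example.asfreq("6H", method="ffill"))     # reindex to 6-hourly, pad forward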
Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.\n\n Examples\n --------\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... 
))\n        >>> df2\n                              price  volume\n        2000-01-01 morning       10      50\n                   afternoon     11      60\n        2000-01-02 morning        9      40\n                   afternoon     13     100\n        2000-01-03 morning       14      50\n                   afternoon     18     100\n        2000-01-04 morning       17      40\n                   afternoon     19      50\n        >>> df2.resample('D', level=0).sum()\n                    price  volume\n        2000-01-01     21     110\n        2000-01-02     22     140\n        2000-01-03     32     150\n        2000-01-04     36      90\n        \"\"\"\n        from pandas.core.resample import get_resampler\n\n        axis = self._get_axis_number(axis)\n        return get_resampler(\n            self,\n            freq=rule,\n            label=label,\n            closed=closed,\n            axis=axis,\n            kind=kind,\n            loffset=loffset,\n            convention=convention,\n            base=base,\n            key=on,\n            level=level,\n        )\n\n    def first(self: FrameOrSeries, offset) -> FrameOrSeries:\n        \"\"\"\n        Method to subset initial periods of time series data based on a date offset.\n\n        Parameters\n        ----------\n        offset : str, DateOffset, dateutil.relativedelta\n\n        Returns\n        -------\n        subset : same type as caller\n\n        Raises\n        ------\n        TypeError\n            If the index is not a :class:`DatetimeIndex`\n\n        See Also\n        --------\n        last : Select final periods of time series based on a date offset.\n        at_time : Select values at a particular time of the day.\n        between_time : Select values between particular times of the day.\n\n        Examples\n        --------\n        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n        >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n        >>> ts\n                    A\n        2018-04-09  1\n        2018-04-11  2\n        2018-04-13  3\n        2018-04-15  4\n\n        Get the rows for the first 3 days:\n\n        >>> ts.first('3D')\n                    A\n        2018-04-09  1\n        2018-04-11  2\n\n        Notice the data for the first 3 calendar days were returned, not the first\n        3 days observed in the dataset, and therefore data for 2018-04-13 was\n        not returned.\n        \"\"\"\n        if not isinstance(self.index, DatetimeIndex):\n            raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n        if len(self.index) == 0:\n            return self\n\n        offset = to_offset(offset)\n        end_date = end = self.index[0] + offset\n\n        # Tick-like, e.g. 
3 weeks\n        if not offset.is_anchored() and hasattr(offset, \"_inc\"):\n            if end_date in self.index:\n                end = self.index.searchsorted(end_date, side=\"left\")\n                return self.iloc[:end]\n\n        return self.loc[:end]\n\n    def last(self: FrameOrSeries, offset) -> FrameOrSeries:\n        \"\"\"\n        Method to subset final periods of time series data based on a date offset.\n\n        Parameters\n        ----------\n        offset : str, DateOffset, dateutil.relativedelta\n\n        Returns\n        -------\n        subset : same type as caller\n\n        Raises\n        ------\n        TypeError\n            If the index is not a :class:`DatetimeIndex`\n\n        See Also\n        --------\n        first : Select initial periods of time series based on a date offset.\n        at_time : Select values at a particular time of the day.\n        between_time : Select values between particular times of the day.\n\n        Examples\n        --------\n        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n        >>> ts\n                    A\n        2018-04-09  1\n        2018-04-11  2\n        2018-04-13  3\n        2018-04-15  4\n\n        Get the rows for the last 3 days:\n\n        >>> ts.last('3D')\n                    A\n        2018-04-13  3\n        2018-04-15  4\n\n        Notice the data for the last 3 calendar days were returned, not the last\n        3 observed days in the dataset, and therefore data for 2018-04-11 was\n        not returned.\n        \"\"\"\n        if not isinstance(self.index, DatetimeIndex):\n            raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n        if len(self.index) == 0:\n            return self\n\n        offset = to_offset(offset)\n\n        start_date = self.index[-1] - offset\n        start = self.index.searchsorted(start_date, side=\"right\")\n        return self.iloc[start:]\n\n    def rank(\n        self: FrameOrSeries,\n        axis=0,\n        method: str = \"average\",\n        numeric_only: Optional[bool_t] = None,\n        na_option: str = \"keep\",\n        ascending: bool_t = True,\n        pct: bool_t = False,\n    ) -> FrameOrSeries:\n        \"\"\"\n        Compute numerical data ranks (1 through n) along axis.\n\n        By default, equal values are assigned a rank that is the average of the\n        ranks of those values.\n\n        Parameters\n        ----------\n        axis : {0 or 'index', 1 or 'columns'}, default 0\n            Index to direct ranking.\n        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n            How to rank the group of records that have the same value (i.e. ties):\n\n            * average: average rank of the group\n            * min: lowest rank in the group\n            * max: highest rank in the group\n            * first: ranks assigned in order they appear in the array\n            * dense: like 'min', but rank always increases by 1 between groups.\n\n        numeric_only : bool, optional\n            For DataFrame objects, rank only numeric columns if set to True.\n        na_option : {'keep', 'top', 'bottom'}, default 'keep'\n            How to rank NaN values:\n\n            * keep: assign NaN rank to NaN values\n            * top: assign smallest rank to NaN values if ascending\n            * bottom: assign highest rank to NaN values if ascending.\n\n        ascending : bool, default True\n            Whether or not the elements should be ranked in ascending order.\n        pct : bool, default False\n            Whether or not to display the returned rankings in percentile\n            form.\n\n        Returns\n        -------\n        same type as caller\n            Return a Series or DataFrame with data ranks as values.\n\n        See Also\n        --------\n        core.groupby.GroupBy.rank : Rank of values within each group.\n\n        Examples\n        --------\n        >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',\n        ...                                    'spider', 'snake'],\n        ...                         
'Number_legs': [4, 2, 4, 8, np.nan]})\n >>> df\n Animal Number_legs\n 0 cat 4.0\n 1 penguin 2.0\n 2 dog 4.0\n 3 spider 8.0\n 4 snake NaN\n\n The following example shows how the method behaves with the above\n parameters:\n\n * default_rank: this is the default behaviour obtained without using\n any parameter.\n * max_rank: setting ``method = 'max'`` the records that have the\n same values are ranked using the highest rank (e.g.: since 'cat'\n and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)\n * NA_bottom: choosing ``na_option = 'bottom'``, if there are records\n with NaN values they are placed at the bottom of the ranking.\n * pct_rank: when setting ``pct = True``, the ranking is expressed as\n percentile rank.\n\n >>> df['default_rank'] = df['Number_legs'].rank()\n >>> df['max_rank'] = df['Number_legs'].rank(method='max')\n >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')\n >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)\n >>> df\n Animal Number_legs default_rank max_rank NA_bottom pct_rank\n 0 cat 4.0 2.5 3.0 2.5 0.625\n 1 penguin 2.0 1.0 1.0 1.0 0.250\n 2 dog 4.0 2.5 3.0 2.5 0.625\n 3 spider 8.0 4.0 4.0 4.0 1.000\n 4 snake NaN NaN NaN 5.0 NaN\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(\n data.values,\n axis=axis,\n method=method,\n ascending=ascending,\n na_option=na_option,\n pct=pct,\n )\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs[\n \"align\"\n ] = \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series:\n\n - pad / ffill: propagate last valid observation forward to next valid.\n - backfill / bfill: use NEXT valid observation to fill gap.\n\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit.\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions.\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects.\n \"\"\"\n\n @Appender(_shared_docs[\"align\"] % _shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, ABCSeries):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons(\n {c: self for c in other.columns}, **other._construct_axes_dict()\n )\n return df._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons(\n {c: other for c in self.columns}, **self._construct_axes_dict()\n )\n return self._align_frame(\n df,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, ABCDataFrame):\n return self._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n return self._align_series(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def _align_frame(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True\n )\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(\n reindexers, copy=copy, fill_value=fill_value, allow_dups=True\n )\n # other must be always DataFrame\n right = other._reindex_with_indexers(\n {0: [join_index, iridx], 1: [join_columns, cridx]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=True,\n )\n\n if method is not None:\n left = self._ensure_type(\n left.fillna(method=method, axis=fill_axis, limit=limit)\n )\n right = right.fillna(method=method, axis=fill_axis, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is 
not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _align_series(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError(\"cannot align series to a series other than axis 0\")\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError(\"Must specify axis=0 or 1\")\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join=\"right\", broadcast_axis=1)\n else:\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, ABCDataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, \"align\"):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(\n other, join=\"left\", axis=axis, level=level, fill_value=np.nan\n )\n\n # if we are NOT aligned, raise as we cannot where index\n if axis is None and not all(\n other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)\n ):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\n \"cannot align with a higher dimensional NDFrame\"\n )\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n new_other = np.asarray(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n\n else:\n raise ValueError(\n \"Length of replacements must equal series length\"\n )\n\n else:\n raise ValueError(\n \"other must be the same shape as self when an ndarray\"\n )\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, \"ndim\", 0):\n align = True\n else:\n align = self._get_axis_number(axis) == 1\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(\n mask=cond,\n new=other,\n align=align,\n inplace=True,\n axis=block_axis,\n transpose=self._AXIS_REVERSED,\n )\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(\n other=other,\n cond=cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=block_axis,\n )\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"where\"\n ] = \"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : bool %(klass)s, array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. 
The callable must\n not change input %(klass)s (though pandas doesn't check it).\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default 'raise'\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - 'raise' : allow exceptions to be raised.\n - 'ignore' : suppress exceptions. On error return original object.\n\n try_cast : bool, default False\n Try to cast the result back to the input type (if possible).\n\n Returns\n -------\n Same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 2 3\n 2 4 5\n 3 6 7\n 4 8 9\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\"\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"True\",\n cond_rev=\"False\",\n name=\"where\",\n name_other=\"mask\",\n )\n )\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n other = com.apply_if_callable(other, self)\n return self._where(\n cond, other, inplace, axis, level, errors=errors, try_cast=try_cast\n )\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"False\",\n cond_rev=\"True\",\n name=\"mask\",\n name_other=\"where\",\n )\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(\n ~cond,\n other=other,\n inplace=inplace,\n axis=axis,\n level=level,\n try_cast=try_cast,\n errors=errors,\n )\n\n _shared_docs[\n \"shift\"\n ] = \"\"\"\n Shift index by desired number of periods with an 
optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\"\n\n @Appender(_shared_docs[\"shift\"] % _shared_doc_kwargs)\n def shift(\n self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None\n ) -> FrameOrSeries:\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(\n periods=periods, axis=block_axis, fill_value=fill_value\n )\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:\n \"\"\"\n Equivalent to `shift` without copying data.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(\n self: FrameOrSeries, periods: int = 1, freq=None, axis=0\n ) -> FrameOrSeries:\n \"\"\"\n Shift the time index, using the index's frequency if 
available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n freq : DateOffset, timedelta, or str, default None\n Increment to use from the tseries module\n or time rule expressed as a string (e.g. 'EOM').\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0\n Corresponds to the axis that contains the Index.\n\n Returns\n -------\n shifted : Series/DataFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, \"freq\", None)\n\n if freq is None:\n freq = getattr(index, \"inferred_freq\", None)\n\n if freq is None:\n msg = \"Freq was not given and was not set in the index\"\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, str):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n elif orig_freq is not None:\n raise ValueError(\n f\"Given freq {freq.rule_code} does not match \"\n f\"PeriodIndex freq {orig_freq.rule_code}\"\n )\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(\n self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... 
after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError(f\"Truncate: {after} must be after {before}\")\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis), ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(\n self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : str or tzinfo object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. 
Otherwise\n            must be None.\n        copy : bool, default True\n            Also make a copy of the underlying data.\n\n        Returns\n        -------\n        %(klass)s\n            Object with time zone converted axis.\n\n        Raises\n        ------\n        TypeError\n            If the axis is tz-naive.\n        \"\"\"\n        axis = self._get_axis_number(axis)\n        ax = self._get_axis(axis)\n\n        def _tz_convert(ax, tz):\n            if not hasattr(ax, \"tz_convert\"):\n                if len(ax) > 0:\n                    ax_name = self._get_axis_name(axis)\n                    raise TypeError(\n                        f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n                    )\n                else:\n                    ax = DatetimeIndex([], tz=tz)\n            else:\n                ax = ax.tz_convert(tz)\n            return ax\n\n        # if a level is given it must be a MultiIndex level or\n        # equivalent to the axis name\n        if isinstance(ax, MultiIndex):\n            level = ax._get_level_number(level)\n            new_level = _tz_convert(ax.levels[level], tz)\n            ax = ax.set_levels(new_level, level=level)\n        else:\n            if level not in (None, 0, ax.name):\n                raise ValueError(f\"The level {level} is not valid\")\n            ax = _tz_convert(ax, tz)\n\n        result = self._constructor(self._data, copy=copy)\n        result = result.set_axis(ax, axis=axis, inplace=False)\n        return result.__finalize__(self)\n\n    def tz_localize(\n        self: FrameOrSeries,\n        tz,\n        axis=0,\n        level=None,\n        copy: bool_t = True,\n        ambiguous=\"raise\",\n        nonexistent: str = \"raise\",\n    ) -> FrameOrSeries:\n        \"\"\"\n        Localize tz-naive index of a Series or DataFrame to target time zone.\n\n        This operation localizes the Index. To localize the values in a\n        timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n        Parameters\n        ----------\n        tz : str or tzinfo\n        axis : the axis to localize\n        level : int, str, default None\n            If axis is a MultiIndex, localize a specific level. Otherwise\n            must be None.\n        copy : bool, default True\n            Also make a copy of the underlying data.\n        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n            When clocks moved backward due to DST, ambiguous times may arise.\n            For example in Central European Time (UTC+01), when going from\n            03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n            00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n            `ambiguous` parameter dictates how ambiguous times should be\n            handled.\n\n            - 'infer' will attempt to infer fall dst-transition hours based on\n              order\n            - bool-ndarray where True signifies a DST time, False designates\n              a non-DST time (note that this flag is only applicable for\n              ambiguous times)\n            - 'NaT' will return NaT where there are ambiguous times\n            - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n              times.\n        nonexistent : str, default 'raise'\n            A nonexistent time does not exist in a particular timezone\n            where clocks moved forward due to DST. Valid values are:\n\n            - 'shift_forward' will shift the nonexistent time forward to the\n              closest existing time\n            - 'shift_backward' will shift the nonexistent time backward to the\n              closest existing time\n            - 'NaT' will return NaT where there are nonexistent times\n            - timedelta objects will shift nonexistent times by the timedelta\n            - 'raise' will raise a NonExistentTimeError if there are\n              nonexistent times.\n\n            .. versionadded:: 0.24.0\n\n        Returns\n        -------\n        Series or DataFrame\n            Same type as the input.\n\n        Raises\n        ------\n        TypeError\n            If the TimeSeries is tz-aware and tz is not None.\n\n        Examples\n        --------\n        Localize local times:\n\n        >>> s = pd.Series([1],\n        ...               index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n        >>> s.tz_localize('CET')\n        2018-09-15 01:30:00+02:00    1\n        dtype: int64\n\n        Be careful with DST changes. 
When there is sequential data, pandas\n        can infer the DST time:\n\n        >>> s = pd.Series(range(7),\n        ...               index=pd.DatetimeIndex(['2018-10-28 01:30:00',\n        ...                                       '2018-10-28 02:00:00',\n        ...                                       '2018-10-28 02:30:00',\n        ...                                       '2018-10-28 02:00:00',\n        ...                                       '2018-10-28 02:30:00',\n        ...                                       '2018-10-28 03:00:00',\n        ...                                       '2018-10-28 03:30:00']))\n        >>> s.tz_localize('CET', ambiguous='infer')\n        2018-10-28 01:30:00+02:00    0\n        2018-10-28 02:00:00+02:00    1\n        2018-10-28 02:30:00+02:00    2\n        2018-10-28 02:00:00+01:00    3\n        2018-10-28 02:30:00+01:00    4\n        2018-10-28 03:00:00+01:00    5\n        2018-10-28 03:30:00+01:00    6\n        dtype: int64\n\n        In some cases, inferring the DST is impossible. In such cases, you can\n        pass an ndarray to the ambiguous parameter to set the DST explicitly.\n\n        >>> s = pd.Series(range(3),\n        ...               index=pd.DatetimeIndex(['2018-10-28 01:20:00',\n        ...                                       '2018-10-28 02:36:00',\n        ...                                       '2018-10-28 03:46:00']))\n        >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n        2018-10-28 01:20:00+02:00    0\n        2018-10-28 02:36:00+02:00    1\n        2018-10-28 03:46:00+01:00    2\n        dtype: int64\n\n        If the DST transition causes nonexistent times, you can shift these\n        dates forward or backwards with a timedelta object or `'shift_forward'`\n        or `'shift_backward'`.\n\n        >>> s = pd.Series(range(2),\n        ...               index=pd.DatetimeIndex(['2015-03-29 02:30:00',\n        ...                                       '2015-03-29 03:30:00']))\n        >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n        2015-03-29 03:00:00+02:00    0\n        2015-03-29 03:30:00+02:00    1\n        dtype: int64\n        >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n        2015-03-29 01:59:59.999999999+01:00    0\n        2015-03-29 03:30:00+02:00              1\n        dtype: int64\n        >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n        2015-03-29 03:30:00+02:00    0\n        2015-03-29 03:30:00+02:00    1\n        dtype: int64\n        \"\"\"\n        nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n        if nonexistent not in nonexistent_options and not isinstance(\n            nonexistent, timedelta\n        ):\n            raise ValueError(\n                \"The nonexistent argument must be one of 'raise', \"\n                \"'NaT', 'shift_forward', 'shift_backward' or \"\n                \"a timedelta object\"\n            )\n\n        axis = self._get_axis_number(axis)\n        ax = self._get_axis(axis)\n\n        def _tz_localize(ax, tz, ambiguous, nonexistent):\n            if not hasattr(ax, \"tz_localize\"):\n                if len(ax) > 0:\n                    ax_name = self._get_axis_name(axis)\n                    raise TypeError(\n                        f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n                    )\n                else:\n                    ax = DatetimeIndex([], tz=tz)\n            else:\n                ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)\n            return ax\n\n        # if a level is given it must be a MultiIndex level or\n        # equivalent to the axis name\n        if isinstance(ax, MultiIndex):\n            level = ax._get_level_number(level)\n            new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)\n            ax = ax.set_levels(new_level, level=level)\n        else:\n            if level not in (None, 0, ax.name):\n                raise ValueError(f\"The level {level} is not valid\")\n            ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n        result = self._constructor(self._data, copy=copy)\n        result = result.set_axis(ax, axis=axis, inplace=False)\n        return result.__finalize__(self)\n\n    # ----------------------------------------------------------------------\n    # Numeric Methods\n    def abs(self: FrameOrSeries) -> FrameOrSeries:\n        \"\"\"\n        Return a Series/DataFrame with absolute numeric value of each element.\n\n        This function only applies to elements that are all numeric.\n\n        Returns\n        -------\n        abs\n            Series/DataFrame containing the absolute value of each element.\n\n        See Also\n        --------\n        
numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(\n self: FrameOrSeries, percentiles=None, include=None, exclude=None\n ) -> FrameOrSeries:\n \"\"\"\n Generate descriptive statistics.\n\n Descriptive statistics include those that summarize the central\n tendency, dispersion and shape of a\n dataset's distribution, excluding ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). 
To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... 
})\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (\n [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n )\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = [\"count\", \"unique\"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n dtype = None\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n names += [\"top\", \"freq\"]\n result += [top, freq]\n\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += [\"top\", \"freq\"]\n result += [np.nan, np.nan]\n dtype = \"object\"\n\n return pd.Series(result, index=names, name=data.name, dtype=dtype)\n\n def describe_timestamp_1d(data):\n # GH-30164\n stat_index = [\"count\", \"mean\", \"min\"] + formatted_percentiles + [\"max\"]\n d = (\n [data.count(), data.mean(), data.min()]\n + data.quantile(percentiles).tolist()\n 
+ [data.max()]\n )\n return pd.Series(d, index=stat_index, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_datetime64_any_dtype(data):\n return describe_timestamp_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.items()]\n # set a convenient order for rows\n names: List[Label] = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)\n d.columns = data.columns.copy()\n return d\n\n _shared_docs[\n \"pct_change\"\n ] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or str, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... 
index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs[\"pct_change\"] % _shared_doc_kwargs)\n def pct_change(\n self: FrameOrSeries,\n periods=1,\n fill_method=\"pad\",\n limit=None,\n freq=None,\n **kwargs,\n ) -> FrameOrSeries:\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop(\"axis\", self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self._ensure_type(\n self.fillna(method=fill_method, axis=axis, limit=limit)\n )\n\n rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1\n if freq is not None:\n # Shift method is implemented differently when freq is not None\n # We want to restore the original index\n rs = rs.loc[~rs.index.duplicated()]\n rs = rs.reindex_like(data)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n axis_descr, name1, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls,\n \"any\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_any_desc,\n func=nanops.nanany,\n see_also=_any_see_also,\n examples=_any_examples,\n empty_value=False,\n )\n cls.all = _make_logical_function(\n cls,\n \"all\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_all_desc,\n func=nanops.nanall,\n see_also=_all_see_also,\n examples=_all_examples,\n empty_value=True,\n )\n\n @Substitution(\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=\"\",\n examples=\"\",\n )\n @Appender(_num_doc_mad)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\"mad\", axis=axis, level=level, skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls,\n \"sem\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased standard 
error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. This can be changed \"\n \"using the ddof argument\",\n func=nanops.nansem,\n )\n cls.var = _make_stat_function_ddof(\n cls,\n \"var\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n func=nanops.nanvar,\n )\n cls.std = _make_stat_function_ddof(\n cls,\n \"std\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. This can be changed using the \"\n \"ddof argument\",\n func=nanops.nanstd,\n )\n\n cls.cummin = _make_cum_function(\n cls,\n \"cummin\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"minimum\",\n accum_func=np.minimum.accumulate,\n accum_func_name=\"min\",\n mask_a=np.inf,\n mask_b=np.nan,\n examples=_cummin_examples,\n )\n cls.cumsum = _make_cum_function(\n cls,\n \"cumsum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"sum\",\n accum_func=np.cumsum,\n accum_func_name=\"sum\",\n mask_a=0.0,\n mask_b=np.nan,\n examples=_cumsum_examples,\n )\n cls.cumprod = _make_cum_function(\n cls,\n \"cumprod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"product\",\n accum_func=np.cumprod,\n accum_func_name=\"prod\",\n mask_a=1.0,\n mask_b=np.nan,\n examples=_cumprod_examples,\n )\n cls.cummax = _make_cum_function(\n cls,\n \"cummax\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"maximum\",\n accum_func=np.maximum.accumulate,\n accum_func_name=\"max\",\n mask_a=-np.inf,\n mask_b=np.nan,\n examples=_cummax_examples,\n )\n\n cls.sum = _make_min_count_stat_function(\n cls,\n \"sum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the sum of the values for the requested axis.\\n\\n\"\n \"This is equivalent to the method ``numpy.sum``.\",\n func=nanops.nansum,\n see_also=_stat_func_see_also,\n examples=_sum_examples,\n )\n cls.mean = _make_stat_function(\n cls,\n \"mean\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the mean of the values for the requested axis.\",\n func=nanops.nanmean,\n )\n cls.skew = _make_stat_function(\n cls,\n \"skew\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased skew over requested axis.\\n\\nNormalized by N-1.\",\n func=nanops.nanskew,\n )\n cls.kurt = _make_stat_function(\n cls,\n \"kurt\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased kurtosis over requested axis.\\n\\n\"\n \"Kurtosis obtained using Fisher's definition of\\n\"\n \"kurtosis (kurtosis of normal == 0.0). 
Normalized \"\n \"by N-1.\",\n func=nanops.nankurt,\n )\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls,\n \"prod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the product of the values for the requested axis.\",\n func=nanops.nanprod,\n examples=_prod_examples,\n )\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls,\n \"median\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the median of the values for the requested axis.\",\n func=nanops.nanmedian,\n )\n cls.max = _make_stat_function(\n cls,\n \"max\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the maximum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the maximum, use ``idxmax``. This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmax``.\",\n func=nanops.nanmax,\n see_also=_stat_func_see_also,\n examples=_max_examples,\n )\n cls.min = _make_stat_function(\n cls,\n \"min\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the minimum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the minimum, use ``idxmin``. This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmin``.\",\n func=nanops.nanmin,\n see_also=_stat_func_see_also,\n examples=_min_examples,\n )\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n from pandas.core.window import EWM, Expanding, Rolling, Window\n\n @Appender(Rolling.__doc__)\n def rolling(\n self,\n window,\n min_periods=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n axis = self._get_axis_number(axis)\n\n if win_type is not None:\n return Window(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n cls.rolling = rolling\n\n @Appender(Expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return Expanding(self, min_periods=min_periods, center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(EWM.__doc__)\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n axis = self._get_axis_number(axis)\n return EWM(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n )\n\n cls.ewm = ewm\n\n @Appender(_shared_docs[\"transform\"] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs[\n \"valid_index\"\n ] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n -------\n scalar : type of index\n\n Notes\n -----\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n 
Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n idxpos = find_valid_index(self._values, how)\n if idxpos is None:\n return None\n return self.index[idxpos]\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"first\", \"klass\": \"Series/DataFrame\"}\n )\n def first_valid_index(self):\n return self._find_valid_index(\"first\")\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"last\", \"klass\": \"Series/DataFrame\"}\n )\n def last_valid_index(self):\n return self._find_valid_index(\"last\")\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = (\n f\"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}\"\n )\n name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else \"scalar\"\n name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_doc_mad = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default None\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. 
If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. 
This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs[\n \"stat_func_example\"\n] = \"\"\"\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\"\"\"\n\n_sum_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"sum\", verb=\"Sum\", default_output=14, level_output_0=6, level_output_1=8\n)\n\n_sum_examples += \"\"\"\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\"\"\"\n\n_max_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"max\", verb=\"Max\", default_output=8, level_output_0=4, level_output_1=8\n)\n\n_min_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"min\", verb=\"Min\", default_output=0, level_output_0=2, level_output_1=0\n)\n\n_stat_func_see_also = \"\"\"\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\"\"\"\n\n_prod_examples = \"\"\"\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded:: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=_min_count_stub,\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ):\n if name == \"sum\":\n nv.validate_sum(tuple(), kwargs)\n elif name == \"prod\":\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, min_count=min_count\n )\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n min_count=min_count,\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs\n ):\n if name == \"median\":\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(\n cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable\n) -> Callable:\n @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, ddof=ddof\n )\n return self._reduce(\n func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n accum_func: Callable,\n accum_func_name: str,\n mask_a: float,\n mask_b: float,\n examples: str,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n accum_func_name=accum_func_name,\n examples=examples,\n )\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n if axis == 1:\n return cum_func(self.T, axis=0, skipna=skipna, 
*args, **kwargs).T\n\n def na_accum_func(blk_values):\n # We will be applying this function to block values\n if blk_values.dtype.kind in [\"m\", \"M\"]:\n # GH#30460, GH#29058\n # numpy 1.18 started sorting NaTs at the end instead of beginning,\n # so we need to work around to maintain backwards-consistency.\n orig_dtype = blk_values.dtype\n\n # We need to define mask before masking NaTs\n mask = isna(blk_values)\n\n if accum_func == np.minimum.accumulate:\n # Note: the accum_func comparison fails as an \"is\" comparison\n y = blk_values.view(\"i8\")\n y[mask] = np.iinfo(np.int64).max\n changed = True\n else:\n y = blk_values\n changed = False\n\n result = accum_func(y.view(\"i8\"), axis)\n if skipna:\n np.putmask(result, mask, iNaT)\n elif accum_func == np.minimum.accumulate:\n # Restore NaTs that we masked previously\n nz = (~np.asarray(mask)).nonzero()[0]\n if len(nz):\n # everything up to the first non-na entry stays NaT\n result[: nz[0]] = iNaT\n\n if changed:\n # restore NaT elements\n y[mask] = iNaT # TODO: could try/finally for this?\n\n if isinstance(blk_values, np.ndarray):\n result = result.view(orig_dtype)\n else:\n # DatetimeArray\n result = type(blk_values)._from_sequence(result, dtype=orig_dtype)\n\n elif skipna and not issubclass(\n blk_values.dtype.type, (np.integer, np.bool_)\n ):\n vals = blk_values.copy().T\n mask = isna(vals)\n np.putmask(vals, mask, mask_a)\n result = accum_func(vals, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(blk_values.T, axis)\n\n # transpose back for ndarray, not for EA\n return result.T if hasattr(result, \"T\") else result\n\n result = self._data.apply(na_accum_func)\n\n d = self._construct_axes_dict()\n d[\"copy\"] = False\n return self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str,\n examples: str,\n empty_value: bool,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=see_also,\n examples=examples,\n empty_value=empty_value,\n )\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\n \"Option bool_only is not implemented with option level.\"\n )\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=bool_only,\n filter_type=\"bool\",\n )\n\n return set_function_name(logical_func, name, cls)\n"
] | [
[
"pandas.tseries.frequencies.to_offset",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.inference.is_hashable",
"numpy.unique",
"numpy.asanyarray",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.concat",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.io.pickle.to_pickle",
"numpy.array",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.window.Window",
"pandas.core.dtypes.missing.isna",
"pandas.io.sql.to_sql",
"pandas.Series",
"numpy.asarray",
"pandas.io.json.to_json",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.iinfo",
"pandas.core.common.SettingWithCopyError",
"pandas.compat._optional.import_optional_dependency",
"pandas._config.config.is_nonnegative_int",
"numpy.putmask",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.ensure_str",
"pandas.core.indexes.api.Index",
"numpy.errstate",
"pandas.core.computation.parsing.clean_column_name",
"pandas.core.common.random_state",
"pandas.core.dtypes.common.is_integer",
"pandas.core.resample.get_resampler",
"pandas.util._decorators.doc",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.window.EWM",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.missing.find_valid_index",
"pandas.core.dtypes.missing.notna",
"pandas.DataFrame",
"pandas.core.common.pipe",
"pandas.core.indexes.api.RangeIndex",
"pandas.io.pytables.to_hdf",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.AbstractMethodError",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.window.Rolling",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.isnan",
"pandas.core.algorithms.rank",
"pandas.core.missing.mask_missing",
"pandas.core.dtypes.common.is_bool",
"pandas.core.missing.get_fill_func",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.missing.clean_fill_method",
"pandas.core.common.get_rename_function",
"pandas.core.dtypes.common.is_scalar",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.io.formats.format.format_percentiles",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.indexes.api.ensure_index",
"pandas.core.resample.asfreq",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.util._validators.validate_percentile",
"pandas.core.indexes.period.Period",
"numpy.any",
"pandas.io.clipboards.to_clipboard",
"pandas.core.window.Expanding",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.tools.datetimes.to_datetime",
"pandas.core.common.maybe_make_list",
"pandas._config.config.get_option",
"pandas.core.common.count_not_none",
"numpy.abs",
"pandas.core.ops._align_method_FRAME",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.common.apply_if_callable",
"numpy.prod",
"pandas.core.common.index_labels_to_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
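The `code` cell in the pandas row above implements `pct_change` as a shift-and-divide (`rs = data.div(data.shift(periods=periods, ...)) - 1`). As an illustrative sketch only (not part of the dataset row), the snippet below reproduces that relationship using the FR/GR figures from the docstring example embedded in the same cell; everything else is plain pandas with no assumptions about the surrounding dataset.

```python
# Illustrative sketch -- not part of the dataset row above.
# pct_change reduces to "divide by the shifted frame and subtract 1",
# mirroring `rs = data.div(data.shift(periods=periods, ...)) - 1`.
import pandas as pd

df = pd.DataFrame(
    {"FR": [4.0405, 4.0963, 4.3149], "GR": [1.7246, 1.7482, 1.8519]},
    index=["1980-01-01", "1980-02-01", "1980-03-01"],
)

manual = df / df.shift(periods=1) - 1   # the core computation, spelled out
builtin = df.pct_change()               # the method defined in the code cell

print(manual)
print(builtin)  # the two frames agree row for row (first row is NaN in both)
```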
dajtmullaj/example_conda_pkg | [
"7c2bf657d14c714608e653d7218fa3cd658a6297"
] | [
"example_conda_pkg/descriptors.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 3 21:21:19 2020\n\nProject: chemplot (Chemical Space Visualization)\nContent: Descriptor operation methods\n\n@author: murat cihan sorkun\n\"\"\"\n\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport pandas as pd\nimport math\nimport mordred\nfrom mordred import Calculator, descriptors #Dont remove these imports\nfrom sklearn.linear_model import Lasso, LogisticRegression\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.preprocessing import StandardScaler\n\ndef get_mordred_descriptors(smiles_list):\n \"\"\"\n Calculates the Mordred descriptors for given smiles list\n \n :param smiles_list: List of smiles\n :type smiles_list: list\n :returns: The calculated descriptors list for the given smiles\n :rtype: Dataframe\n \"\"\" \n \n return generate_mordred_descriptors(smiles_list, Chem.MolFromSmiles, 'SMILES')\n\n\ndef get_mordred_descriptors_from_inchi(inchi_list):\n \"\"\"\n Calculates the Mordred descriptors for given InChi list\n \n :param inchi_list: List of InChi\n :type inchi_list: list\n :returns: The calculated descriptors list for the given smiles\n :rtype: Dataframe\n \"\"\" \n \n return generate_mordred_descriptors(inchi_list, Chem.MolFromInchi, 'InChi')\n\n \ndef generate_mordred_descriptors(encoding_list, encoding_function, encoding_name):\n \"\"\"\n Calculates the Mordred descriptors for list of molecules encodings\n \n :param smiles_list: List of molecules encodings\n :type smiles_list: list\n :returns: The calculated descriptors list for the given molecules encodings\n :rtype: Dataframe\n \"\"\" \n \n calc = mordred.Calculator() \n \n calc.register(mordred.AtomCount) #16\n calc.register(mordred.RingCount) #139\n calc.register(mordred.BondCount) #9 \n calc.register(mordred.HydrogenBond) #2 \n calc.register(mordred.CarbonTypes) #10\n calc.register(mordred.SLogP) #2\n calc.register(mordred.Constitutional) #16 \n calc.register(mordred.TopoPSA) #2\n calc.register(mordred.Weight) #2\n calc.register(mordred.Polarizability) #2\n calc.register(mordred.McGowanVolume) #1\n \n name_list=[]\n for desc_name in calc.descriptors:\n name_list.append(str(desc_name))\n \n descriptors_list=[] \n erroneous_encodings=[]\n encodings_none_descriptors=[]\n for encoding in encoding_list:\n mol=encoding_function(encoding)\n if mol is None:\n descriptors_list.append([None]*len(name_list))\n erroneous_encodings.append(encoding)\n else:\n mol=Chem.AddHs(mol)\n calculated_descriptors = calc(mol)\n for i in range(len(calculated_descriptors._values)):\n if math.isnan(calculated_descriptors._values[i]):\n calculated_descriptors._values = [None]*len(name_list)\n encodings_none_descriptors.append(encoding)\n break\n descriptors_list.append(calculated_descriptors._values) \n \n if len(erroneous_encodings)>0:\n print(\"The following erroneous {} have been found in the data:\\n{}.\\nThe erroneous {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, erroneous_encodings)), encoding_name))\n\n if len(encodings_none_descriptors)>0:\n print(\"For the following {} not all descriptors can be computed:\\n{}.\\nThese {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, encodings_none_descriptors)), encoding_name))\n \n df_descriptors=pd.DataFrame(descriptors_list,columns=name_list)\n df_descriptors = df_descriptors.select_dtypes(exclude=['object']) \n return df_descriptors\n \ndef select_descriptors_lasso(df_descriptors,target_list, R_select=0.05, C_select=0.05, kind=\"R\"):\n \"\"\"\n 
Selects descriptors by LASSO \n \n :param df_descriptors: descriptors of molecules \n :type df_descriptors: Dataframe\n :param target_list: list of target values \n :type target_list: list\n :param R_select: alpha value for Lasso \n :type R_select: float\n :param C_select: C value for LogisticRegression \n :type C_select: float\n :param kind: kind of target R->Regression C->Classification \n :type kind: string\n :returns: The selected descriptors\n :rtype: Dataframe\n \"\"\" \n \n # Remove erroneous data\n df_descriptors = df_descriptors.assign(target=target_list.values)\n df_descriptors = df_descriptors.dropna(how='any')\n target_list = df_descriptors['target'].to_list()\n df_descriptors = df_descriptors.drop(columns=['target'])\n \n df_descriptors_scaled = StandardScaler().fit_transform(df_descriptors)\n \n if(kind==\"C\"): \n model = LogisticRegression(C=C_select,penalty='l1', solver='liblinear',random_state=1).fit(df_descriptors_scaled, target_list)\n else:\n model = Lasso(alpha=R_select,max_iter=10000,random_state=1).fit(df_descriptors_scaled, target_list)\n \n \n selected = SelectFromModel(model, prefit=True)\n X_new_lasso = selected.transform(df_descriptors)\n # Get back the kept features as a DataFrame with dropped columns as all 0s\n selected_features = pd.DataFrame(selected.inverse_transform(X_new_lasso), index=df_descriptors.index, columns=df_descriptors.columns)\n # Dropped columns have values of all 0s, keep other columns \n selected_columns_lasso = selected_features.columns[selected_features.var() != 0] \n selected_data = df_descriptors[selected_columns_lasso] \n \n return selected_data, target_list\n\n\ndef get_ecfp(smiles_list, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given SMILES list\n \n :param smiles_list: List of SMILES\n :type smiles_list: list\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given SMILES\n :rtype: Dataframe\n \"\"\" \n \n return generate_ecfp(smiles_list, Chem.MolFromSmiles, 'SMILES', target_list, radius=2, nBits=2048)\n\n\ndef get_ecfp_from_inchi(inchi_list, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given InChi list\n \n :param inchi_list: List of InChi\n :type inchi_list: list\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given InChi\n :rtype: Dataframe\n \"\"\" \n \n return generate_ecfp(inchi_list, Chem.MolFromInchi, 'InChi', target_list, radius=2, nBits=2048)\n\n\ndef generate_ecfp(encoding_list, encoding_function, encoding_name, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given list of molecules encodings\n \n :param encoding_list: List of molecules encodings\n :type encoding_list: list\n :param encoding_function: Function used to extract the molecules from the encodings\n :type encoding_function: fun\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given molecules encodings\n :rtype: Dataframe\n \"\"\" \n \n # Generate ECFP fingerprints\n ecfp_fingerprints=[]\n erroneous_encodings=[]\n for encoding in encoding_list:\n mol=encoding_function(encoding)\n if mol is None:\n 
ecfp_fingerprints.append([None]*nBits)\n erroneous_encodings.append(encoding)\n else:\n mol=Chem.AddHs(mol)\n list_bits_fingerprint = []\n list_bits_fingerprint[:0] = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()\n ecfp_fingerprints.append(list_bits_fingerprint) \n \n # Create dataframe of fingerprints\n df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = encoding_list)\n # Remove erroneous data\n if len(erroneous_encodings)>0:\n print(\"The following erroneous {} have been found in the data:\\n{}.\\nThe erroneous {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, erroneous_encodings)), encoding_name))\n \n if len(target_list)>0:\n df_ecfp_fingerprints = df_ecfp_fingerprints.assign(target=target_list.values)\n \n df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')\n \n if len(target_list)>0:\n target_list = df_ecfp_fingerprints['target'].to_list()\n df_ecfp_fingerprints = df_ecfp_fingerprints.drop(columns=['target'])\n \n # Remove bit columns with no variablity (all \"0\" or all \"1\")\n df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 0).any(axis=0)]\n df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 1).any(axis=0)]\n \n return df_ecfp_fingerprints, target_list"
] | [
[
"sklearn.linear_model.LogisticRegression",
"pandas.DataFrame",
"sklearn.linear_model.Lasso",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_selection.SelectFromModel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
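The `select_descriptors_lasso` function in the chemplot row above standardizes the descriptors, fits an L1-penalized model (`Lasso` for regression, `LogisticRegression` for classification), and keeps the columns retained by `SelectFromModel`. The sketch below illustrates that pattern on synthetic data and is not part of the dataset row; the descriptor names, the random data, and the use of `get_support()` (in place of the original inverse-transform/variance bookkeeping) are illustrative assumptions.

```python
# Illustrative sketch -- not part of the dataset row above.
# L1-based descriptor selection: scale, fit Lasso, keep surviving columns.
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(50, 8)), columns=[f"desc_{i}" for i in range(8)])
y = 2.0 * X["desc_0"] + rng.normal(scale=0.1, size=50)  # target driven by desc_0

X_scaled = StandardScaler().fit_transform(X)            # same scaling step as above
model = Lasso(alpha=0.05, max_iter=10000, random_state=1).fit(X_scaled, y)

selector = SelectFromModel(model, prefit=True)          # keep non-zero coefficients
kept = X.columns[selector.get_support()]
print("selected descriptors:", list(kept))
```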
pmarshwx/matplotlib | [
"12be528dbf2114f7c25abf60de8100cb2d4494af",
"12be528dbf2114f7c25abf60de8100cb2d4494af",
"12be528dbf2114f7c25abf60de8100cb2d4494af",
"12be528dbf2114f7c25abf60de8100cb2d4494af"
] | [
"lib/matplotlib/backends/qt_compat.py",
"lib/matplotlib/transforms.py",
"lib/matplotlib/tests/test_tightlayout.py",
"lib/matplotlib/tests/test_offsetbox.py"
] | [
"\"\"\" A Qt API selector that can be used to switch between PyQt and PySide.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os\nfrom matplotlib import rcParams, verbose\n\n# Available APIs.\nQT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1\nQT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API\nQT_API_PYSIDE = 'PySide' # only supports Version 2 API\nQT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim\n\nETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),\n pyqt5=(QT_API_PYQT5, 5))\n# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)\n# If the ETS QT_API environment variable is set, use it, but only\n# if the varible if of the same major QT version. Note that\n# ETS requires the version 2 of PyQt4, which is not the platform\n# default for Python 2.x.\n\nQT_API_ENV = os.environ.get('QT_API')\n\nif rcParams['backend'] == 'Qt5Agg':\n QT_RC_MAJOR_VERSION = 5\nelse:\n QT_RC_MAJOR_VERSION = 4\n\nQT_API = None\n\nif (QT_API_ENV is not None):\n try:\n QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]\n except KeyError:\n raise RuntimeError(\n ('Unrecognized environment variable %r, valid values are:'\n ' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))\n if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:\n # Only if backend and env qt major version are\n # compatible use the env variable.\n QT_API = ETS[QT_API_ENV][0]\n\nif QT_API is None:\n # No ETS environment or incompatible so use rcParams.\n if rcParams['backend'] == 'Qt5Agg':\n QT_API = rcParams['backend.qt5']\n else:\n QT_API = rcParams['backend.qt4']\n\n# We will define an appropriate wrapper for the differing versions\n# of file dialog.\n_getSaveFileName = None\n\n# Flag to check if sip could be imported\n_sip_imported = False\n\n# Now perform the imports.\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):\n try:\n import sip\n _sip_imported = True\n except ImportError:\n # Try using PySide\n QT_API = QT_API_PYSIDE\n cond = (\"Could not import sip; falling back on PySide\\n\"\n \"in place of PyQt4 or PyQt5.\\n\")\n verbose.report(cond, 'helpful')\n\nif _sip_imported:\n if QT_API == QT_API_PYQTv2:\n if QT_API_ENV == 'pyqt':\n cond = (\"Found 'QT_API=pyqt' environment variable. \"\n \"Setting PyQt4 API accordingly.\\n\")\n else:\n cond = \"PyQt API v2 specified.\"\n try:\n sip.setapi('QString', 2)\n except:\n res = 'QString API v2 specification failed. Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n # condition has now been reported, no need to repeat it:\n cond = \"\"\n try:\n sip.setapi('QVariant', 2)\n except:\n res = 'QVariant API v2 specification failed. 
Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n\n if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API\n\n from PyQt4 import QtCore, QtGui\n\n try:\n if sip.getapi(\"QString\") > 1:\n # Use new getSaveFileNameAndFilter()\n _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter\n else:\n\n # Use old getSaveFileName()\n def _getSaveFileName(*args, **kwargs):\n return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),\n None)\n\n except (AttributeError, KeyError):\n\n # call to getapi() can fail in older versions of sip\n def _getSaveFileName(*args, **kwargs):\n return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None\n\n else: # PyQt5 API\n from PyQt5 import QtCore, QtGui, QtWidgets\n _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName\n\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n try:\n QtCore.Slot = QtCore.pyqtSlot\n except AttributeError:\n # Not a perfect match but works in simple cases\n QtCore.Slot = QtCore.pyqtSignature\n\n QtCore.Property = QtCore.pyqtProperty\n __version__ = QtCore.PYQT_VERSION_STR\n\nelse: # try importing pyside\n try:\n from PySide import QtCore, QtGui, __version__, __version_info__\n except ImportError:\n raise ImportError(\n \"Matplotlib qt-based backends require an external PyQt4, PyQt5,\\n\"\n \"or PySide package to be installed, but it was not found.\")\n\n if __version_info__ < (1, 0, 3):\n raise ImportError(\n \"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3\")\n\n _getSaveFileName = QtGui.QFileDialog.getSaveFileName\n\n\n# Apply shim to Qt4 APIs to make them look like Qt5\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):\n '''Import all used QtGui objects into QtWidgets\n\n Here I've opted to simple copy QtGui into QtWidgets as that\n achieves the same result as copying over the objects, and will\n continue to work if other objects are used.\n\n '''\n QtWidgets = QtGui\n",
"\"\"\"\nmatplotlib includes a framework for arbitrary geometric\ntransformations that is used determine the final position of all\nelements drawn on the canvas.\n\nTransforms are composed into trees of :class:`TransformNode` objects\nwhose actual value depends on their children. When the contents of\nchildren change, their parents are automatically invalidated. The\nnext time an invalidated transform is accessed, it is recomputed to\nreflect those changes. This invalidation/caching approach prevents\nunnecessary recomputations of transforms, and contributes to better\ninteractive performance.\n\nFor example, here is a graph of the transform tree used to plot data\nto the graph:\n\n.. image:: ../_static/transforms.png\n\nThe framework can be used for both affine and non-affine\ntransformations. However, for speed, we want use the backend\nrenderers to perform affine transformations whenever possible.\nTherefore, it is possible to perform just the affine or non-affine\npart of a transformation on a set of data. The affine is always\nassumed to occur after the non-affine. For any transform::\n\n full transform == non-affine part + affine part\n\nThe backends are not expected to handle non-affine transformations\nthemselves.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,\n update_path_extents)\nfrom numpy.linalg import inv\n\nfrom weakref import WeakValueDictionary\nimport warnings\ntry:\n set\nexcept NameError:\n from sets import Set as set\n\nfrom .path import Path\n\nDEBUG = False\n\nMaskedArray = ma.MaskedArray\n\n\nclass TransformNode(object):\n \"\"\"\n :class:`TransformNode` is the base class for anything that\n participates in the transform tree and needs to invalidate its\n parents or be invalidated. This includes classes that are not\n really transforms, such as bounding boxes, since some transforms\n depend on bounding boxes to compute their values.\n \"\"\"\n _gid = 0\n\n # Invalidation may affect only the affine part. If the\n # invalidation was \"affine-only\", the _invalid member is set to\n # INVALID_AFFINE_ONLY\n INVALID_NON_AFFINE = 1\n INVALID_AFFINE = 2\n INVALID = INVALID_NON_AFFINE | INVALID_AFFINE\n\n # Some metadata about the transform, used to determine whether an\n # invalidation is affine-only\n is_affine = False\n is_bbox = False\n\n pass_through = False\n \"\"\"\n If pass_through is True, all ancestors will always be\n invalidated, even if 'self' is already invalid.\n \"\"\"\n\n def __init__(self, shorthand_name=None):\n \"\"\"\n Creates a new :class:`TransformNode`.\n\n **shorthand_name** - a string representing the \"name\" of this\n transform. 
The name carries no significance\n other than to improve the readability of\n ``str(transform)`` when DEBUG=True.\n \"\"\"\n # Parents are stored in a WeakValueDictionary, so that if the\n # parents are deleted, references from the children won't keep\n # them alive.\n self._parents = WeakValueDictionary()\n\n # TransformNodes start out as invalid until their values are\n # computed for the first time.\n self._invalid = 1\n self._shorthand_name = shorthand_name or ''\n\n if DEBUG:\n def __str__(self):\n # either just return the name of this TransformNode, or it's repr\n return self._shorthand_name or repr(self)\n\n def __getstate__(self):\n d = self.__dict__.copy()\n # turn the weakkey dictionary into a normal dictionary\n d['_parents'] = dict(six.iteritems(self._parents))\n return d\n\n def __setstate__(self, data_dict):\n self.__dict__ = data_dict\n # turn the normal dictionary back into a WeakValueDictionary\n self._parents = WeakValueDictionary(self._parents)\n\n def __copy__(self, *args):\n raise NotImplementedError(\n \"TransformNode instances can not be copied. \" +\n \"Consider using frozen() instead.\")\n __deepcopy__ = __copy__\n\n def invalidate(self):\n \"\"\"\n Invalidate this :class:`TransformNode` and triggers an\n invalidation of its ancestors. Should be called any\n time the transform changes.\n \"\"\"\n value = self.INVALID\n if self.is_affine:\n value = self.INVALID_AFFINE\n return self._invalidate_internal(value, invalidating_node=self)\n\n def _invalidate_internal(self, value, invalidating_node):\n \"\"\"\n Called by :meth:`invalidate` and subsequently ascends the transform\n stack calling each TransformNode's _invalidate_internal method.\n \"\"\"\n # determine if this call will be an extension to the invalidation\n # status. If not, then a shortcut means that we needn't invoke an\n # invalidation up the transform stack as it will already have been\n # invalidated.\n\n # N.B This makes the invalidation sticky, once a transform has been\n # invalidated as NON_AFFINE, then it will always be invalidated as\n # NON_AFFINE even when triggered with a AFFINE_ONLY invalidation.\n # In most cases this is not a problem (i.e. for interactive panning and\n # zooming) and the only side effect will be on performance.\n status_changed = self._invalid < value\n\n if self.pass_through or status_changed:\n self._invalid = value\n\n for parent in list(six.itervalues(self._parents)):\n parent._invalidate_internal(value=value,\n invalidating_node=self)\n\n def set_children(self, *children):\n \"\"\"\n Set the children of the transform, to let the invalidation\n system know which transforms can invalidate this transform.\n Should be called from the constructor of any transforms that\n depend on other transforms.\n \"\"\"\n for child in children:\n child._parents[id(self)] = self\n\n if DEBUG:\n _set_children = set_children\n\n def set_children(self, *children):\n self._set_children(*children)\n self._children = children\n set_children.__doc__ = _set_children.__doc__\n\n def frozen(self):\n \"\"\"\n Returns a frozen copy of this transform node. The frozen copy\n will not update when its children change. Useful for storing\n a previously known state of a transform where\n ``copy.deepcopy()`` might normally be used.\n \"\"\"\n return self\n\n if DEBUG:\n def write_graphviz(self, fobj, highlight=[]):\n \"\"\"\n For debugging purposes.\n\n Writes the transform tree rooted at 'self' to a graphviz \"dot\"\n format file. 
This file can be run through the \"dot\" utility\n to produce a graph of the transform tree.\n\n Affine transforms are marked in blue. Bounding boxes are\n marked in yellow.\n\n *fobj*: A Python file-like object\n\n Once the \"dot\" file has been created, it can be turned into a\n png easily with::\n\n $> dot -Tpng -o $OUTPUT_FILE $DOT_FILE\n\n \"\"\"\n seen = set()\n\n def recurse(root):\n if root in seen:\n return\n seen.add(root)\n props = {}\n label = root.__class__.__name__\n if root._invalid:\n label = '[%s]' % label\n if root in highlight:\n props['style'] = 'bold'\n props['shape'] = 'box'\n props['label'] = '\"%s\"' % label\n props = ' '.join(['%s=%s' % (key, val)\n for key, val\n in six.iteritems(props)])\n\n fobj.write('%s [%s];\\n' %\n (hash(root), props))\n\n if hasattr(root, '_children'):\n for child in root._children:\n name = '?'\n for key, val in six.iteritems(root.__dict__):\n if val is child:\n name = key\n break\n fobj.write('\"%s\" -> \"%s\" [label=\"%s\", fontsize=10];\\n'\n % (hash(root),\n hash(child),\n name))\n recurse(child)\n\n fobj.write(\"digraph G {\\n\")\n recurse(self)\n fobj.write(\"}\\n\")\n\n\nclass BboxBase(TransformNode):\n \"\"\"\n This is the base class of all bounding boxes, and provides\n read-only access to its data. A mutable bounding box is provided\n by the :class:`Bbox` class.\n\n The canonical representation is as two points, with no\n restrictions on their ordering. Convenience properties are\n provided to get the left, bottom, right and top edges and width\n and height, but these are not stored explicitly.\n \"\"\"\n is_bbox = True\n is_affine = True\n\n #* Redundant: Removed for performance\n #\n # def __init__(self):\n # TransformNode.__init__(self)\n\n if DEBUG:\n def _check(points):\n if ma.isMaskedArray(points):\n warnings.warn(\"Bbox bounds are a masked array.\")\n points = np.asarray(points)\n if (points[1, 0] - points[0, 0] == 0 or\n points[1, 1] - points[0, 1] == 0):\n warnings.warn(\"Singular Bbox.\")\n _check = staticmethod(_check)\n\n def frozen(self):\n return Bbox(self.get_points().copy())\n frozen.__doc__ = TransformNode.__doc__\n\n def __array__(self, *args, **kwargs):\n return self.get_points()\n\n def is_unit(self):\n \"\"\"\n Returns True if the :class:`Bbox` is the unit bounding box\n from (0, 0) to (1, 1).\n \"\"\"\n return list(self.get_points().flatten()) == [0., 0., 1., 1.]\n\n def _get_x0(self):\n return self.get_points()[0, 0]\n x0 = property(_get_x0, None, None, \"\"\"\n (property) :attr:`x0` is the first of the pair of *x* coordinates that\n define the bounding box. :attr:`x0` is not guaranteed to be\n less than :attr:`x1`. If you require that, use :attr:`xmin`.\"\"\")\n\n def _get_y0(self):\n return self.get_points()[0, 1]\n y0 = property(_get_y0, None, None, \"\"\"\n (property) :attr:`y0` is the first of the pair of *y* coordinates that\n define the bounding box. :attr:`y0` is not guaranteed to be\n less than :attr:`y1`. If you require that, use :attr:`ymin`.\"\"\")\n\n def _get_x1(self):\n return self.get_points()[1, 0]\n x1 = property(_get_x1, None, None, \"\"\"\n (property) :attr:`x1` is the second of the pair of *x* coordinates\n that define the bounding box. :attr:`x1` is not guaranteed to be\n greater than :attr:`x0`. If you require that, use :attr:`xmax`.\"\"\")\n\n def _get_y1(self):\n return self.get_points()[1, 1]\n y1 = property(_get_y1, None, None, \"\"\"\n (property) :attr:`y1` is the second of the pair of *y* coordinates\n that define the bounding box. 
:attr:`y1` is not guaranteed to be\n greater than :attr:`y0`. If you require that, use :attr:`ymax`.\"\"\")\n\n def _get_p0(self):\n return self.get_points()[0]\n p0 = property(_get_p0, None, None, \"\"\"\n (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates\n that define the bounding box. It is not guaranteed to be the\n bottom-left corner. For that, use :attr:`min`.\"\"\")\n\n def _get_p1(self):\n return self.get_points()[1]\n p1 = property(_get_p1, None, None, \"\"\"\n (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates\n that define the bounding box. It is not guaranteed to be the\n top-right corner. For that, use :attr:`max`.\"\"\")\n\n def _get_xmin(self):\n return min(self.get_points()[:, 0])\n xmin = property(_get_xmin, None, None, \"\"\"\n (property) :attr:`xmin` is the left edge of the bounding box.\"\"\")\n\n def _get_ymin(self):\n return min(self.get_points()[:, 1])\n ymin = property(_get_ymin, None, None, \"\"\"\n (property) :attr:`ymin` is the bottom edge of the bounding box.\"\"\")\n\n def _get_xmax(self):\n return max(self.get_points()[:, 0])\n xmax = property(_get_xmax, None, None, \"\"\"\n (property) :attr:`xmax` is the right edge of the bounding box.\"\"\")\n\n def _get_ymax(self):\n return max(self.get_points()[:, 1])\n ymax = property(_get_ymax, None, None, \"\"\"\n (property) :attr:`ymax` is the top edge of the bounding box.\"\"\")\n\n def _get_min(self):\n return [min(self.get_points()[:, 0]),\n min(self.get_points()[:, 1])]\n min = property(_get_min, None, None, \"\"\"\n (property) :attr:`min` is the bottom-left corner of the bounding\n box.\"\"\")\n\n def _get_max(self):\n return [max(self.get_points()[:, 0]),\n max(self.get_points()[:, 1])]\n max = property(_get_max, None, None, \"\"\"\n (property) :attr:`max` is the top-right corner of the bounding box.\"\"\")\n\n def _get_intervalx(self):\n return self.get_points()[:, 0]\n intervalx = property(_get_intervalx, None, None, \"\"\"\n (property) :attr:`intervalx` is the pair of *x* coordinates that define\n the bounding box. It is not guaranteed to be sorted from left to\n right.\"\"\")\n\n def _get_intervaly(self):\n return self.get_points()[:, 1]\n intervaly = property(_get_intervaly, None, None, \"\"\"\n (property) :attr:`intervaly` is the pair of *y* coordinates that define\n the bounding box. It is not guaranteed to be sorted from bottom to\n top.\"\"\")\n\n def _get_width(self):\n points = self.get_points()\n return points[1, 0] - points[0, 0]\n width = property(_get_width, None, None, \"\"\"\n (property) The width of the bounding box. It may be negative if\n :attr:`x1` < :attr:`x0`.\"\"\")\n\n def _get_height(self):\n points = self.get_points()\n return points[1, 1] - points[0, 1]\n height = property(_get_height, None, None, \"\"\"\n (property) The height of the bounding box. It may be negative if\n :attr:`y1` < :attr:`y0`.\"\"\")\n\n def _get_size(self):\n points = self.get_points()\n return points[1] - points[0]\n size = property(_get_size, None, None, \"\"\"\n (property) The width and height of the bounding box. 
May be negative,\n in the same way as :attr:`width` and :attr:`height`.\"\"\")\n\n def _get_bounds(self):\n x0, y0, x1, y1 = self.get_points().flatten()\n return (x0, y0, x1 - x0, y1 - y0)\n bounds = property(_get_bounds, None, None, \"\"\"\n (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,\n :attr:`height`).\"\"\")\n\n def _get_extents(self):\n return self.get_points().flatten().copy()\n extents = property(_get_extents, None, None, \"\"\"\n (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,\n :attr:`y1`).\"\"\")\n\n def get_points(self):\n return NotImplementedError()\n\n def containsx(self, x):\n \"\"\"\n Returns True if *x* is between or equal to :attr:`x0` and\n :attr:`x1`.\n \"\"\"\n x0, x1 = self.intervalx\n return ((x0 < x1\n and (x >= x0 and x <= x1))\n or (x >= x1 and x <= x0))\n\n def containsy(self, y):\n \"\"\"\n Returns True if *y* is between or equal to :attr:`y0` and\n :attr:`y1`.\n \"\"\"\n y0, y1 = self.intervaly\n return ((y0 < y1\n and (y >= y0 and y <= y1))\n or (y >= y1 and y <= y0))\n\n def contains(self, x, y):\n \"\"\"\n Returns *True* if (*x*, *y*) is a coordinate inside the\n bounding box or on its edge.\n \"\"\"\n return self.containsx(x) and self.containsy(y)\n\n def overlaps(self, other):\n \"\"\"\n Returns True if this bounding box overlaps with the given\n bounding box *other*.\n \"\"\"\n ax1, ay1, ax2, ay2 = self._get_extents()\n bx1, by1, bx2, by2 = other._get_extents()\n if any(np.isnan(v) for v in [ax1, ay1, ax2, ay2, bx1, by1, bx2, by2]):\n return False\n\n if ax2 < ax1:\n ax2, ax1 = ax1, ax2\n if ay2 < ay1:\n ay2, ay1 = ay1, ay2\n if bx2 < bx1:\n bx2, bx1 = bx1, bx2\n if by2 < by1:\n by2, by1 = by1, by2\n\n return not ((bx2 < ax1) or\n (by2 < ay1) or\n (bx1 > ax2) or\n (by1 > ay2))\n\n def fully_containsx(self, x):\n \"\"\"\n Returns True if *x* is between but not equal to :attr:`x0` and\n :attr:`x1`.\n \"\"\"\n x0, x1 = self.intervalx\n return ((x0 < x1\n and (x > x0 and x < x1))\n or (x > x1 and x < x0))\n\n def fully_containsy(self, y):\n \"\"\"\n Returns True if *y* is between but not equal to :attr:`y0` and\n :attr:`y1`.\n \"\"\"\n y0, y1 = self.intervaly\n return ((y0 < y1\n and (y > y0 and y < y1))\n or (y > y1 and y < y0))\n\n def fully_contains(self, x, y):\n \"\"\"\n Returns True if (*x*, *y*) is a coordinate inside the bounding\n box, but not on its edge.\n \"\"\"\n return self.fully_containsx(x) \\\n and self.fully_containsy(y)\n\n def fully_overlaps(self, other):\n \"\"\"\n Returns True if this bounding box overlaps with the given\n bounding box *other*, but not on its edge alone.\n \"\"\"\n ax1, ay1, ax2, ay2 = self._get_extents()\n bx1, by1, bx2, by2 = other._get_extents()\n\n if ax2 < ax1:\n ax2, ax1 = ax1, ax2\n if ay2 < ay1:\n ay2, ay1 = ay1, ay2\n if bx2 < bx1:\n bx2, bx1 = bx1, bx2\n if by2 < by1:\n by2, by1 = by1, by2\n\n return not ((bx2 <= ax1) or\n (by2 <= ay1) or\n (bx1 >= ax2) or\n (by1 >= ay2))\n\n def transformed(self, transform):\n \"\"\"\n Return a new :class:`Bbox` object, statically transformed by\n the given transform.\n \"\"\"\n pts = self.get_points()\n ll, ul, lr = transform.transform(np.array([pts[0],\n [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))\n return Bbox([ll, [lr[0], ul[1]]])\n\n def inverse_transformed(self, transform):\n \"\"\"\n Return a new :class:`Bbox` object, statically transformed by\n the inverse of the given transform.\n \"\"\"\n return self.transformed(transform.inverted())\n\n coefs = {'C': (0.5, 0.5),\n 'SW': (0, 0),\n 'S': (0.5, 0),\n 'SE': (1.0, 0),\n 'E': (1.0, 0.5),\n 'NE': 
(1.0, 1.0),\n 'N': (0.5, 1.0),\n 'NW': (0, 1.0),\n 'W': (0, 0.5)}\n\n def anchored(self, c, container=None):\n \"\"\"\n Return a copy of the :class:`Bbox`, shifted to position *c*\n within a container.\n\n *c*: may be either:\n\n * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0\n to 1, where 0 is left or bottom and 1 is right or top\n\n * a string:\n - 'C' for centered\n - 'S' for bottom-center\n - 'SE' for bottom-left\n - 'E' for left\n - etc.\n\n Optional argument *container* is the box within which the\n :class:`Bbox` is positioned; it defaults to the initial\n :class:`Bbox`.\n \"\"\"\n if container is None:\n container = self\n l, b, w, h = container.bounds\n if isinstance(c, six.string_types):\n cx, cy = self.coefs[c]\n else:\n cx, cy = c\n L, B, W, H = self.bounds\n return Bbox(self._points +\n [(l + cx * (w - W)) - L,\n (b + cy * (h - H)) - B])\n\n def shrunk(self, mx, my):\n \"\"\"\n Return a copy of the :class:`Bbox`, shrunk by the factor *mx*\n in the *x* direction and the factor *my* in the *y* direction.\n The lower left corner of the box remains unchanged. Normally\n *mx* and *my* will be less than 1, but this is not enforced.\n \"\"\"\n w, h = self.size\n return Bbox([self._points[0],\n self._points[0] + [mx * w, my * h]])\n\n def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):\n \"\"\"\n Return a copy of the :class:`Bbox`, shrunk so that it is as\n large as it can be while having the desired aspect ratio,\n *box_aspect*. If the box coordinates are relative---that\n is, fractions of a larger box such as a figure---then the\n physical aspect ratio of that figure is specified with\n *fig_aspect*, so that *box_aspect* can also be given as a\n ratio of the absolute dimensions, not the relative dimensions.\n \"\"\"\n if box_aspect <= 0 or fig_aspect <= 0:\n raise ValueError(\"'box_aspect' and 'fig_aspect' must be positive\")\n if container is None:\n container = self\n w, h = container.size\n H = w * box_aspect / fig_aspect\n if H <= h:\n W = w\n else:\n W = h * fig_aspect / box_aspect\n H = h\n return Bbox([self._points[0],\n self._points[0] + (W, H)])\n\n def splitx(self, *args):\n \"\"\"\n e.g., ``bbox.splitx(f1, f2, ...)``\n\n Returns a list of new :class:`Bbox` objects formed by\n splitting the original one with vertical lines at fractional\n positions *f1*, *f2*, ...\n \"\"\"\n boxes = []\n xf = [0] + list(args) + [1]\n x0, y0, x1, y1 = self._get_extents()\n w = x1 - x0\n for xf0, xf1 in zip(xf[:-1], xf[1:]):\n boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))\n return boxes\n\n def splity(self, *args):\n \"\"\"\n e.g., ``bbox.splitx(f1, f2, ...)``\n\n Returns a list of new :class:`Bbox` objects formed by\n splitting the original one with horizontal lines at fractional\n positions *f1*, *f2*, ...\n \"\"\"\n boxes = []\n yf = [0] + list(args) + [1]\n x0, y0, x1, y1 = self._get_extents()\n h = y1 - y0\n for yf0, yf1 in zip(yf[:-1], yf[1:]):\n boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))\n return boxes\n\n def count_contains(self, vertices):\n \"\"\"\n Count the number of vertices contained in the :class:`Bbox`.\n\n *vertices* is a Nx2 Numpy array.\n \"\"\"\n if len(vertices) == 0:\n return 0\n vertices = np.asarray(vertices)\n x0, y0, x1, y1 = self._get_extents()\n with np.errstate(invalid='ignore'):\n dx0 = np.sign(vertices[:, 0] - x0)\n dy0 = np.sign(vertices[:, 1] - y0)\n dx1 = np.sign(vertices[:, 0] - x1)\n dy1 = np.sign(vertices[:, 1] - y1)\n inside = ((abs(dx0 + dx1) + abs(dy0 + dy1)) == 0)\n return 
np.sum(inside)\n\n def count_overlaps(self, bboxes):\n \"\"\"\n Count the number of bounding boxes that overlap this one.\n\n bboxes is a sequence of :class:`BboxBase` objects\n \"\"\"\n return count_bboxes_overlapping_bbox(self, [np.array(x) for x in bboxes])\n\n def expanded(self, sw, sh):\n \"\"\"\n Return a new :class:`Bbox` which is this :class:`Bbox`\n expanded around its center by the given factors *sw* and\n *sh*.\n \"\"\"\n width = self.width\n height = self.height\n deltaw = (sw * width - width) / 2.0\n deltah = (sh * height - height) / 2.0\n a = np.array([[-deltaw, -deltah], [deltaw, deltah]])\n return Bbox(self._points + a)\n\n def padded(self, p):\n \"\"\"\n Return a new :class:`Bbox` that is padded on all four sides by\n the given value.\n \"\"\"\n points = self.get_points()\n return Bbox(points + [[-p, -p], [p, p]])\n\n def translated(self, tx, ty):\n \"\"\"\n Return a copy of the :class:`Bbox`, statically translated by\n *tx* and *ty*.\n \"\"\"\n return Bbox(self._points + (tx, ty))\n\n def corners(self):\n \"\"\"\n Return an array of points which are the four corners of this\n rectangle. For example, if this :class:`Bbox` is defined by\n the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns\n (*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).\n \"\"\"\n l, b, r, t = self.get_points().flatten()\n return np.array([[l, b], [l, t], [r, b], [r, t]])\n\n def rotated(self, radians):\n \"\"\"\n Return a new bounding box that bounds a rotated version of\n this bounding box by the given radians. The new bounding box\n is still aligned with the axes, of course.\n \"\"\"\n corners = self.corners()\n corners_rotated = Affine2D().rotate(radians).transform(corners)\n bbox = Bbox.unit()\n bbox.update_from_data_xy(corners_rotated, ignore=True)\n return bbox\n\n @staticmethod\n def union(bboxes):\n \"\"\"\n Return a :class:`Bbox` that contains all of the given bboxes.\n \"\"\"\n if not len(bboxes):\n raise ValueError(\"'bboxes' cannot be empty\")\n\n if len(bboxes) == 1:\n return bboxes[0]\n\n x0 = np.inf\n y0 = np.inf\n x1 = -np.inf\n y1 = -np.inf\n\n for bbox in bboxes:\n points = bbox.get_points()\n xs = points[:, 0]\n ys = points[:, 1]\n x0 = min(x0, np.min(xs))\n y0 = min(y0, np.min(ys))\n x1 = max(x1, np.max(xs))\n y1 = max(y1, np.max(ys))\n\n return Bbox.from_extents(x0, y0, x1, y1)\n\n @staticmethod\n def intersection(bbox1, bbox2):\n \"\"\"\n Return the intersection of the two bboxes or None\n if they do not intersect.\n\n Implements the algorithm described at:\n\n http://www.tekpool.com/node/2687\n\n \"\"\"\n intersects = not (bbox2.xmin > bbox1.xmax or\n bbox2.xmax < bbox1.xmin or\n bbox2.ymin > bbox1.ymax or\n bbox2.ymax < bbox1.ymin)\n\n if intersects:\n x0 = max([bbox1.xmin, bbox2.xmin])\n x1 = min([bbox1.xmax, bbox2.xmax])\n y0 = max([bbox1.ymin, bbox2.ymin])\n y1 = min([bbox1.ymax, bbox2.ymax])\n return Bbox.from_extents(x0, y0, x1, y1)\n\n return None\n\n\nclass Bbox(BboxBase):\n \"\"\"\n A mutable bounding box.\n \"\"\"\n\n def __init__(self, points, **kwargs):\n \"\"\"\n *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]\n\n If you need to create a :class:`Bbox` object from another form\n of data, consider the static methods :meth:`unit`,\n :meth:`from_bounds` and :meth:`from_extents`.\n \"\"\"\n BboxBase.__init__(self, **kwargs)\n points = np.asarray(points, np.float_)\n if points.shape != (2, 2):\n raise ValueError('Bbox points must be of the form '\n '\"[[x0, y0], [x1, y1]]\".')\n self._points = points\n self._minpos = np.array([0.0000001, 
0.0000001])\n self._ignore = True\n # it is helpful in some contexts to know if the bbox is a\n # default or has been mutated; we store the orig points to\n # support the mutated methods\n self._points_orig = self._points.copy()\n if DEBUG:\n ___init__ = __init__\n\n def __init__(self, points, **kwargs):\n self._check(points)\n self.___init__(points, **kwargs)\n\n def invalidate(self):\n self._check(self._points)\n TransformNode.invalidate(self)\n\n @staticmethod\n def unit():\n \"\"\"\n (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to\n (1, 1).\n \"\"\"\n return Bbox(np.array([[0.0, 0.0], [1.0, 1.0]], np.float))\n\n @staticmethod\n def null():\n \"\"\"\n (staticmethod) Create a new null :class:`Bbox` from (inf, inf) to\n (-inf, -inf).\n \"\"\"\n return Bbox(np.array([[np.inf, np.inf], [-np.inf, -np.inf]], np.float))\n\n @staticmethod\n def from_bounds(x0, y0, width, height):\n \"\"\"\n (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,\n *width* and *height*.\n\n *width* and *height* may be negative.\n \"\"\"\n return Bbox.from_extents(x0, y0, x0 + width, y0 + height)\n\n @staticmethod\n def from_extents(*args):\n \"\"\"\n (staticmethod) Create a new Bbox from *left*, *bottom*,\n *right* and *top*.\n\n The *y*-axis increases upwards.\n \"\"\"\n points = np.array(args, dtype=np.float_).reshape(2, 2)\n return Bbox(points)\n\n def __format__(self, fmt):\n return (\n 'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.\n format(self, fmt))\n\n def __str__(self):\n return format(self, '')\n\n def __repr__(self):\n return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)\n\n def ignore(self, value):\n \"\"\"\n Set whether the existing bounds of the box should be ignored\n by subsequent calls to :meth:`update_from_data` or\n :meth:`update_from_data_xy`.\n\n *value*:\n\n - When True, subsequent calls to :meth:`update_from_data`\n will ignore the existing bounds of the :class:`Bbox`.\n\n - When False, subsequent calls to :meth:`update_from_data`\n will include the existing bounds of the :class:`Bbox`.\n \"\"\"\n self._ignore = value\n\n def update_from_data(self, x, y, ignore=None):\n \"\"\"\n Update the bounds of the :class:`Bbox` based on the passed in\n data. After updating, the bounds will have positive *width*\n and *height*; *x0* and *y0* will be the minimal values.\n\n *x*: a numpy array of *x*-values\n\n *y*: a numpy array of *y*-values\n\n *ignore*:\n - when True, ignore the existing bounds of the :class:`Bbox`.\n - when False, include the existing bounds of the :class:`Bbox`.\n - when None, use the last value passed to :meth:`ignore`.\n \"\"\"\n warnings.warn(\n \"update_from_data requires a memory copy -- please replace with \"\n \"update_from_data_xy\")\n\n xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))\n return self.update_from_data_xy(xy, ignore)\n\n def update_from_path(self, path, ignore=None, updatex=True, updatey=True):\n \"\"\"\n Update the bounds of the :class:`Bbox` based on the passed in\n data. 
After updating, the bounds will have positive *width*\n and *height*; *x0* and *y0* will be the minimal values.\n\n *path*: a :class:`~matplotlib.path.Path` instance\n\n *ignore*:\n - when True, ignore the existing bounds of the :class:`Bbox`.\n - when False, include the existing bounds of the :class:`Bbox`.\n - when None, use the last value passed to :meth:`ignore`.\n\n *updatex*: when True, update the x values\n\n *updatey*: when True, update the y values\n\n \"\"\"\n if ignore is None:\n ignore = self._ignore\n\n if path.vertices.size == 0:\n return\n\n points, minpos, changed = update_path_extents(\n path, None, self._points, self._minpos, ignore)\n\n if changed:\n self.invalidate()\n if updatex:\n self._points[:, 0] = points[:, 0]\n self._minpos[0] = minpos[0]\n if updatey:\n self._points[:, 1] = points[:, 1]\n self._minpos[1] = minpos[1]\n\n def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):\n \"\"\"\n Update the bounds of the :class:`Bbox` based on the passed in\n data. After updating, the bounds will have positive *width*\n and *height*; *x0* and *y0* will be the minimal values.\n\n *xy*: a numpy array of 2D points\n\n *ignore*:\n - when True, ignore the existing bounds of the :class:`Bbox`.\n - when False, include the existing bounds of the :class:`Bbox`.\n - when None, use the last value passed to :meth:`ignore`.\n\n *updatex*: when True, update the x values\n\n *updatey*: when True, update the y values\n \"\"\"\n if len(xy) == 0:\n return\n\n path = Path(xy)\n self.update_from_path(path, ignore=ignore,\n updatex=updatex, updatey=updatey)\n\n def _set_x0(self, val):\n self._points[0, 0] = val\n self.invalidate()\n x0 = property(BboxBase._get_x0, _set_x0)\n\n def _set_y0(self, val):\n self._points[0, 1] = val\n self.invalidate()\n y0 = property(BboxBase._get_y0, _set_y0)\n\n def _set_x1(self, val):\n self._points[1, 0] = val\n self.invalidate()\n x1 = property(BboxBase._get_x1, _set_x1)\n\n def _set_y1(self, val):\n self._points[1, 1] = val\n self.invalidate()\n y1 = property(BboxBase._get_y1, _set_y1)\n\n def _set_p0(self, val):\n self._points[0] = val\n self.invalidate()\n p0 = property(BboxBase._get_p0, _set_p0)\n\n def _set_p1(self, val):\n self._points[1] = val\n self.invalidate()\n p1 = property(BboxBase._get_p1, _set_p1)\n\n def _set_intervalx(self, interval):\n self._points[:, 0] = interval\n self.invalidate()\n intervalx = property(BboxBase._get_intervalx, _set_intervalx)\n\n def _set_intervaly(self, interval):\n self._points[:, 1] = interval\n self.invalidate()\n intervaly = property(BboxBase._get_intervaly, _set_intervaly)\n\n def _set_bounds(self, bounds):\n l, b, w, h = bounds\n points = np.array([[l, b], [l + w, b + h]], np.float_)\n if np.any(self._points != points):\n self._points = points\n self.invalidate()\n bounds = property(BboxBase._get_bounds, _set_bounds)\n\n def _get_minpos(self):\n return self._minpos\n minpos = property(_get_minpos)\n\n def _get_minposx(self):\n return self._minpos[0]\n minposx = property(_get_minposx)\n\n def _get_minposy(self):\n return self._minpos[1]\n minposy = property(_get_minposy)\n\n def get_points(self):\n \"\"\"\n Get the points of the bounding box directly as a numpy array\n of the form: [[x0, y0], [x1, y1]].\n \"\"\"\n self._invalid = 0\n return self._points\n\n def set_points(self, points):\n \"\"\"\n Set the points of the bounding box directly from a numpy array\n of the form: [[x0, y0], [x1, y1]]. 
No error checking is\n performed, as this method is mainly for internal use.\n \"\"\"\n if np.any(self._points != points):\n self._points = points\n self.invalidate()\n\n def set(self, other):\n \"\"\"\n Set this bounding box from the \"frozen\" bounds of another\n :class:`Bbox`.\n \"\"\"\n if np.any(self._points != other.get_points()):\n self._points = other.get_points()\n self.invalidate()\n\n def mutated(self):\n 'return whether the bbox has changed since init'\n return self.mutatedx() or self.mutatedy()\n\n def mutatedx(self):\n 'return whether the x-limits have changed since init'\n return (self._points[0, 0] != self._points_orig[0, 0] or\n self._points[1, 0] != self._points_orig[1, 0])\n\n def mutatedy(self):\n 'return whether the y-limits have changed since init'\n return (self._points[0, 1] != self._points_orig[0, 1] or\n self._points[1, 1] != self._points_orig[1, 1])\n\n\nclass TransformedBbox(BboxBase):\n \"\"\"\n A :class:`Bbox` that is automatically transformed by a given\n transform. When either the child bounding box or transform\n changes, the bounds of this bbox will update accordingly.\n \"\"\"\n def __init__(self, bbox, transform, **kwargs):\n \"\"\"\n *bbox*: a child :class:`Bbox`\n\n *transform*: a 2D :class:`Transform`\n \"\"\"\n if not bbox.is_bbox:\n raise ValueError(\"'bbox' is not a bbox\")\n if not isinstance(transform, Transform):\n msg = (\"'transform' must be an instance of\"\n \" 'matplotlib.transform.Transform'\")\n raise ValueError(msg)\n if transform.input_dims != 2 or transform.output_dims != 2:\n msg = \"The input and output dimensions of 'transform' must be 2\"\n raise ValueError(msg)\n\n BboxBase.__init__(self, **kwargs)\n self._bbox = bbox\n self._transform = transform\n self.set_children(bbox, transform)\n self._points = None\n\n def __repr__(self):\n return \"TransformedBbox(%r, %r)\" % (self._bbox, self._transform)\n\n def get_points(self):\n if self._invalid:\n points = self._transform.transform(self._bbox.get_points())\n points = np.ma.filled(points, 0.0)\n self._points = points\n self._invalid = 0\n return self._points\n get_points.__doc__ = Bbox.get_points.__doc__\n\n if DEBUG:\n _get_points = get_points\n\n def get_points(self):\n points = self._get_points()\n self._check(points)\n return points\n\n\nclass Transform(TransformNode):\n \"\"\"\n The base class of all :class:`TransformNode` instances that\n actually perform a transformation.\n\n All non-affine transformations should be subclasses of this class.\n New affine transformations should be subclasses of\n :class:`Affine2D`.\n\n Subclasses of this class should override the following members (at\n minimum):\n\n - :attr:`input_dims`\n - :attr:`output_dims`\n - :meth:`transform`\n - :attr:`is_separable`\n - :attr:`has_inverse`\n - :meth:`inverted` (if :attr:`has_inverse` is True)\n\n If the transform needs to do something non-standard with\n :class:`matplotlib.path.Path` objects, such as adding curves\n where there were once line segments, it should override:\n\n - :meth:`transform_path`\n \"\"\"\n input_dims = None\n \"\"\"\n The number of input dimensions of this transform.\n Must be overridden (with integers) in the subclass.\n \"\"\"\n\n output_dims = None\n \"\"\"\n The number of output dimensions of this transform.\n Must be overridden (with integers) in the subclass.\n \"\"\"\n\n has_inverse = False\n \"\"\"True if this transform has a corresponding inverse transform.\"\"\"\n\n is_separable = False\n \"\"\"True if this transform is separable in the x- and y- dimensions.\"\"\"\n\n def 
__add__(self, other):\n \"\"\"\n Composes two transforms together such that *self* is followed\n by *other*.\n \"\"\"\n if isinstance(other, Transform):\n return composite_transform_factory(self, other)\n raise TypeError(\n \"Can not add Transform to object of type '%s'\" % type(other))\n\n def __radd__(self, other):\n \"\"\"\n Composes two transforms together such that *self* is followed\n by *other*.\n \"\"\"\n if isinstance(other, Transform):\n return composite_transform_factory(other, self)\n raise TypeError(\n \"Can not add Transform to object of type '%s'\" % type(other))\n\n def __eq__(self, other):\n # equality is based on transform object id. Hence:\n # Transform() != Transform().\n # Some classes, such as TransformWrapper & AffineBase, will override.\n return self is other\n\n def _iter_break_from_left_to_right(self):\n \"\"\"\n Returns an iterator breaking down this transform stack from left to\n right recursively. If self == ((A, N), A) then the result will be an\n iterator which yields I : ((A, N), A), followed by A : (N, A),\n followed by (A, N) : (A), but not ((A, N), A) : I.\n\n This is equivalent to flattening the stack then yielding\n ``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).\n\n \"\"\"\n yield IdentityTransform(), self\n\n @property\n def depth(self):\n \"\"\"\n Returns the number of transforms which have been chained\n together to form this Transform instance.\n\n .. note::\n\n For the special case of a Composite transform, the maximum depth\n of the two is returned.\n\n \"\"\"\n return 1\n\n def contains_branch(self, other):\n \"\"\"\n Return whether the given transform is a sub-tree of this transform.\n\n This routine uses transform equality to identify sub-trees, therefore\n in many situations it is object id which will be used.\n\n For the case where the given transform represents the whole\n of this transform, returns True.\n\n \"\"\"\n if self.depth < other.depth:\n return False\n\n # check that a subtree is equal to other (starting from self)\n for _, sub_tree in self._iter_break_from_left_to_right():\n if sub_tree == other:\n return True\n return False\n\n def contains_branch_seperately(self, other_transform):\n \"\"\"\n Returns whether the given branch is a sub-tree of this transform on\n each seperate dimension.\n\n A common use for this method is to identify if a transform is a blended\n transform containing an axes' data transform. e.g.::\n\n x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)\n\n \"\"\"\n if self.output_dims != 2:\n raise ValueError('contains_branch_seperately only supports '\n 'transforms with 2 output dimensions')\n # for a non-blended transform each seperate dimension is the same, so\n # just return the appropriate shape.\n return [self.contains_branch(other_transform)] * 2\n\n def __sub__(self, other):\n \"\"\"\n Returns a transform stack which goes all the way down self's transform\n stack, and then ascends back up other's stack. 
If it can, this is\n optimised::\n\n # normally\n A - B == a + b.inverted()\n\n # sometimes, when A contains the tree B there is no need to\n # descend all the way down to the base of A (via B), instead we\n # can just stop at B.\n\n (A + B) - (B)^-1 == A\n\n # similarly, when B contains tree A, we can avoid decending A at\n # all, basically:\n A - (A + B) == ((B + A) - A).inverted() or B^-1\n\n For clarity, the result of ``(A + B) - B + B == (A + B)``.\n\n \"\"\"\n # we only know how to do this operation if other is a Transform.\n if not isinstance(other, Transform):\n return NotImplemented\n\n for remainder, sub_tree in self._iter_break_from_left_to_right():\n if sub_tree == other:\n return remainder\n\n for remainder, sub_tree in other._iter_break_from_left_to_right():\n if sub_tree == self:\n if not remainder.has_inverse:\n raise ValueError(\"The shortcut cannot be computed since \"\n \"other's transform includes a non-invertable component.\")\n return remainder.inverted()\n\n # if we have got this far, then there was no shortcut possible\n if other.has_inverse:\n return self + other.inverted()\n else:\n raise ValueError('It is not possible to compute transA - transB '\n 'since transB cannot be inverted and there is no '\n 'shortcut possible.')\n\n def __array__(self, *args, **kwargs):\n \"\"\"\n Array interface to get at this Transform's affine matrix.\n \"\"\"\n return self.get_affine().get_matrix()\n\n def transform(self, values):\n \"\"\"\n Performs the transformation on the given array of values.\n\n Accepts a numpy array of shape (N x :attr:`input_dims`) and\n returns a numpy array of shape (N x :attr:`output_dims`).\n\n Alternatively, accepts a numpy array of length :attr:`input_dims`\n and returns a numpy array of length :attr:`output_dims`.\n \"\"\"\n # Ensure that values is a 2d array (but remember whether\n # we started with a 1d or 2d array).\n values = np.asanyarray(values)\n ndim = values.ndim\n values = values.reshape((-1, self.input_dims))\n\n # Transform the values\n res = self.transform_affine(self.transform_non_affine(values))\n\n # Convert the result back to the shape of the input values.\n if ndim == 0:\n assert not np.ma.is_masked(res) # just to be on the safe side\n return res[0, 0]\n if ndim == 1:\n return res.reshape(-1)\n elif ndim == 2:\n return res\n else:\n raise ValueError(\n \"Input values must have shape (N x {dims}) \"\n \"or ({dims}).\".format(dims=self.input_dims))\n\n return res\n\n def transform_affine(self, values):\n \"\"\"\n Performs only the affine part of this transformation on the\n given array of values.\n\n ``transform(values)`` is always equivalent to\n ``transform_affine(transform_non_affine(values))``.\n\n In non-affine transformations, this is generally a no-op. In\n affine transformations, this is equivalent to\n ``transform(values)``.\n\n Accepts a numpy array of shape (N x :attr:`input_dims`) and\n returns a numpy array of shape (N x :attr:`output_dims`).\n\n Alternatively, accepts a numpy array of length :attr:`input_dims`\n and returns a numpy array of length :attr:`output_dims`.\n \"\"\"\n return self.get_affine().transform(values)\n\n def transform_non_affine(self, values):\n \"\"\"\n Performs only the non-affine part of the transformation.\n\n ``transform(values)`` is always equivalent to\n ``transform_affine(transform_non_affine(values))``.\n\n In non-affine transformations, this is generally equivalent to\n ``transform(values)``. 
In affine transformations, this is\n always a no-op.\n\n Accepts a numpy array of shape (N x :attr:`input_dims`) and\n returns a numpy array of shape (N x :attr:`output_dims`).\n\n Alternatively, accepts a numpy array of length :attr:`input_dims`\n and returns a numpy array of length :attr:`output_dims`.\n \"\"\"\n return values\n\n def transform_bbox(self, bbox):\n \"\"\"\n Transform the given bounding box.\n\n Note, for smarter transforms including caching (a common\n requirement for matplotlib figures), see :class:`TransformedBbox`.\n \"\"\"\n return Bbox(self.transform(bbox.get_points()))\n\n def get_affine(self):\n \"\"\"\n Get the affine part of this transform.\n \"\"\"\n return IdentityTransform()\n\n def get_matrix(self):\n \"\"\"\n Get the Affine transformation array for the affine part\n of this transform.\n\n \"\"\"\n return self.get_affine().get_matrix()\n\n def transform_point(self, point):\n \"\"\"\n A convenience function that returns the transformed copy of a\n single point.\n\n The point is given as a sequence of length :attr:`input_dims`.\n The transformed point is returned as a sequence of length\n :attr:`output_dims`.\n \"\"\"\n if len(point) != self.input_dims:\n msg = \"The length of 'point' must be 'self.input_dims'\"\n raise ValueError(msg)\n return self.transform(np.asarray([point]))[0]\n\n def transform_path(self, path):\n \"\"\"\n Returns a transformed path.\n\n *path*: a :class:`~matplotlib.path.Path` instance.\n\n In some cases, this transform may insert curves into the path\n that began as line segments.\n \"\"\"\n return self.transform_path_affine(self.transform_path_non_affine(path))\n\n def transform_path_affine(self, path):\n \"\"\"\n Returns a path, transformed only by the affine part of\n this transform.\n\n *path*: a :class:`~matplotlib.path.Path` instance.\n\n ``transform_path(path)`` is equivalent to\n ``transform_path_affine(transform_path_non_affine(values))``.\n \"\"\"\n return self.get_affine().transform_path_affine(path)\n\n def transform_path_non_affine(self, path):\n \"\"\"\n Returns a path, transformed only by the non-affine\n part of this transform.\n\n *path*: a :class:`~matplotlib.path.Path` instance.\n\n ``transform_path(path)`` is equivalent to\n ``transform_path_affine(transform_path_non_affine(values))``.\n \"\"\"\n x = self.transform_non_affine(path.vertices)\n return Path._fast_from_codes_and_verts(x, path.codes,\n {'interpolation_steps': path._interpolation_steps,\n 'should_simplify': path.should_simplify})\n\n def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):\n \"\"\"\n Performs transformation on a set of angles anchored at\n specific locations.\n\n The *angles* must be a column vector (i.e., numpy array).\n\n The *pts* must be a two-column numpy array of x,y positions\n (angle transforms currently only work in 2D). 
This array must\n have the same number of rows as *angles*.\n\n *radians* indicates whether or not input angles are given in\n radians (True) or degrees (False; the default).\n\n *pushoff* is the distance to move away from *pts* for\n determining transformed angles (see discussion of method\n below).\n\n The transformed angles are returned in an array with the same\n size as *angles*.\n\n The generic version of this method uses a very generic\n algorithm that transforms *pts*, as well as locations very\n close to *pts*, to find the angle in the transformed system.\n \"\"\"\n # Must be 2D\n if self.input_dims != 2 or self.output_dims != 2:\n raise NotImplementedError('Only defined in 2D')\n\n if pts.shape[1] != 2:\n raise ValueError(\"'pts' must be array with 2 columns for x,y\")\n\n if angles.ndim != 1 or angles.shape[0] != pts.shape[0]:\n msg = \"'angles' must be a column vector and have same number of\"\n msg += \" rows as 'pts'\"\n raise ValueError(msg)\n\n # Convert to radians if desired\n if not radians:\n angles = angles / 180.0 * np.pi\n\n # Move a short distance away\n pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]\n\n # Transform both sets of points\n tpts = self.transform(pts)\n tpts2 = self.transform(pts2)\n\n # Calculate transformed angles\n d = tpts2 - tpts\n a = np.arctan2(d[:, 1], d[:, 0])\n\n # Convert back to degrees if desired\n if not radians:\n a = a * 180.0 / np.pi\n\n return a\n\n def inverted(self):\n \"\"\"\n Return the corresponding inverse transformation.\n\n The return value of this method should be treated as\n temporary. An update to *self* does not cause a corresponding\n update to its inverted copy.\n\n ``x === self.inverted().transform(self.transform(x))``\n \"\"\"\n raise NotImplementedError()\n\n\nclass TransformWrapper(Transform):\n \"\"\"\n A helper class that holds a single child transform and acts\n equivalently to it.\n\n This is useful if a node of the transform tree must be replaced at\n run time with a transform of a different type. This class allows\n that replacement to correctly trigger invalidation.\n\n Note that :class:`TransformWrapper` instances must have the same\n input and output dimensions during their entire lifetime, so the\n child transform may only be replaced with another child transform\n of the same dimensions.\n \"\"\"\n pass_through = True\n\n def __init__(self, child):\n \"\"\"\n *child*: A class:`Transform` instance. 
This child may later\n be replaced with :meth:`set`.\n \"\"\"\n if not isinstance(child, Transform):\n msg = (\"'child' must be an instance of\"\n \" 'matplotlib.transform.Transform'\")\n raise ValueError(msg)\n Transform.__init__(self)\n self.input_dims = child.input_dims\n self.output_dims = child.output_dims\n self._set(child)\n self._invalid = 0\n\n def __eq__(self, other):\n return self._child.__eq__(other)\n\n if DEBUG:\n\n def __str__(self):\n return str(self._child)\n\n def __getstate__(self):\n # only store the child\n return {'child': self._child}\n\n def __setstate__(self, state):\n # re-initialise the TransformWrapper with the state's child\n self.__init__(state['child'])\n\n def __repr__(self):\n return \"TransformWrapper(%r)\" % self._child\n\n def frozen(self):\n return self._child.frozen()\n frozen.__doc__ = Transform.frozen.__doc__\n\n def _set(self, child):\n self._child = child\n self.set_children(child)\n\n self.transform = child.transform\n self.transform_affine = child.transform_affine\n self.transform_non_affine = child.transform_non_affine\n self.transform_path = child.transform_path\n self.transform_path_affine = child.transform_path_affine\n self.transform_path_non_affine = child.transform_path_non_affine\n self.get_affine = child.get_affine\n self.inverted = child.inverted\n self.get_matrix = child.get_matrix\n\n # note we do not wrap other properties here since the transform's\n # child can be changed with WrappedTransform.set and so checking\n # is_affine and other such properties may be dangerous.\n\n def set(self, child):\n \"\"\"\n Replace the current child of this transform with another one.\n\n The new child must have the same number of input and output\n dimensions as the current child.\n \"\"\"\n if (child.input_dims != self.input_dims or\n child.output_dims != self.output_dims):\n msg = (\"The new child must have the same number of input and\"\n \" output dimensions as the current child.\")\n raise ValueError(msg)\n\n self._set(child)\n\n self._invalid = 0\n self.invalidate()\n self._invalid = 0\n\n def _get_is_affine(self):\n return self._child.is_affine\n is_affine = property(_get_is_affine)\n\n def _get_is_separable(self):\n return self._child.is_separable\n is_separable = property(_get_is_separable)\n\n def _get_has_inverse(self):\n return self._child.has_inverse\n has_inverse = property(_get_has_inverse)\n\n\nclass AffineBase(Transform):\n \"\"\"\n The base class of all affine transformations of any number of\n dimensions.\n \"\"\"\n is_affine = True\n\n def __init__(self, *args, **kwargs):\n Transform.__init__(self, *args, **kwargs)\n self._inverted = None\n\n def __array__(self, *args, **kwargs):\n # optimises the access of the transform matrix vs the superclass\n return self.get_matrix()\n\n @staticmethod\n def _concat(a, b):\n \"\"\"\n Concatenates two transformation matrices (represented as numpy\n arrays) together.\n \"\"\"\n return np.dot(b, a)\n\n def __eq__(self, other):\n if getattr(other, \"is_affine\", False):\n return np.all(self.get_matrix() == other.get_matrix())\n return NotImplemented\n\n def transform(self, values):\n return self.transform_affine(values)\n transform.__doc__ = Transform.transform.__doc__\n\n def transform_affine(self, values):\n raise NotImplementedError('Affine subclasses should override this '\n 'method.')\n transform_affine.__doc__ = Transform.transform_affine.__doc__\n\n def transform_non_affine(self, points):\n return points\n transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__\n\n def 
transform_path(self, path):\n return self.transform_path_affine(path)\n transform_path.__doc__ = Transform.transform_path.__doc__\n\n def transform_path_affine(self, path):\n return Path(self.transform_affine(path.vertices),\n path.codes, path._interpolation_steps)\n transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__\n\n def transform_path_non_affine(self, path):\n return path\n transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__\n\n def get_affine(self):\n return self\n get_affine.__doc__ = Transform.get_affine.__doc__\n\n\nclass Affine2DBase(AffineBase):\n \"\"\"\n The base class of all 2D affine transformations.\n\n 2D affine transformations are performed using a 3x3 numpy array::\n\n a c e\n b d f\n 0 0 1\n\n This class provides the read-only interface. For a mutable 2D\n affine transformation, use :class:`Affine2D`.\n\n Subclasses of this class will generally only need to override a\n constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.\n \"\"\"\n has_inverse = True\n\n input_dims = 2\n output_dims = 2\n\n def frozen(self):\n return Affine2D(self.get_matrix().copy())\n frozen.__doc__ = AffineBase.frozen.__doc__\n\n def _get_is_separable(self):\n mtx = self.get_matrix()\n return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0\n is_separable = property(_get_is_separable)\n\n def to_values(self):\n \"\"\"\n Return the values of the matrix as a sequence (a,b,c,d,e,f)\n \"\"\"\n mtx = self.get_matrix()\n return tuple(mtx[:2].swapaxes(0, 1).flatten())\n\n @staticmethod\n def matrix_from_values(a, b, c, d, e, f):\n \"\"\"\n (staticmethod) Create a new transformation matrix as a 3x3\n numpy array of the form::\n\n a c e\n b d f\n 0 0 1\n \"\"\"\n return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)\n\n def transform_affine(self, points):\n mtx = self.get_matrix()\n if isinstance(points, MaskedArray):\n tpoints = affine_transform(points.data, mtx)\n return ma.MaskedArray(tpoints, mask=ma.getmask(points))\n return affine_transform(points, mtx)\n\n def transform_point(self, point):\n mtx = self.get_matrix()\n return affine_transform([point], mtx)[0]\n transform_point.__doc__ = AffineBase.transform_point.__doc__\n\n if DEBUG:\n _transform_affine = transform_affine\n\n def transform_affine(self, points):\n # The major speed trap here is just converting to the\n # points to an array in the first place. If we can use\n # more arrays upstream, that should help here.\n if (not ma.isMaskedArray(points) and\n not isinstance(points, np.ndarray)):\n warnings.warn(\n ('A non-numpy array of type %s was passed in for ' +\n 'transformation. 
Please correct this.')\n % type(points))\n return self._transform_affine(points)\n transform_affine.__doc__ = AffineBase.transform_affine.__doc__\n\n def inverted(self):\n if self._inverted is None or self._invalid:\n mtx = self.get_matrix()\n shorthand_name = None\n if self._shorthand_name:\n shorthand_name = '(%s)-1' % self._shorthand_name\n self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)\n self._invalid = 0\n return self._inverted\n inverted.__doc__ = AffineBase.inverted.__doc__\n\n\nclass Affine2D(Affine2DBase):\n \"\"\"\n A mutable 2D affine transformation.\n \"\"\"\n\n def __init__(self, matrix=None, **kwargs):\n \"\"\"\n Initialize an Affine transform from a 3x3 numpy float array::\n\n a c e\n b d f\n 0 0 1\n\n If *matrix* is None, initialize with the identity transform.\n \"\"\"\n Affine2DBase.__init__(self, **kwargs)\n if matrix is None:\n matrix = np.identity(3)\n elif DEBUG:\n matrix = np.asarray(matrix, np.float_)\n assert matrix.shape == (3, 3)\n self._mtx = matrix\n self._invalid = 0\n\n def __repr__(self):\n return \"Affine2D(%s)\" % repr(self._mtx)\n\n# def __cmp__(self, other):\n# # XXX redundant. this only tells us eq.\n# if (isinstance(other, Affine2D) and\n# (self.get_matrix() == other.get_matrix()).all()):\n# return 0\n# return -1\n\n @staticmethod\n def from_values(a, b, c, d, e, f):\n \"\"\"\n (staticmethod) Create a new Affine2D instance from the given\n values::\n\n a c e\n b d f\n 0 0 1\n\n .\n \"\"\"\n return Affine2D(\n np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)\n .reshape((3, 3)))\n\n def get_matrix(self):\n \"\"\"\n Get the underlying transformation matrix as a 3x3 numpy array::\n\n a c e\n b d f\n 0 0 1\n\n .\n \"\"\"\n self._invalid = 0\n return self._mtx\n\n def set_matrix(self, mtx):\n \"\"\"\n Set the underlying transformation matrix from a 3x3 numpy array::\n\n a c e\n b d f\n 0 0 1\n\n .\n \"\"\"\n self._mtx = mtx\n self.invalidate()\n\n def set(self, other):\n \"\"\"\n Set this transformation from the frozen copy of another\n :class:`Affine2DBase` object.\n \"\"\"\n if not isinstance(other, Affine2DBase):\n msg = (\"'other' must be an instance of\"\n \" 'matplotlib.transform.Affine2DBase'\")\n raise ValueError(msg)\n self._mtx = other.get_matrix()\n self.invalidate()\n\n @staticmethod\n def identity():\n \"\"\"\n (staticmethod) Return a new :class:`Affine2D` object that is\n the identity transform.\n\n Unless this transform will be mutated later on, consider using\n the faster :class:`IdentityTransform` class instead.\n \"\"\"\n return Affine2D(np.identity(3))\n\n def clear(self):\n \"\"\"\n Reset the underlying matrix to the identity transform.\n \"\"\"\n self._mtx = np.identity(3)\n self.invalidate()\n return self\n\n def rotate(self, theta):\n \"\"\"\n Add a rotation (in radians) to this transform in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n a = np.cos(theta)\n b = np.sin(theta)\n rotate_mtx = np.array(\n [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],\n np.float_)\n self._mtx = np.dot(rotate_mtx, self._mtx)\n self.invalidate()\n return self\n\n def rotate_deg(self, degrees):\n \"\"\"\n Add a rotation (in degrees) to this transform in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n return self.rotate(degrees * np.pi / 180.)\n\n def rotate_around(self, x, y, theta):\n 
\"\"\"\n Add a rotation (in radians) around the point (x, y) in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n return self.translate(-x, -y).rotate(theta).translate(x, y)\n\n def rotate_deg_around(self, x, y, degrees):\n \"\"\"\n Add a rotation (in degrees) around the point (x, y) in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)\n\n def translate(self, tx, ty):\n \"\"\"\n Adds a translation in place.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n translate_mtx = np.array(\n [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],\n np.float_)\n self._mtx = np.dot(translate_mtx, self._mtx)\n self.invalidate()\n return self\n\n def scale(self, sx, sy=None):\n \"\"\"\n Adds a scale in place.\n\n If *sy* is None, the same scale is applied in both the *x*- and\n *y*-directions.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n if sy is None:\n sy = sx\n scale_mtx = np.array(\n [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],\n np.float_)\n self._mtx = np.dot(scale_mtx, self._mtx)\n self.invalidate()\n return self\n\n def skew(self, xShear, yShear):\n \"\"\"\n Adds a skew in place.\n\n *xShear* and *yShear* are the shear angles along the *x*- and\n *y*-axes, respectively, in radians.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n rotX = np.tan(xShear)\n rotY = np.tan(yShear)\n skew_mtx = np.array(\n [[1.0, rotX, 0.0], [rotY, 1.0, 0.0], [0.0, 0.0, 1.0]],\n np.float_)\n self._mtx = np.dot(skew_mtx, self._mtx)\n self.invalidate()\n return self\n\n def skew_deg(self, xShear, yShear):\n \"\"\"\n Adds a skew in place.\n\n *xShear* and *yShear* are the shear angles along the *x*- and\n *y*-axes, respectively, in degrees.\n\n Returns *self*, so this method can easily be chained with more\n calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\n and :meth:`scale`.\n \"\"\"\n return self.skew(np.deg2rad(xShear), np.deg2rad(yShear))\n\n def _get_is_separable(self):\n mtx = self.get_matrix()\n return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0\n is_separable = property(_get_is_separable)\n\n\nclass IdentityTransform(Affine2DBase):\n \"\"\"\n A special class that does on thing, the identity transform, in a\n fast way.\n \"\"\"\n _mtx = np.identity(3)\n\n def frozen(self):\n return self\n frozen.__doc__ = Affine2DBase.frozen.__doc__\n\n def __repr__(self):\n return \"IdentityTransform()\"\n\n def get_matrix(self):\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n def transform(self, points):\n return np.asanyarray(points)\n transform.__doc__ = Affine2DBase.transform.__doc__\n\n transform_affine = transform\n transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__\n\n transform_non_affine = transform\n transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__\n\n def transform_path(self, path):\n return path\n transform_path.__doc__ = Affine2DBase.transform_path.__doc__\n\n transform_path_affine = 
transform_path\n transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__\n\n transform_path_non_affine = transform_path\n transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__\n\n def get_affine(self):\n return self\n get_affine.__doc__ = Affine2DBase.get_affine.__doc__\n\n inverted = get_affine\n inverted.__doc__ = Affine2DBase.inverted.__doc__\n\n\nclass BlendedGenericTransform(Transform):\n \"\"\"\n A \"blended\" transform uses one transform for the *x*-direction, and\n another transform for the *y*-direction.\n\n This \"generic\" version can handle any given child transform in the\n *x*- and *y*-directions.\n \"\"\"\n input_dims = 2\n output_dims = 2\n is_separable = True\n pass_through = True\n\n def __init__(self, x_transform, y_transform, **kwargs):\n \"\"\"\n Create a new \"blended\" transform using *x_transform* to\n transform the *x*-axis and *y_transform* to transform the\n *y*-axis.\n\n You will generally not call this constructor directly but use\n the :func:`blended_transform_factory` function instead, which\n can determine automatically which kind of blended transform to\n create.\n \"\"\"\n # Here we ask: \"Does it blend?\"\n\n Transform.__init__(self, **kwargs)\n self._x = x_transform\n self._y = y_transform\n self.set_children(x_transform, y_transform)\n self._affine = None\n\n def __eq__(self, other):\n # Note, this is an exact copy of BlendedAffine2D.__eq__\n if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):\n return (self._x == other._x) and (self._y == other._y)\n elif self._x == self._y:\n return self._x == other\n else:\n return NotImplemented\n\n def contains_branch_seperately(self, transform):\n # Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately\n return self._x.contains_branch(transform), self._y.contains_branch(transform)\n\n @property\n def depth(self):\n return max([self._x.depth, self._y.depth])\n\n def contains_branch(self, other):\n # a blended transform cannot possibly contain a branch from two different transforms.\n return False\n\n def _get_is_affine(self):\n return self._x.is_affine and self._y.is_affine\n is_affine = property(_get_is_affine)\n\n def _get_has_inverse(self):\n return self._x.has_inverse and self._y.has_inverse\n has_inverse = property(_get_has_inverse)\n\n def frozen(self):\n return blended_transform_factory(self._x.frozen(), self._y.frozen())\n frozen.__doc__ = Transform.frozen.__doc__\n\n def __repr__(self):\n return \"BlendedGenericTransform(%s,%s)\" % (self._x, self._y)\n\n def transform_non_affine(self, points):\n if self._x.is_affine and self._y.is_affine:\n return points\n x = self._x\n y = self._y\n\n if x == y and x.input_dims == 2:\n return x.transform_non_affine(points)\n\n if x.input_dims == 2:\n x_points = x.transform_non_affine(points)[:, 0:1]\n else:\n x_points = x.transform_non_affine(points[:, 0])\n x_points = x_points.reshape((len(x_points), 1))\n\n if y.input_dims == 2:\n y_points = y.transform_non_affine(points)[:, 1:]\n else:\n y_points = y.transform_non_affine(points[:, 1])\n y_points = y_points.reshape((len(y_points), 1))\n\n if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):\n return ma.concatenate((x_points, y_points), 1)\n else:\n return np.concatenate((x_points, y_points), 1)\n transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__\n\n def inverted(self):\n return BlendedGenericTransform(self._x.inverted(), self._y.inverted())\n inverted.__doc__ = Transform.inverted.__doc__\n\n def 
get_affine(self):\n if self._invalid or self._affine is None:\n if self._x == self._y:\n self._affine = self._x.get_affine()\n else:\n x_mtx = self._x.get_affine().get_matrix()\n y_mtx = self._y.get_affine().get_matrix()\n # This works because we already know the transforms are\n # separable, though normally one would want to set b and\n # c to zero.\n mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))\n self._affine = Affine2D(mtx)\n self._invalid = 0\n return self._affine\n get_affine.__doc__ = Transform.get_affine.__doc__\n\n\nclass BlendedAffine2D(Affine2DBase):\n \"\"\"\n A \"blended\" transform uses one transform for the *x*-direction, and\n another transform for the *y*-direction.\n\n This version is an optimization for the case where both child\n transforms are of type :class:`Affine2DBase`.\n \"\"\"\n is_separable = True\n\n def __init__(self, x_transform, y_transform, **kwargs):\n \"\"\"\n Create a new \"blended\" transform using *x_transform* to\n transform the *x*-axis and *y_transform* to transform the\n *y*-axis.\n\n Both *x_transform* and *y_transform* must be 2D affine\n transforms.\n\n You will generally not call this constructor directly but use\n the :func:`blended_transform_factory` function instead, which\n can determine automatically which kind of blended transform to\n create.\n \"\"\"\n is_affine = x_transform.is_affine and y_transform.is_affine\n is_separable = x_transform.is_separable and y_transform.is_separable\n is_correct = is_affine and is_separable\n if not is_correct:\n msg = (\"Both *x_transform* and *y_transform* must be 2D affine\"\n \" transforms.\")\n raise ValueError(msg)\n\n Transform.__init__(self, **kwargs)\n self._x = x_transform\n self._y = y_transform\n self.set_children(x_transform, y_transform)\n\n Affine2DBase.__init__(self)\n self._mtx = None\n\n def __eq__(self, other):\n # Note, this is an exact copy of BlendedGenericTransform.__eq__\n if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):\n return (self._x == other._x) and (self._y == other._y)\n elif self._x == self._y:\n return self._x == other\n else:\n return NotImplemented\n\n def contains_branch_seperately(self, transform):\n # Note, this is an exact copy of BlendedTransform.contains_branch_seperately\n return self._x.contains_branch(transform), self._y.contains_branch(transform)\n\n def __repr__(self):\n return \"BlendedAffine2D(%s,%s)\" % (self._x, self._y)\n\n def get_matrix(self):\n if self._invalid:\n if self._x == self._y:\n self._mtx = self._x.get_matrix()\n else:\n x_mtx = self._x.get_matrix()\n y_mtx = self._y.get_matrix()\n # This works because we already know the transforms are\n # separable, though normally one would want to set b and\n # c to zero.\n self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\ndef blended_transform_factory(x_transform, y_transform):\n \"\"\"\n Create a new \"blended\" transform using *x_transform* to transform\n the *x*-axis and *y_transform* to transform the *y*-axis.\n\n A faster version of the blended transform is returned for the case\n where both child transforms are affine.\n \"\"\"\n if (isinstance(x_transform, Affine2DBase)\n and isinstance(y_transform, Affine2DBase)):\n return BlendedAffine2D(x_transform, y_transform)\n return BlendedGenericTransform(x_transform, y_transform)\n\n\nclass CompositeGenericTransform(Transform):\n \"\"\"\n A composite transform formed by applying transform *a* 
then\n transform *b*.\n\n This \"generic\" version can handle any two arbitrary\n transformations.\n \"\"\"\n pass_through = True\n\n def __init__(self, a, b, **kwargs):\n \"\"\"\n Create a new composite transform that is the result of\n applying transform *a* then transform *b*.\n\n You will generally not call this constructor directly but use\n the :func:`composite_transform_factory` function instead,\n which can automatically choose the best kind of composite\n transform instance to create.\n \"\"\"\n if a.output_dims != b.input_dims:\n msg = (\"The output dimension of 'a' must be equal to the input\"\n \" dimensions of 'b'\")\n raise ValueError(msg)\n self.input_dims = a.input_dims\n self.output_dims = b.output_dims\n\n Transform.__init__(self, **kwargs)\n self._a = a\n self._b = b\n self.set_children(a, b)\n\n is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)\n\n def frozen(self):\n self._invalid = 0\n frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())\n if not isinstance(frozen, CompositeGenericTransform):\n return frozen.frozen()\n return frozen\n frozen.__doc__ = Transform.frozen.__doc__\n\n def _invalidate_internal(self, value, invalidating_node):\n # In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs\n # to be extended to invalidate the NON_AFFINE part too. These cases are when the right\n # hand transform is non-affine and either:\n # (a) the left hand transform is non affine\n # (b) it is the left hand node which has triggered the invalidation\n if value == Transform.INVALID_AFFINE \\\n and not self._b.is_affine \\\n and (not self._a.is_affine or invalidating_node is self._a):\n\n value = Transform.INVALID\n\n Transform._invalidate_internal(self, value=value,\n invalidating_node=invalidating_node)\n\n def __eq__(self, other):\n if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):\n return self is other or (self._a == other._a and self._b == other._b)\n else:\n return False\n\n def _iter_break_from_left_to_right(self):\n for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():\n yield lh_compliment, rh_compliment + self._b\n for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():\n yield self._a + lh_compliment, rh_compliment\n\n @property\n def depth(self):\n return self._a.depth + self._b.depth\n\n def _get_is_affine(self):\n return self._a.is_affine and self._b.is_affine\n is_affine = property(_get_is_affine)\n\n def _get_is_separable(self):\n return self._a.is_separable and self._b.is_separable\n is_separable = property(_get_is_separable)\n\n if DEBUG:\n def __str__(self):\n return '(%s, %s)' % (self._a, self._b)\n\n def __repr__(self):\n return \"CompositeGenericTransform(%r, %r)\" % (self._a, self._b)\n\n def transform_affine(self, points):\n return self.get_affine().transform(points)\n transform_affine.__doc__ = Transform.transform_affine.__doc__\n\n def transform_non_affine(self, points):\n if self._a.is_affine and self._b.is_affine:\n return points\n elif not self._a.is_affine and self._b.is_affine:\n return self._a.transform_non_affine(points)\n else:\n return self._b.transform_non_affine(\n self._a.transform(points))\n transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__\n\n def transform_path_non_affine(self, path):\n if self._a.is_affine and self._b.is_affine:\n return path\n elif not self._a.is_affine and self._b.is_affine:\n return self._a.transform_path_non_affine(path)\n else:\n return 
self._b.transform_path_non_affine(\n self._a.transform_path(path))\n transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__\n\n def get_affine(self):\n if not self._b.is_affine:\n return self._b.get_affine()\n else:\n return Affine2D(np.dot(self._b.get_affine().get_matrix(),\n self._a.get_affine().get_matrix()))\n get_affine.__doc__ = Transform.get_affine.__doc__\n\n def inverted(self):\n return CompositeGenericTransform(self._b.inverted(), self._a.inverted())\n inverted.__doc__ = Transform.inverted.__doc__\n\n def _get_has_inverse(self):\n return self._a.has_inverse and self._b.has_inverse\n has_inverse = property(_get_has_inverse)\n\n\nclass CompositeAffine2D(Affine2DBase):\n \"\"\"\n A composite transform formed by applying transform *a* then transform *b*.\n\n This version is an optimization that handles the case where both *a*\n and *b* are 2D affines.\n \"\"\"\n def __init__(self, a, b, **kwargs):\n \"\"\"\n Create a new composite transform that is the result of\n applying transform *a* then transform *b*.\n\n Both *a* and *b* must be instances of :class:`Affine2DBase`.\n\n You will generally not call this constructor directly but use\n the :func:`composite_transform_factory` function instead,\n which can automatically choose the best kind of composite\n transform instance to create.\n \"\"\"\n if not a.is_affine or not b.is_affine:\n raise ValueError(\"'a' and 'b' must be affine transforms\")\n if a.output_dims != b.input_dims:\n msg = (\"The output dimension of 'a' must be equal to the input\"\n \" dimensions of 'b'\")\n raise ValueError(msg)\n self.input_dims = a.input_dims\n self.output_dims = b.output_dims\n\n Affine2DBase.__init__(self, **kwargs)\n self._a = a\n self._b = b\n self.set_children(a, b)\n self._mtx = None\n\n if DEBUG:\n def __str__(self):\n return '(%s, %s)' % (self._a, self._b)\n\n @property\n def depth(self):\n return self._a.depth + self._b.depth\n\n def _iter_break_from_left_to_right(self):\n for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():\n yield lh_compliment, rh_compliment + self._b\n for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():\n yield self._a + lh_compliment, rh_compliment\n\n def __repr__(self):\n return \"CompositeAffine2D(%r, %r)\" % (self._a, self._b)\n\n def get_matrix(self):\n if self._invalid:\n self._mtx = np.dot(\n self._b.get_matrix(),\n self._a.get_matrix())\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\ndef composite_transform_factory(a, b):\n \"\"\"\n Create a new composite transform that is the result of applying\n transform a then transform b.\n\n Shortcut versions of the blended transform are provided for the\n case where both child transforms are affine, or one or the other\n is the identity transform.\n\n Composite transforms may also be created using the '+' operator,\n e.g.::\n\n c = a + b\n \"\"\"\n # check to see if any of a or b are IdentityTransforms. We use\n # isinstance here to guarantee that the transforms will *always*\n # be IdentityTransforms. 
Since TransformWrappers are mutable,\n # use of equality here would be wrong.\n if isinstance(a, IdentityTransform):\n return b\n elif isinstance(b, IdentityTransform):\n return a\n elif isinstance(a, Affine2D) and isinstance(b, Affine2D):\n return CompositeAffine2D(a, b)\n return CompositeGenericTransform(a, b)\n\n\nclass BboxTransform(Affine2DBase):\n \"\"\"\n :class:`BboxTransform` linearly transforms points from one\n :class:`Bbox` to another :class:`Bbox`.\n \"\"\"\n is_separable = True\n\n def __init__(self, boxin, boxout, **kwargs):\n \"\"\"\n Create a new :class:`BboxTransform` that linearly transforms\n points from *boxin* to *boxout*.\n \"\"\"\n if not boxin.is_bbox or not boxout.is_bbox:\n msg = \"'boxin' and 'boxout' must be bbox\"\n raise ValueError(msg)\n\n Affine2DBase.__init__(self, **kwargs)\n self._boxin = boxin\n self._boxout = boxout\n self.set_children(boxin, boxout)\n self._mtx = None\n self._inverted = None\n\n def __repr__(self):\n return \"BboxTransform(%r, %r)\" % (self._boxin, self._boxout)\n\n def get_matrix(self):\n if self._invalid:\n inl, inb, inw, inh = self._boxin.bounds\n outl, outb, outw, outh = self._boxout.bounds\n x_scale = outw / inw\n y_scale = outh / inh\n if DEBUG and (x_scale == 0 or y_scale == 0):\n raise ValueError(\"Transforming from or to a singular bounding box.\")\n self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],\n [0.0 , y_scale, (-inb*y_scale+outb)],\n [0.0 , 0.0 , 1.0 ]],\n np.float_)\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\nclass BboxTransformTo(Affine2DBase):\n \"\"\"\n :class:`BboxTransformTo` is a transformation that linearly\n transforms points from the unit bounding box to a given\n :class:`Bbox`.\n \"\"\"\n is_separable = True\n\n def __init__(self, boxout, **kwargs):\n \"\"\"\n Create a new :class:`BboxTransformTo` that linearly transforms\n points from the unit bounding box to *boxout*.\n \"\"\"\n if not boxout.is_bbox:\n raise ValueError(\"'boxout' must be bbox\")\n\n Affine2DBase.__init__(self, **kwargs)\n self._boxout = boxout\n self.set_children(boxout)\n self._mtx = None\n self._inverted = None\n\n def __repr__(self):\n return \"BboxTransformTo(%r)\" % (self._boxout)\n\n def get_matrix(self):\n if self._invalid:\n outl, outb, outw, outh = self._boxout.bounds\n if DEBUG and (outw == 0 or outh == 0):\n raise ValueError(\"Transforming to a singular bounding box.\")\n self._mtx = np.array([[outw, 0.0, outl],\n [ 0.0, outh, outb],\n [ 0.0, 0.0, 1.0]],\n np.float_)\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\nclass BboxTransformToMaxOnly(BboxTransformTo):\n \"\"\"\n :class:`BboxTransformTo` is a transformation that linearly\n transforms points from the unit bounding box to a given\n :class:`Bbox` with a fixed upper left of (0, 0).\n \"\"\"\n def __repr__(self):\n return \"BboxTransformToMaxOnly(%r)\" % (self._boxout)\n\n def get_matrix(self):\n if self._invalid:\n xmax, ymax = self._boxout.max\n if DEBUG and (xmax == 0 or ymax == 0):\n raise ValueError(\"Transforming to a singular bounding box.\")\n self._mtx = np.array([[xmax, 0.0, 0.0],\n [ 0.0, ymax, 0.0],\n [ 0.0, 0.0, 1.0]],\n np.float_)\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\nclass BboxTransformFrom(Affine2DBase):\n \"\"\"\n :class:`BboxTransformFrom` linearly transforms points from a given\n :class:`Bbox` to the unit 
bounding box.\n \"\"\"\n is_separable = True\n\n def __init__(self, boxin, **kwargs):\n if not boxin.is_bbox:\n raise ValueError(\"'boxin' must be bbox\")\n\n Affine2DBase.__init__(self, **kwargs)\n self._boxin = boxin\n self.set_children(boxin)\n self._mtx = None\n self._inverted = None\n\n def __repr__(self):\n return \"BboxTransformFrom(%r)\" % (self._boxin)\n\n def get_matrix(self):\n if self._invalid:\n inl, inb, inw, inh = self._boxin.bounds\n if DEBUG and (inw == 0 or inh == 0):\n raise ValueError(\"Transforming from a singular bounding box.\")\n x_scale = 1.0 / inw\n y_scale = 1.0 / inh\n self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],\n [0.0 , y_scale, (-inb*y_scale)],\n [0.0 , 0.0 , 1.0 ]],\n np.float_)\n self._inverted = None\n self._invalid = 0\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\nclass ScaledTranslation(Affine2DBase):\n \"\"\"\n A transformation that translates by *xt* and *yt*, after *xt* and *yt*\n have been transformad by the given transform *scale_trans*.\n \"\"\"\n def __init__(self, xt, yt, scale_trans, **kwargs):\n Affine2DBase.__init__(self, **kwargs)\n self._t = (xt, yt)\n self._scale_trans = scale_trans\n self.set_children(scale_trans)\n self._mtx = None\n self._inverted = None\n\n def __repr__(self):\n return \"ScaledTranslation(%r)\" % (self._t,)\n\n def get_matrix(self):\n if self._invalid:\n xt, yt = self._scale_trans.transform_point(self._t)\n self._mtx = np.array([[1.0, 0.0, xt],\n [0.0, 1.0, yt],\n [0.0, 0.0, 1.0]],\n np.float_)\n self._invalid = 0\n self._inverted = None\n return self._mtx\n get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__\n\n\nclass TransformedPath(TransformNode):\n \"\"\"\n A :class:`TransformedPath` caches a non-affine transformed copy of\n the :class:`~matplotlib.path.Path`. This cached copy is\n automatically updated when the non-affine part of the transform\n changes.\n\n .. note::\n\n Paths are considered immutable by this class. Any update to the\n path's vertices/codes will not trigger a transform recomputation.\n\n \"\"\"\n def __init__(self, path, transform):\n \"\"\"\n Create a new :class:`TransformedPath` from the given\n :class:`~matplotlib.path.Path` and :class:`Transform`.\n \"\"\"\n if not isinstance(transform, Transform):\n msg = (\"'transform' must be an instance of\"\n \" 'matplotlib.transform.Transform'\")\n raise ValueError(msg)\n TransformNode.__init__(self)\n\n self._path = path\n self._transform = transform\n self.set_children(transform)\n self._transformed_path = None\n self._transformed_points = None\n\n def _revalidate(self):\n # only recompute if the invalidation includes the non_affine part of the transform\n if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)\n or self._transformed_path is None):\n self._transformed_path = \\\n self._transform.transform_path_non_affine(self._path)\n self._transformed_points = \\\n Path._fast_from_codes_and_verts(\n self._transform.transform_non_affine(self._path.vertices),\n None,\n {'interpolation_steps': self._path._interpolation_steps,\n 'should_simplify': self._path.should_simplify})\n self._invalid = 0\n\n def get_transformed_points_and_affine(self):\n \"\"\"\n Return a copy of the child path, with the non-affine part of\n the transform already applied, along with the affine part of\n the path necessary to complete the transformation. 
Unlike\n :meth:`get_transformed_path_and_affine`, no interpolation will\n be performed.\n \"\"\"\n self._revalidate()\n return self._transformed_points, self.get_affine()\n\n def get_transformed_path_and_affine(self):\n \"\"\"\n Return a copy of the child path, with the non-affine part of\n the transform already applied, along with the affine part of\n the path necessary to complete the transformation.\n \"\"\"\n self._revalidate()\n return self._transformed_path, self.get_affine()\n\n def get_fully_transformed_path(self):\n \"\"\"\n Return a fully-transformed copy of the child path.\n \"\"\"\n self._revalidate()\n return self._transform.transform_path_affine(self._transformed_path)\n\n def get_affine(self):\n return self._transform.get_affine()\n\n\ndef nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):\n '''\n Modify the endpoints of a range as needed to avoid singularities.\n\n *vmin*, *vmax*\n the initial endpoints.\n\n *tiny*\n threshold for the ratio of the interval to the maximum absolute\n value of its endpoints. If the interval is smaller than\n this, it will be expanded. This value should be around\n 1e-15 or larger; otherwise the interval will be approaching\n the double precision resolution limit.\n\n *expander*\n fractional amount by which *vmin* and *vmax* are expanded if\n the original interval is too small, based on *tiny*.\n\n *increasing*: [True | False]\n If True (default), swap *vmin*, *vmax* if *vmin* > *vmax*\n\n Returns *vmin*, *vmax*, expanded and/or swapped if necessary.\n\n If either input is inf or NaN, or if both inputs are 0,\n returns -*expander*, *expander*.\n '''\n if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):\n return -expander, expander\n swapped = False\n if vmax < vmin:\n vmin, vmax = vmax, vmin\n swapped = True\n if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:\n if vmax == 0 and vmin == 0:\n vmin = -expander\n vmax = expander\n else:\n vmin -= expander*abs(vmin)\n vmax += expander*abs(vmax)\n\n if swapped and not increasing:\n vmin, vmax = vmax, vmin\n return vmin, vmax\n\n\ndef interval_contains(interval, val):\n a, b = interval\n return (\n ((a < b) and (a <= val and b >= val))\n or (b <= val and a >= val))\n\ndef interval_contains_open(interval, val):\n a, b = interval\n return (\n ((a < b) and (a < val and b > val))\n or (b < val and a > val))\n\ndef offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):\n '''\n Return a new transform with an added offset.\n args:\n trans is any transform\n kwargs:\n fig is the current figure; it can be None if units are 'dots'\n x, y give the offset\n units is 'inches', 'points' or 'dots'\n '''\n if units == 'dots':\n return trans + Affine2D().translate(x, y)\n if fig is None:\n raise ValueError('For units of inches or points a fig kwarg is needed')\n if units == 'points':\n x /= 72.0\n y /= 72.0\n elif not units == 'inches':\n raise ValueError('units must be dots, points, or inches')\n return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nimport warnings\n\nimport numpy as np\n\nfrom matplotlib.testing.decorators import image_comparison, knownfailureif\nimport matplotlib.pyplot as plt\nfrom nose.tools import assert_raises\nfrom numpy.testing import assert_array_equal\nfrom matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea\nfrom matplotlib.patches import Rectangle\n\n\ndef example_plot(ax, fontsize=12):\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Title', fontsize=fontsize)\n\n\n@image_comparison(baseline_images=['tight_layout1'])\ndef test_tight_layout1():\n 'Test tight_layout for a single subplot'\n fig = plt.figure()\n ax = fig.add_subplot(111)\n example_plot(ax, fontsize=24)\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout2'])\ndef test_tight_layout2():\n 'Test tight_layout for mutiple subplots'\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout3'])\ndef test_tight_layout3():\n 'Test tight_layout for mutiple subplots'\n\n fig = plt.figure()\n\n ax1 = plt.subplot(221)\n ax2 = plt.subplot(223)\n ax3 = plt.subplot(122)\n\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout4'],\n freetype_version=('2.4.5', '2.4.9'))\ndef test_tight_layout4():\n 'Test tight_layout for subplot2grid'\n\n fig = plt.figure()\n\n ax1 = plt.subplot2grid((3, 3), (0, 0))\n ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)\n ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)\n ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)\n\n example_plot(ax1)\n example_plot(ax2)\n example_plot(ax3)\n example_plot(ax4)\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout5'])\ndef test_tight_layout5():\n 'Test tight_layout for image'\n\n fig = plt.figure()\n\n ax = plt.subplot(111)\n arr = np.arange(100).reshape((10, 10))\n ax.imshow(arr, interpolation=\"none\")\n\n plt.tight_layout()\n\n\n@image_comparison(baseline_images=['tight_layout6'])\ndef test_tight_layout6():\n 'Test tight_layout for gridspec'\n\n # This raises warnings since tight layout cannot\n # do this fully automatically. 
But the test is\n # correct since the layout is manually edited\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n fig = plt.figure()\n\n import matplotlib.gridspec as gridspec\n\n gs1 = gridspec.GridSpec(2, 1)\n ax1 = fig.add_subplot(gs1[0])\n ax2 = fig.add_subplot(gs1[1])\n\n example_plot(ax1)\n example_plot(ax2)\n\n gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])\n\n gs2 = gridspec.GridSpec(3, 1)\n\n for ss in gs2:\n ax = fig.add_subplot(ss)\n example_plot(ax)\n ax.set_title(\"\")\n ax.set_xlabel(\"\")\n\n ax.set_xlabel(\"x-label\", fontsize=12)\n\n gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)\n\n top = min(gs1.top, gs2.top)\n bottom = max(gs1.bottom, gs2.bottom)\n\n gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),\n 0.5, 1 - (gs1.top-top)])\n gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),\n None, 1 - (gs2.top-top)],\n h_pad=0.45)\n\n\n@image_comparison(baseline_images=['tight_layout7'])\ndef test_tight_layout7():\n # tight layout with left and right titles\n fig = plt.figure()\n fontsize = 24\n ax = fig.add_subplot(111)\n ax.plot([1, 2])\n ax.locator_params(nbins=3)\n ax.set_xlabel('x-label', fontsize=fontsize)\n ax.set_ylabel('y-label', fontsize=fontsize)\n ax.set_title('Left Title', loc='left', fontsize=fontsize)\n ax.set_title('Right Title', loc='right', fontsize=fontsize)\n plt.tight_layout()\n\n@image_comparison(baseline_images=['tight_layout8'])\ndef test_tight_layout8():\n 'Test automatic use of tight_layout'\n fig = plt.figure()\n fig.set_tight_layout({'pad': .1})\n ax = fig.add_subplot(111)\n example_plot(ax, fontsize=24)\n\n\ndef add_offsetboxes(ax, size=10, margin=.1, color='black'):\n \"\"\"\n Surround ax with OffsetBoxes\n \"\"\"\n da = DrawingArea(size, size)\n background = Rectangle((0, 0), width=size,\n height=size,\n facecolor=color,\n edgecolor='None',\n linewidth=0,\n antialiased=False)\n da.add_artist(background)\n m, mp = margin, 1+margin\n anchor_points = [(-m, -m), (-m, .5), (-m, mp),\n (mp, .5), (.5, mp), (mp, mp),\n (.5, -m), (mp, -m), (.5, -m)]\n for point in anchor_points:\n anchored_box = AnchoredOffsetbox(\n loc=10,\n child=da,\n pad=0.,\n frameon=False,\n bbox_to_anchor=point,\n bbox_transform=ax.transAxes,\n borderpad=0.)\n ax.add_artist(anchored_box)\n return anchored_box\n\n\n@image_comparison(baseline_images=['tight_layout_offsetboxes1',\n 'tight_layout_offsetboxes2'])\ndef test_tight_layout_offsetboxes():\n # 1.\n # - Create 4 subplots\n # - Plot a diagonal line on them\n # - Surround each plot with 7 boxes\n # - Use tight_layout\n # - See that the squares are included in the tight_layout\n # and that the squares in the middle do not overlap\n #\n # 2.\n # - Make the squares around the right side axes invisible\n # - See that the invisible squares do not affect the\n # tight_layout\n rows = cols = 2\n colors = ['red', 'blue', 'green', 'yellow']\n x = y = [0, 1]\n\n def _subplots():\n _, axs = plt.subplots(rows, cols)\n axs = axs.flat\n for ax, color in zip(axs, colors):\n ax.plot(x, y, color=color)\n add_offsetboxes(ax, 20, color=color)\n return axs\n\n # 1.\n axs = _subplots()\n plt.tight_layout()\n\n # 2.\n axs = _subplots()\n for ax in (axs[cols-1::rows]):\n for child in ax.get_children():\n if isinstance(child, AnchoredOffsetbox):\n child.set_visible(False)\n\n plt.tight_layout()\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport nose\n\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nfrom matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea\n\n\n@image_comparison(baseline_images=['offsetbox_clipping'], remove_text=True)\ndef test_offsetbox_clipping():\n # - create a plot\n # - put an AnchoredOffsetbox with a child DrawingArea\n # at the center of the axes\n # - give the DrawingArea a gray background\n # - put a black line across the bounds of the DrawingArea\n # - see that the black line is clipped to the edges of\n # the DrawingArea.\n fig, ax = plt.subplots()\n size = 100\n da = DrawingArea(size, size, clip=True)\n bg = mpatches.Rectangle((0, 0), size, size,\n facecolor='#CCCCCC',\n edgecolor='None',\n linewidth=0)\n line = mlines.Line2D([-size*.5, size*1.5], [size/2, size/2],\n color='black',\n linewidth=10)\n anchored_box = AnchoredOffsetbox(\n loc=10,\n child=da,\n pad=0.,\n frameon=False,\n bbox_to_anchor=(.5, .5),\n bbox_transform=ax.transAxes,\n borderpad=0.)\n\n da.add_artist(bg)\n da.add_artist(line)\n ax.add_artist(anchored_box)\n ax.set_xlim((0, 1))\n ax.set_ylim((0, 1))\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=['-s', '--with-doctest'], exit=False)\n"
] | [
[
"matplotlib.verbose.report"
],
[
"numpy.dot",
"numpy.asarray",
"numpy.arctan2",
"numpy.concatenate",
"numpy.max",
"numpy.any",
"numpy.ma.getmask",
"numpy.ma.is_masked",
"numpy.sin",
"numpy.asanyarray",
"matplotlib._path.update_path_extents",
"numpy.ma.isMaskedArray",
"numpy.min",
"numpy.isnan",
"numpy.linalg.inv",
"numpy.tan",
"numpy.deg2rad",
"numpy.ma.concatenate",
"numpy.identity",
"numpy.errstate",
"numpy.array",
"matplotlib._path.affine_transform",
"numpy.sum",
"numpy.ma.filled",
"numpy.isfinite",
"numpy.cos",
"numpy.sign",
"numpy.vstack"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.offsetbox.DrawingArea",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.offsetbox.AnchoredOffsetbox",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
],
[
"matplotlib.offsetbox.DrawingArea",
"matplotlib.patches.Rectangle",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.offsetbox.AnchoredOffsetbox"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
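The matplotlib row above embeds `transforms.py`, whose composite-transform factory short-circuits identity children and whose `BboxTransform` linearly maps one bounding box onto another. The following is a minimal sketch of how that public API behaves, assuming only a standard matplotlib installation; the variable names and numeric values are illustrative, not taken from the repository.

```python
# Minimal sketch exercising the matplotlib.transforms API shown above
# (composite_transform_factory, BboxTransform). Assumes a standard
# matplotlib install; names and values here are illustrative only.
import numpy as np
from matplotlib.transforms import (Affine2D, Bbox, BboxTransform,
                                   IdentityTransform,
                                   composite_transform_factory)

a = Affine2D().scale(2.0)            # affine child transform "a"
b = Affine2D().translate(1.0, 3.0)   # affine child transform "b"

# Both children are affine, so the factory returns the optimized composite,
# equivalent to "a + b" (apply a, then b).
comp = composite_transform_factory(a, b)
print(comp.transform((1.0, 1.0)))    # expected: [3. 5.]

# Identity children are returned directly rather than wrapped.
assert composite_transform_factory(IdentityTransform(), b) is b

# BboxTransform: linear map from one bounding box to another.
boxin = Bbox.from_bounds(0.0, 0.0, 1.0, 1.0)      # unit box
boxout = Bbox.from_bounds(10.0, 20.0, 2.0, 4.0)   # left, bottom, width, height
bt = BboxTransform(boxin, boxout)
print(bt.transform((0.5, 0.5)))      # expected: [11. 22.]
```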
DaulPavid/pyturbo | [
"878e0b1b514c043f1b4ea5cd5268b23c0df5192e"
] | [
"turbo/turbo_encoder.py"
] | [
"#\n# Turbo Encoder\n#\n\nimport numpy as np\n\nfrom .rsc import RSC\n\n\nclass TurboEncoder:\n def __init__(self, interleaver):\n self.interleaver = interleaver\n self.block_size = len(self.interleaver)\n self.encoders = 2 * [RSC()]\n\n def reset(self):\n for e in self.encoders:\n e.reset()\n\n def interleave(self, vector):\n interleaved = np.zeros(self.block_size, dtype=int)\n for i in range(0, self.block_size):\n interleaved[i] = vector[self.interleaver[i]]\n\n return interleaved\n\n def execute(self, vector):\n output_size = 3 * (len(vector) + len(self.encoders[0].registers))\n output = np.zeros(output_size, dtype=int)\n interleaved = self.interleave(vector)\n\n output[1::3], output[::3] = self.encoders[0].execute(vector)\n output[2::3], _ = self.encoders[1].execute(interleaved)\n\n return output\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
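The pyturbo row above permutes the input bits through an interleaver before the second constituent encoder and multiplexes three streams into the output. The sketch below reproduces that interleaving and multiplexing pattern with numpy only; the `toy_parity` function is a placeholder standing in for the repository's `RSC` class (whose internals are not shown here), and the termination bits added for the RSC registers are omitted.

```python
# Standalone sketch of the interleave/multiplex pattern in TurboEncoder.execute.
# toy_parity is a placeholder, NOT the repository's RSC implementation, and
# register termination bits are left out for brevity.
import numpy as np

interleaver = np.array([3, 0, 2, 1])        # example permutation (assumed)
bits = np.array([1, 0, 1, 1], dtype=int)

# TurboEncoder.interleave: output[i] = input[interleaver[i]]
interleaved = bits[interleaver]
print(interleaved)                          # [1 1 1 0]

def toy_parity(v):
    """Placeholder for a constituent encoder: running XOR of the input bits."""
    return np.bitwise_xor.accumulate(v)

# The encoder output is interleaved 3-wide: systematic, parity 1, parity 2.
out = np.zeros(3 * len(bits), dtype=int)
out[::3] = bits                             # systematic stream
out[1::3] = toy_parity(bits)                # parity from encoder 1
out[2::3] = toy_parity(interleaved)         # parity from encoder 2 (interleaved input)
print(out)
```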
wmcnally/evopose2d | [
"ea05b818044d8d84e9cbbee778bc465be59ebd59"
] | [
"inference_speed.py"
] | [
"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nfrom dataset.dataloader import load_tfds\nfrom time import time\nimport argparse\nfrom nets.simple_basline import SimpleBaseline\nfrom nets.evopose2d import EvoPose\nfrom nets.hrnet import HRNet\nfrom utils import detect_hardware\n\n\ndef speed_test(strategy, cfg, split='val', n=1000):\n with strategy.scope():\n if cfg.MODEL.TYPE == 'simple_baseline':\n model = SimpleBaseline(cfg)\n elif cfg.MODEL.TYPE == 'hrnet':\n model = HRNet(cfg)\n elif cfg.MODEL.TYPE == 'evopose':\n model = EvoPose(cfg)\n\n cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]\n\n ds = load_tfds(cfg, split, det=cfg.VAL.DET,\n predict_kp=True, drop_remainder=cfg.VAL.DROP_REMAINDER)\n ds = strategy.experimental_distribute_dataset(ds)\n\n @tf.function\n def predict(imgs, flip=False):\n if flip:\n imgs = imgs[:, :, ::-1, :]\n return model(imgs, training=False)\n\n for count, batch in enumerate(ds):\n if count == 1: # skip first pass\n ti = time()\n\n _, imgs, _, _, scores = batch\n\n hms = strategy.run(predict, args=(imgs,)).numpy()\n\n if cfg.VAL.FLIP:\n flip_hms = strategy.run(predict, args=(imgs, True,)).numpy()\n flip_hms = flip_hms[:, :, ::-1, :]\n tmp = flip_hms.copy()\n for i in range(len(cfg.DATASET.KP_FLIP)):\n flip_hms[:, :, :, i] = tmp[:, :, :, cfg.DATASET.KP_FLIP[i]]\n # shift to align features\n flip_hms[:, :, 1:, :] = flip_hms[:, :, 0:-1, :].copy()\n hms = (hms + flip_hms) / 2.\n\n if count == n:\n break\n\n print('FPS: {:.5f}'.format((n * cfg.VAL.BATCH_SIZE) / (time() - ti)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--cpu', action='store_true')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--tpu', default='')\n parser.add_argument('-c', '--cfg', required=True) # yaml\n parser.add_argument('-bs', '--batch-size', type=int, default=1)\n parser.add_argument('-n', type=int, default=1000)\n args = parser.parse_args()\n\n from dataset.coco import cn as cfg\n cfg.merge_from_file('configs/' + args.cfg)\n cfg.MODEL.NAME = args.cfg.split('.')[0]\n cfg.VAL.BATCH_SIZE = args.batch_size\n\n if args.cpu:\n strategy = tf.distribute.OneDeviceStrategy('/CPU:0')\n elif args.gpu:\n strategy = tf.distribute.OneDeviceStrategy('/GPU:0')\n else:\n tpu, strategy = detect_hardware(args.tpu)\n\n tf.config.optimizer.set_experimental_options({'disable_meta_optimizer': True})\n speed_test(strategy, cfg, split='val', n=args.n)\n\n\n\n"
] | [
[
"tensorflow.distribute.OneDeviceStrategy",
"tensorflow.config.optimizer.set_experimental_options"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
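The evopose2d row above averages heatmaps from the original and horizontally flipped image during evaluation: the flipped prediction is mirrored back, left/right keypoint channels are swapped, and the result is shifted one column to re-align features before averaging. The numpy-only sketch below isolates that post-processing step; the random arrays stand in for model outputs, and `KP_FLIP` is a toy mapping rather than the COCO mapping from the repo's config.

```python
# Sketch of the flip-test heatmap averaging in speed_test above (numpy only).
# The random arrays stand in for model(imgs) and model(flipped imgs);
# KP_FLIP is an assumed toy left/right swap, not the repo's COCO config.
import numpy as np

KP_FLIP = [1, 0, 2]                      # swap keypoints 0 and 1, keep 2
hms = np.random.rand(2, 64, 48, 3)       # (batch, H, W, num_keypoints)
flip_hms = np.random.rand(2, 64, 48, 3)  # stand-in for model(imgs[:, :, ::-1, :])

flip_hms = flip_hms[:, :, ::-1, :]       # mirror heatmaps back horizontally
tmp = flip_hms.copy()
for i, j in enumerate(KP_FLIP):          # swap left/right keypoint channels
    flip_hms[:, :, :, i] = tmp[:, :, :, j]
flip_hms[:, :, 1:, :] = flip_hms[:, :, :-1, :].copy()  # one-column shift to align
avg_hms = (hms + flip_hms) / 2.0
print(avg_hms.shape)                     # (2, 64, 48, 3)
```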
ourDirection/ourDirection | [
"b99ed67a8cc0fe5016e03fe3b5ad083b7f8bbdc0"
] | [
"momus/VHRED/split-examples-by-token.py"
] | [
"\"\"\"\nTakes as input a binarized dialogue corpus, splits the examples by a certain token and shuffles it\n\nExample run:\n\n python split-examples-by-token.py Training.dialogues.pkl 2 Training_SplitByDialogues.dialogues --join_last_two_examples\n\n@author Iulian Vlad Serban\n\"\"\"\n\nimport collections\nimport numpy\nimport math\nimport operator\nimport os\nimport sys\nimport logging\nimport cPickle\n\nfrom collections import Counter\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('text2dict')\n\ndef safe_pickle(obj, filename):\n if os.path.isfile(filename):\n logger.info(\"Overwriting %s.\" % filename)\n else:\n logger.info(\"Saving to %s.\" % filename)\n \n with open(filename, 'wb') as f:\n cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)\n\n# Thanks to Emile on Stackoverflow:\n# http://stackoverflow.com/questions/4322705/split-a-list-into-nested-lists-on-a-value\n\ndef _itersplit(l, splitters):\n current = []\n for item in l:\n if item in splitters:\n yield current\n current = []\n else:\n current.append(item)\n yield current\n\ndef magicsplit(l, *splitters):\n return [subl for subl in _itersplit(l, splitters) if subl]\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\", type=str, help=\"Binarized dialogue corpus (pkl file)\")\nparser.add_argument(\"token_id\", type=int, help=\"Token index to split examples by (e.g. to split by end-of-dialogue set this to 2)\")\nparser.add_argument(\"consecutive_examples_to_merge\", type=int, default='1', help=\"After splitting these number of examples will be merged.\")\nparser.add_argument(\"--join_last_two_examples\",\n action=\"store_true\", default=False,\n help=\"If on, will join the last two splits generated from each example. This is useful to handle empty or very short last samples\")\n\n\nparser.add_argument(\"output\", type=str, help=\"Filename of processed binarized dialogue corpus (pkl file)\")\nargs = parser.parse_args()\n\nif not os.path.isfile(args.input):\n raise Exception(\"Input file not found!\")\n\nlogger.info(\"Loading dialogue corpus\")\ndata = cPickle.load(open(args.input, 'r'))\ndata_len = len(data)\n\nlogger.info('Corpus loaded... Data len is %d' % data_len)\n\n# Count number of tokens\ntokens_count = 0\nfor i in range(data_len):\n tokens_count += len(data[i])\nlogger.info('Tokens count %d' % tokens_count)\n\n\nlogger.info(\"Splitting corpus examples by token id... 
\")\nprocessed_binarized_corpus = []\nfor i in range(data_len):\n logger.info(' Example %d ' % i)\n new_examples = magicsplit(data[i], int(args.token_id))\n\n # If option is specified, we append the last new example to the second last one\n if args.join_last_two_examples and len(new_examples) > 1:\n new_examples[len(new_examples)-2] += new_examples[len(new_examples)-1]\n del new_examples[len(new_examples)-1]\n\n # Simpler version of the two for loops, which does not allow merging together samples\n #for new_example in new_examples:\n # processed_binarized_corpus.append(new_example + [int(args.token_id)])\n\n s = int(math.floor(len(new_examples) / args.consecutive_examples_to_merge))\n for j in range(1, s):\n start_index = j*args.consecutive_examples_to_merge\n merged_example = []\n for k in reversed(range(args.consecutive_examples_to_merge)):\n merged_example += new_examples[start_index-k-1] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n\n if s > 0:\n merged_example = []\n for k in range((s-1)*args.consecutive_examples_to_merge, len(new_examples)):\n merged_example += new_examples[k] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n else:\n merged_example = []\n for k in range(len(new_examples)):\n merged_example += new_examples[k] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n\n\nlogger.info('New data len is %d' % len(processed_binarized_corpus))\n\n# Count number of tokens\nprocessed_tokens_count = 0\nfor i in range(len(processed_binarized_corpus)):\n processed_tokens_count += len(processed_binarized_corpus[i])\nlogger.info('New tokens count %d' % processed_tokens_count)\n\n# When splitting by end-of-utterance token </s>, there are some instances with multiple </s> at the end of each example. Our splitting method will effectively remove these, but it is not of any concern to us.\n# assert(processed_tokens_count == tokens_count)\n\nlogger.info(\"Reshuffling corpus.\")\nrng = numpy.random.RandomState(13248)\nrng.shuffle(processed_binarized_corpus)\n\nlogger.info(\"Saving corpus.\")\nsafe_pickle(processed_binarized_corpus, args.output + \".pkl\")\n\nlogger.info(\"Corpus saved. All done!\")\n"
] | [
[
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
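The VHRED row above splits each binarized dialogue on a chosen token id using the `magicsplit` helper, which drops the empty pieces produced by consecutive split tokens. Since the original script targets Python 2 (cPickle, print-less logging), the sketch below restates just that helper in Python 3 to show its behavior on a small token list.

```python
# Python 3 sketch of the splitting helper used by split-examples-by-token.py.
# It splits a flat token list on a given token id and drops empty pieces,
# which is what magicsplit() does in the original (Python 2) script.
def itersplit(tokens, splitters):
    current = []
    for item in tokens:
        if item in splitters:
            yield current
            current = []
        else:
            current.append(item)
    yield current

def magicsplit(tokens, *splitters):
    return [piece for piece in itersplit(tokens, splitters) if piece]

# Token id 2 marks end-of-dialogue, as in the example command in the docstring.
dialogue = [5, 7, 2, 9, 2, 2, 4, 6, 1]
print(magicsplit(dialogue, 2))   # [[5, 7], [9], [4, 6, 1]]
```

Note how the run of two consecutive `2` tokens yields an empty piece that is filtered out, which is why the script's token counts can shrink slightly after splitting.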
YosefLab/SingleCellLineageTracing | [
"010072b307f7eadbf10dc4af8b2165e48f1736a7",
"010072b307f7eadbf10dc4af8b2165e48f1736a7",
"010072b307f7eadbf10dc4af8b2165e48f1736a7"
] | [
"test/simulator_tests/birth_death_simulator_test.py",
"test/preprocess_tests/align_sequence_test.py",
"test/solver_tests/neighborjoining_solver_test.py"
] | [
"import unittest\n\nimport networkx as nx\nimport numpy as np\n\nfrom typing import List, Tuple\n\n\nfrom cassiopeia.data.CassiopeiaTree import CassiopeiaTree\nfrom cassiopeia.mixins import TreeSimulatorError\nfrom cassiopeia.simulator.BirthDeathFitnessSimulator import (\n BirthDeathFitnessSimulator,\n)\n\nimport cassiopeia.data.utilities as utilities\n\n\ndef extract_tree_statistics(\n tree: CassiopeiaTree,\n) -> Tuple[List[float], int, bool]:\n \"\"\"A helper function for testing simulated trees.\n\n Outputs the total lived time for each extant lineage, the number of extant\n lineages, and whether the tree has the expected node degrees (to ensure\n unifurcations were collapsed).\n\n Args:\n tree: The tree to test\n\n Returns:\n The total time lived for each leaf, the number of leaves, and if the\n degrees only have degree 0 or 2\n \"\"\"\n\n times = []\n out_degrees = []\n for i in tree.nodes:\n if tree.is_leaf(i):\n times.append(tree.get_time(i))\n out_degrees.append(len(tree.children(i)))\n out_degrees.pop(0)\n\n correct_degrees = all(x == 2 or x == 0 for x in out_degrees)\n\n return times, len(times), correct_degrees\n\n\nclass BirthDeathSimulatorTest(unittest.TestCase):\n def test_bad_waiting_distributions(self):\n \"\"\"Ensures errors when invalid distributions are given.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: -1, 1, experiment_time=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(lambda _: 0, 1, num_extant=4)\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: -1, num_extant=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 0, experiment_time=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1,\n 1,\n lambda: 0,\n mutation_distribution=lambda: -1,\n fitness_distribution=lambda: 1,\n experiment_time=1,\n )\n tree = bd_sim.simulate_tree()\n\n def test_bad_stopping_conditions(self):\n \"\"\"Ensures errors when an invalid stopping conditions are given.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, lambda: 2)\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=0.5\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=-1\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=0\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, experiment_time=-1\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, experiment_time=0\n )\n\n def test_dead_at_start(self):\n \"\"\"Ensures errors in base case where all lineages die on first event.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 2, 1, lambda: 1, num_extant=4\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 2, 1, lambda: 1, experiment_time=4\n )\n tree = 
bd_sim.simulate_tree()\n\n def test_dead_before_end(self):\n \"\"\"Ensures errors when all lineages die before stopping condition.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(0.6)\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, num_extant=8, random_seed=5\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, experiment_time=2, random_seed=5\n )\n tree = bd_sim.simulate_tree()\n\n def test_single_lineage(self):\n \"\"\"Tests base case that stopping conditions work before divisions.\"\"\"\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=1)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertEqual(results[1], 1)\n self.assertEqual(tree.get_branch_length(\"0\", \"1\"), 1.0)\n self.assertEqual(results[0], [1])\n\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=1)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertEqual(results[1], 1)\n self.assertEqual(tree.get_branch_length(\"0\", \"1\"), 1.0)\n self.assertEqual(results[0], [1])\n\n def test_constant_yule(self):\n \"\"\"Tests small case without death with constant waiting times.\"\"\"\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=32)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 6)\n self.assertEqual(results[1], 32)\n self.assertTrue(results[2])\n\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=6)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 6)\n self.assertEqual(results[1], 32)\n self.assertTrue(results[2])\n\n def test_nonconstant_yule(self):\n \"\"\"Tests case without death with variable waiting times.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 1, num_extant=16, random_seed=54\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 16)\n self.assertTrue(results[2])\n self.assertEqual(max([int(i) for i in tree.nodes]), 31)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 1, experiment_time=2, random_seed=54\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 2)\n self.assertTrue(results[2])\n\n def test_nonconstant_birth_death(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Also, tests pruning dead lineages and unifurcation collapsing.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, num_extant=8, random_seed=1234\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n self.assertNotIn(\"9\", tree.nodes)\n self.assertNotIn(\"2\", tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, experiment_time=2, random_seed=1234\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n 
self.assertTrue(np.isclose(i, 2))\n self.assertTrue(results[2])\n self.assertNotIn(\"9\", tree.nodes)\n self.assertNotIn(\"2\", tree.nodes)\n\n def test_nonconstant_birth_death_no_unifurcation_collapsing(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Checks that unifurcations are not collapsed.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n collapse_unifurcations=False,\n random_seed=12,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertFalse(results[2])\n self.assertNotIn(\"3\", tree.nodes)\n self.assertIn(\"2\", tree.nodes)\n self.assertIn(\"6\", tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n experiment_time=1.3,\n collapse_unifurcations=False,\n random_seed=12,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 1.3))\n self.assertFalse(results[2])\n self.assertNotIn(\"3\", tree.nodes)\n self.assertIn(\"2\", tree.nodes)\n self.assertIn(\"6\", tree.nodes)\n\n def test_nonconstant_birth_death_both_stopping_conditions(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Checks that using both stopping conditions works fine.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n experiment_time=2,\n random_seed=17,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertTrue(all(x > 1 for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n experiment_time=1,\n random_seed=17,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 1))\n self.assertEqual(results[1], 3)\n self.assertTrue(results[2])\n\n def test_nonconstant_yule_with_predictable_fitness(self):\n \"\"\"Tests case with birth and death with constant fitness.\"\"\"\n\n def check_fitness_values_as_expected(tree: nx.DiGraph):\n \"\"\"Checks if the fitness value stored at each node is what we\n expect given deterministic fitness evolution\"\"\"\n tree = tree.copy()\n for u, v in tree.edges:\n tree[u][v][\"val\"] = 1\n tree.nodes[\"0\"][\"depth\"] = 0\n for u, v in nx.dfs_edges(tree, source=\"0\"):\n tree.nodes[v][\"depth\"] = (\n tree.nodes[u][\"depth\"] + tree[u][v][\"val\"]\n )\n leaves = [n for n in tree if tree.out_degree(n) == 0]\n for i in tree.nodes:\n if i in leaves:\n self.assertTrue(\n np.isclose(\n tree.nodes[i][\"birth_scale\"],\n 0.5 * 0.98 ** (2 * (tree.nodes[i][\"depth\"] - 1)),\n )\n )\n else:\n self.assertTrue(\n np.isclose(\n tree.nodes[i][\"birth_scale\"],\n 0.5 * 0.98 ** (2 * tree.nodes[i][\"depth\"]),\n )\n )\n\n birth_wd = lambda scale: np.random.exponential(scale)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n mutation_distribution=lambda: 2,\n fitness_distribution=lambda: 1,\n fitness_base=0.98,\n num_extant=8,\n random_seed=1234,\n )\n tree = bd_sim.simulate_tree()\n results = 
extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n check_fitness_values_as_expected(tree.get_tree_topology())\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n mutation_distribution=lambda: 2,\n fitness_distribution=lambda: 1,\n fitness_base=0.98,\n experiment_time=0.6,\n random_seed=1234,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 0.6))\n self.assertTrue(results[2])\n check_fitness_values_as_expected(tree.get_tree_topology())\n\n def test_nonconstant_birth_death_with_variable_fitness(self):\n \"\"\"Tests a case with variable birth and death waiting times, as well\n as variable fitness evolution. Also tests pruning and collapsing.\"\"\"\n\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(0.6)\n mut_dist = lambda: 1 if np.random.uniform() < 0.2 else 0\n fit_dist = lambda: np.random.uniform(-1, 1)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n mut_dist,\n fit_dist,\n 1.5,\n num_extant=8,\n random_seed=12364,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n self.assertNotIn(2, tree.nodes)\n self.assertNotIn(3, tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n mut_dist,\n fit_dist,\n 1.5,\n experiment_time=3,\n random_seed=12364,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 3))\n self.assertTrue(results[2])\n self.assertNotIn(2, tree.nodes)\n self.assertNotIn(3, tree.nodes)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nTests for the sequence alignment in pipeline.py.\n\"\"\"\nimport unittest\n\nimport numpy as np\nimport pandas as pd\n\nimport cassiopeia\n\n\nclass TestAlignSequence(unittest.TestCase):\n def setUp(self):\n\n self.queries = pd.DataFrame.from_dict(\n {\n \"cellBC\": [\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\"],\n \"UMI\": [\"1\", \"2\", \"3\", \"1\", \"2\", \"1\", \"2\", \"3\"],\n \"readCount\": [20, 30, 30, 40, 40, 10, 10, 15],\n \"seq\": [\n \"AACCTTGG\",\n \"ACTG\",\n \"AACCTTGGACTGCATCG\",\n \"AATTAA\",\n \"ACTGGACT\",\n \"AACCTTGGGG\",\n \"AAAAAAAAAAA\",\n \"TACTCTATA\",\n ],\n }\n )\n self.queries[\"readName\"] = self.queries.apply(\n lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1\n )\n\n self.reference = \"AACCTTGG\"\n\n def test_alignment_dataframe_structure(self):\n\n aln_df = cassiopeia.pp.align_sequences(\n self.queries,\n ref=self.reference,\n gap_open_penalty=20,\n gap_extend_penalty=1,\n n_threads=2,\n )\n\n self.assertEqual(aln_df.shape[0], self.queries.shape[0])\n\n for cellBC in self.queries[\"cellBC\"].unique():\n self.assertIn(cellBC, aln_df[\"cellBC\"].unique())\n\n expected_columns = [\n \"cellBC\",\n \"UMI\",\n \"AlignmentScore\",\n \"CIGAR\",\n \"QueryBegin\",\n \"ReferenceBegin\",\n \"Seq\",\n \"readName\",\n \"readCount\",\n ]\n\n for column in expected_columns:\n self.assertIn(column, aln_df.columns)\n\n def test_extremely_large_gap_open_penalty(self):\n\n aln_df = cassiopeia.pp.align_sequences(\n self.queries,\n ref=self.reference,\n gap_open_penalty=255,\n gap_extend_penalty=1,\n )\n\n # since the gap open penalty is so large, enforce that\n # no gaps should occur\n for ind, row in aln_df.iterrows():\n\n self.assertNotIn(\"D\", row.CIGAR)\n self.assertNotIn(\"I\", row.CIGAR)\n\n def test_default_alignment_works(self):\n\n aln_df = cassiopeia.pp.align_sequences(\n self.queries,\n ref=self.reference,\n gap_open_penalty=2,\n gap_extend_penalty=1,\n )\n\n expected_alignments = {\n \"A_1_20\": (\"8M\", 40),\n \"A_2_30\": (\"2M2D2M\", 17),\n \"A_3_30\": (\"8M\", 40),\n \"B_1_40\": (\"2M2D2M\", 17),\n \"B_2_40\": (\"2M2D3M\", 22),\n \"C_1_10\": (\"8M\", 40),\n \"C_2_10\": (\"2M\", 10),\n \"C_3_15\": (\"2M1I2M1I1M\", 21),\n }\n\n for read_name in aln_df[\"readName\"].unique():\n\n expected_cigar = expected_alignments[read_name][0]\n expected_score = expected_alignments[read_name][1]\n\n self.assertEqual(\n aln_df.loc[aln_df[\"readName\"] == read_name, \"CIGAR\"].iloc[0],\n expected_cigar,\n )\n self.assertEqual(\n aln_df.loc[aln_df[\"readName\"] == read_name, \"AlignmentScore\"].iloc[0],\n expected_score,\n )\n\n def test_global_alignment(self):\n\n aln_df = cassiopeia.pp.align_sequences(\n self.queries,\n ref=self.reference,\n gap_open_penalty=2,\n gap_extend_penalty=1,\n method=\"global\",\n )\n\n expected_alignments = {\n \"A_1_20\": (\"8M\", 40),\n \"A_2_30\": (\"1M2D2M1D1M1D\", 15),\n \"A_3_30\": (\"8M9I\", 40),\n \"B_1_40\": (\"2M2D2M2D2I\", 14),\n \"B_2_40\": (\"1M2D2M1D2M3I\", 20),\n \"C_1_10\": (\"8M2I\", 40),\n \"C_2_10\": (\"2M6D9I\", 3),\n \"C_3_15\": (\"1I1M1D1M1I2M1I1M1I2D\", 15),\n }\n\n for read_name in aln_df[\"readName\"].unique():\n\n expected_cigar = expected_alignments[read_name][0]\n expected_score = expected_alignments[read_name][1]\n\n self.assertEqual(\n aln_df.loc[aln_df[\"readName\"] == read_name, \"CIGAR\"].iloc[0],\n expected_cigar,\n )\n self.assertEqual(\n aln_df.loc[aln_df[\"readName\"] == read_name, \"AlignmentScore\"].iloc[0],\n expected_score,\n )\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n",
"\"\"\"\nTest NeighborJoiningSolver in Cassiopeia.solver.\n\"\"\"\nimport unittest\nfrom typing import Dict, Optional\nfrom unittest import mock\n\nimport itertools\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nimport cassiopeia as cas\n\n\ndef find_triplet_structure(triplet, T):\n a, b, c = triplet[0], triplet[1], triplet[2]\n a_ancestors = [node for node in nx.ancestors(T, a)]\n b_ancestors = [node for node in nx.ancestors(T, b)]\n c_ancestors = [node for node in nx.ancestors(T, c)]\n ab_common = len(set(a_ancestors) & set(b_ancestors))\n ac_common = len(set(a_ancestors) & set(c_ancestors))\n bc_common = len(set(b_ancestors) & set(c_ancestors))\n structure = \"-\"\n if ab_common > bc_common and ab_common > ac_common:\n structure = \"ab\"\n elif ac_common > bc_common and ac_common > ab_common:\n structure = \"ac\"\n elif bc_common > ab_common and bc_common > ac_common:\n structure = \"bc\"\n return structure\n\n\n# specify dissimilarity function for solvers to use\ndef delta_fn(\n x: np.array,\n y: np.array,\n missing_state: int,\n priors: Optional[Dict[int, Dict[int, float]]],\n):\n d = 0\n for i in range(len(x)):\n if x[i] != y[i]:\n d += 1\n return d\n\n\nclass TestNeighborJoiningSolver(unittest.TestCase):\n def setUp(self):\n\n # --------------------- General NJ ---------------------\n cm = pd.DataFrame.from_dict(\n {\n \"a\": [0, 1, 2],\n \"b\": [1, 1, 2],\n \"c\": [2, 2, 2],\n \"d\": [1, 1, 1],\n \"e\": [0, 0, 0],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n )\n\n delta = pd.DataFrame.from_dict(\n {\n \"a\": [0, 15, 21, 17, 12],\n \"b\": [15, 0, 10, 6, 17],\n \"c\": [21, 10, 0, 10, 23],\n \"d\": [17, 6, 10, 0, 19],\n \"e\": [12, 17, 23, 19, 0],\n },\n orient=\"index\",\n columns=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n )\n\n self.cm = cm\n self.basic_dissimilarity_map = delta\n self.basic_tree = cas.data.CassiopeiaTree(\n character_matrix=cm, dissimilarity_map=delta, root_sample_name=\"b\"\n )\n\n self.nj_solver = cas.solver.NeighborJoiningSolver(add_root=True)\n\n # ---------------- Lineage Tracing NJ ----------------\n\n pp_cm = pd.DataFrame.from_dict(\n {\n \"a\": [1, 1, 0],\n \"b\": [1, 2, 0],\n \"c\": [1, 2, 1],\n \"d\": [2, 0, 0],\n \"e\": [2, 0, 2],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n )\n\n self.pp_tree = cas.data.CassiopeiaTree(character_matrix=pp_cm)\n\n self.nj_solver_delta = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=delta_fn, add_root=True\n )\n\n # ------------- CM with Duplictes -----------------------\n duplicates_cm = pd.DataFrame.from_dict(\n {\n \"a\": [1, 1, 0],\n \"b\": [1, 2, 0],\n \"c\": [1, 2, 1],\n \"d\": [2, 0, 0],\n \"e\": [2, 0, 2],\n \"f\": [2, 0, 2],\n },\n orient=\"index\",\n columns=[\"x1\", \"x2\", \"x3\"],\n )\n\n self.duplicate_tree = cas.data.CassiopeiaTree(\n character_matrix=duplicates_cm\n )\n\n # ------------- NJ with modified hamming dissimilarity ------------\n priors = {0: {1: 0.5, 2: 0.5}, 1: {1: 0.2, 2: 0.8}, 2: {1: 0.3, 2: 0.7}}\n self.pp_tree_priors = cas.data.CassiopeiaTree(\n character_matrix=pp_cm, priors=priors\n )\n self.nj_solver_modified = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=cas.solver.dissimilarity.weighted_hamming_distance,\n add_root=True,\n )\n\n def test_constructor(self):\n self.assertIsNotNone(self.nj_solver_delta.dissimilarity_function)\n self.assertIsNotNone(self.basic_tree.get_dissimilarity_map())\n\n nothing_solver = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=None, add_root=False\n )\n\n no_root_tree = 
cas.data.CassiopeiaTree(\n character_matrix=self.cm,\n dissimilarity_map=self.basic_dissimilarity_map,\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n nothing_solver.solve(no_root_tree)\n\n no_root_solver = cas.solver.NeighborJoiningSolver(\n dissimilarity_function=None, add_root=True\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n no_root_solver.solve(no_root_tree)\n\n root_only_tree = cas.data.CassiopeiaTree(\n character_matrix=self.cm, root_sample_name=\"b\"\n )\n\n with self.assertRaises(cas.solver.DistanceSolver.DistanceSolverError):\n nothing_solver.solve(root_only_tree)\n\n nj_solver_fn = cas.solver.NeighborJoiningSolver(\n add_root=True, dissimilarity_function=delta_fn\n )\n nj_solver_fn.solve(self.basic_tree)\n\n self.assertEqual(\n self.basic_tree.get_dissimilarity_map().loc[\"a\", \"b\"], 15\n )\n\n def test_compute_q(self):\n q_vals = self.nj_solver.compute_q(self.basic_dissimilarity_map.values)\n\n expected_q = pd.DataFrame.from_dict(\n {\n \"state0\": [0, -22.67, -22, -22, -33.33],\n \"state1\": [-22.67, 0, -27.33, -27.33, -22.67],\n \"state2\": [-22, -27.33, 0, -28.67, -22],\n \"state3\": [-22, -27.33, -28.67, 0, -22],\n \"state4\": [-33.33, -22.67, -22, -22, 0],\n },\n orient=\"index\",\n columns=[\"state0\", \"state2\", \"state3\", \"state4\", \"state5\"],\n )\n\n self.assertTrue(np.allclose(q_vals, expected_q, atol=0.1))\n\n def test_find_cherry(self):\n\n cherry = self.nj_solver.find_cherry(self.basic_dissimilarity_map.values)\n delta = self.basic_dissimilarity_map\n node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])\n\n self.assertIn((node_i, node_j), [(\"a\", \"e\"), (\"e\", \"a\")])\n\n def test_update_dissimilarity_map(self):\n\n delta = self.basic_dissimilarity_map\n\n cherry = self.nj_solver.find_cherry(delta.values)\n node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])\n\n delta = self.nj_solver.update_dissimilarity_map(\n delta, (node_i, node_j), \"f\"\n )\n\n expected_delta = pd.DataFrame.from_dict(\n {\n \"f\": [0, 10, 16, 12],\n \"b\": [10, 0, 10, 6],\n \"c\": [16, 10, 0, 10],\n \"d\": [12, 6, 10, 0],\n },\n orient=\"index\",\n columns=[\"f\", \"b\", \"c\", \"d\"],\n )\n\n for sample in expected_delta.index:\n for sample2 in expected_delta.index:\n self.assertEqual(\n delta.loc[sample, sample2],\n expected_delta.loc[sample, sample2],\n )\n\n def test_basic_solver(self):\n\n self.nj_solver.solve(self.basic_tree)\n\n # test leaves exist in tree\n _leaves = self.basic_tree.leaves\n\n self.assertEqual(\n len(_leaves), self.basic_dissimilarity_map.shape[0] - 1\n )\n for _leaf in _leaves:\n self.assertIn(_leaf, self.basic_dissimilarity_map.index.values)\n\n # test for expected number of edges\n edges = list(self.basic_tree.edges)\n self.assertEqual(len(edges), 6)\n\n # test relationships between samples\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"5\", \"a\"),\n (\"5\", \"e\"),\n (\"6\", \"5\"),\n (\"b\", \"6\"),\n (\"6\", \"7\"),\n (\"7\", \"d\"),\n (\"7\", \"c\"),\n ]\n )\n\n observed_tree = self.basic_tree.get_tree_topology()\n triplets = itertools.combinations([\"a\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver.solve(self.basic_tree, collapse_mutationless_edges=True)\n expected_tree = nx.DiGraph()\n 
expected_tree.add_nodes_from([\"a\", \"b\", \"c\", \"d\", \"e\", \"5\", \"6\", \"7\"])\n expected_tree.add_edges_from(\n [(\"6\", \"a\"), (\"6\", \"e\"), (\"b\", \"6\"), (\"6\", \"d\"), (\"6\", \"c\")]\n )\n observed_tree = self.basic_tree.get_tree_topology()\n triplets = itertools.combinations([\"a\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n # compare tree distances\n observed_tree = observed_tree.to_undirected()\n expected_tree = expected_tree.to_undirected()\n for i in range(len(_leaves)):\n sample1 = _leaves[i]\n for j in range(i + 1, len(_leaves)):\n sample2 = _leaves[j]\n self.assertEqual(\n nx.shortest_path_length(observed_tree, sample1, sample2),\n nx.shortest_path_length(expected_tree, sample1, sample2),\n )\n\n def test_nj_solver_weights(self):\n self.nj_solver_modified.solve(self.pp_tree_priors)\n observed_tree = self.pp_tree_priors.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"7\"),\n (\"7\", \"6\"),\n (\"6\", \"d\"),\n (\"6\", \"e\"),\n (\"7\", \"8\"),\n (\"8\", \"a\"),\n (\"8\", \"9\"),\n (\"9\", \"b\"),\n (\"9\", \"c\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver_modified.solve(\n self.pp_tree_priors, collapse_mutationless_edges=True\n )\n observed_tree = self.pp_tree_priors.get_tree_topology()\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_pp_solver(self):\n\n self.nj_solver_delta.solve(self.pp_tree)\n observed_tree = self.pp_tree.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"9\"),\n (\"9\", \"8\"),\n (\"9\", \"7\"),\n (\"7\", \"6\"),\n (\"7\", \"a\"),\n (\"6\", \"b\"),\n (\"6\", \"c\"),\n (\"8\", \"e\"),\n (\"8\", \"d\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\"], 3)\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n self.nj_solver_delta.solve(\n self.pp_tree, collapse_mutationless_edges=True\n )\n observed_tree = self.pp_tree.get_tree_topology()\n for triplet in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_duplicate_sample_neighbor_joining(self):\n\n self.nj_solver_delta.solve(self.duplicate_tree)\n observed_tree = self.duplicate_tree.get_tree_topology()\n\n expected_tree = nx.DiGraph()\n expected_tree.add_edges_from(\n [\n (\"root\", \"9\"),\n (\"9\", \"8\"),\n (\"9\", \"7\"),\n (\"7\", \"6\"),\n (\"7\", \"a\"),\n (\"6\", \"b\"),\n (\"6\", \"c\"),\n (\"8\", \"10\"),\n (\"10\", \"e\"),\n (\"10\", \"f\"),\n (\"8\", \"d\"),\n ]\n )\n\n triplets = itertools.combinations([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"], 3)\n for triplet 
in triplets:\n expected_triplet = find_triplet_structure(triplet, expected_tree)\n observed_triplet = find_triplet_structure(triplet, observed_tree)\n self.assertEqual(expected_triplet, observed_triplet)\n\n def test_setup_root_finder_missing_dissimilarity_map(self):\n tree = cas.data.CassiopeiaTree(character_matrix=self.cm)\n with mock.patch.object(\n tree, \"compute_dissimilarity_map\"\n ) as compute_dissimilarity_map:\n self.nj_solver_delta.setup_root_finder(tree)\n compute_dissimilarity_map.assert_called_once_with(\n delta_fn, \"negative_log\"\n )\n self.assertEqual(tree.root_sample_name, \"root\")\n\n def test_setup_root_finder_existing_dissimilarity_map(self):\n tree = cas.data.CassiopeiaTree(\n character_matrix=self.cm,\n dissimilarity_map=self.basic_dissimilarity_map,\n )\n with mock.patch.object(\n tree, \"compute_dissimilarity_map\"\n ) as compute_dissimilarity_map:\n self.nj_solver_delta.setup_root_finder(tree)\n compute_dissimilarity_map.assert_not_called()\n self.assertEqual(tree.root_sample_name, \"root\")\n dissimilarity_map = tree.get_dissimilarity_map()\n self.assertEqual(\n {\"a\", \"b\", \"c\", \"d\", \"e\", \"root\"}, set(dissimilarity_map.index)\n )\n self.assertEqual(\n {\"a\", \"b\", \"c\", \"d\", \"e\", \"root\"}, set(dissimilarity_map.columns)\n )\n for leaf in self.cm.index:\n delta = delta_fn(\n [0] * tree.n_character,\n self.cm.loc[leaf].values,\n tree.missing_state_indicator,\n None,\n )\n self.assertEqual(dissimilarity_map.loc[leaf, \"root\"], delta)\n self.assertEqual(dissimilarity_map.loc[\"root\", leaf], delta)\n self.assertEqual(dissimilarity_map.loc[\"root\", \"root\"], 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.exponential",
"numpy.random.uniform",
"numpy.isclose"
],
[
"pandas.DataFrame.from_dict"
],
[
"numpy.allclose",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
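The Cassiopeia row above includes neighbor-joining tests that tabulate an expected Q matrix, a cherry, and a post-merge dissimilarity map. The sketch below reproduces those values from the textbook neighbor-joining formulas (it does not use Cassiopeia's internals); the tabulated `expected_q` entries turn out to equal the classic Q criterion divided by n - 2.

```python
# Numpy sketch of the textbook neighbor-joining quantities behind the expected
# values in TestNeighborJoiningSolver. Standard formulas only, not Cassiopeia's
# implementation.
import numpy as np

labels = ["a", "b", "c", "d", "e"]
D = np.array([[ 0, 15, 21, 17, 12],
              [15,  0, 10,  6, 17],
              [21, 10,  0, 10, 23],
              [17,  6, 10,  0, 19],
              [12, 17, 23, 19,  0]], dtype=float)
n = D.shape[0]

# Classic Q criterion: Q(i, j) = (n - 2) * d(i, j) - sum_k d(i, k) - sum_k d(j, k)
row = D.sum(axis=1)
Q = (n - 2) * D - row[:, None] - row[None, :]
np.fill_diagonal(Q, 0)
print(Q / (n - 2))           # matches expected_q, e.g. Q(a, e) / 3 = -33.33

# The cherry is the off-diagonal minimum of Q: here (a, e), as in find_cherry.
i, j = np.unravel_index(np.argmin(Q + np.eye(n) * 1e9), Q.shape)
print(labels[i], labels[j])  # a e

# Distances from the new node f = (a, e) to the remaining taxa:
# d(f, k) = (d(a, k) + d(e, k) - d(a, e)) / 2  ->  10, 16, 12 for b, c, d,
# matching the expected_delta table in test_update_dissimilarity_map.
for k in [1, 2, 3]:
    print(labels[k], (D[i, k] + D[j, k] - D[i, j]) / 2)
```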
ssccutyy/KWS-Transformer | [
"7ae6d2e8fce1a293d88eedc0dbfacae726151a08"
] | [
"kws_streaming/train/train.py"
] | [
"# coding=utf-8\n# Copyright (c) 2021, Arm Limited and Contributors.\n# SPDX-License-Identifier: Apache-2.0\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train utility functions, based on tensorflow/examples/speech_commands.\n\n It consists of several steps:\n 1. Creates model.\n 2. Reads data\n 3. Trains model\n 4. Select the best model and evaluates it\n\"\"\"\n\nimport json\nfrom types import SimpleNamespace\nimport os.path\nimport pprint\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow_addons as tfa\nimport kws_streaming.data.input_data as input_data\nfrom kws_streaming.models import models\nfrom kws_streaming.models import utils\n\nimport math\n\nfrom transformers import AdamWeightDecay\n\n\nfrom kws_streaming.models import model_flags\n\n\ndef train(flags):\n \"\"\"Model training.\"\"\"\n\n flags.training = True\n\n # Set the verbosity based on flags (default is INFO, so we see all messages)\n logging.set_verbosity(flags.verbosity)\n\n # Start a new TensorFlow session.\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n tf.keras.backend.set_session(sess)\n\n audio_processor = input_data.AudioProcessor(flags)\n\n time_shift_samples = int((flags.time_shift_ms * flags.sample_rate) / 1000)\n\n # Figure out the learning rates for each training phase. Since it's often\n # effective to have high learning rates at the start of training, followed by\n # lower levels towards the end, the number of steps and learning rates can be\n # specified as comma-separated lists to define the rate at each stage. 
For\n # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001\n # will run 13,000 training loops in total, with a rate of 0.001 for the first\n # 10,000, and 0.0001 for the final 3,000.\n training_steps_list = list(map(int, flags.how_many_training_steps.split(',')))\n learning_rates_list = list(map(float, flags.learning_rate.split(',')))\n if len(training_steps_list) != len(learning_rates_list):\n raise Exception(\n '--how_many_training_steps and --learning_rate must be equal length '\n 'lists, but are %d and %d long instead' % (len(training_steps_list),\n len(learning_rates_list)))\n logging.info(flags)\n\n model = models.MODELS[flags.model_name](flags)\n if flags.distill_teacher_json:\n with open(flags.distill_teacher_json, 'r') as f:\n teacher_flags = json.load(f, object_hook=lambda d: SimpleNamespace(\n **{ k: v for k, v in flags.__dict__.items() if not k in d },\n **d))\n teacher_base = models.MODELS[teacher_flags.model_name](teacher_flags)\n hard_labels = tf.keras.layers.Lambda(lambda logits: tf.one_hot(tf.math.argmax(logits, axis=-1), depth=flags.label_count))\n teacher = tf.keras.models.Sequential([teacher_base, hard_labels])\n teacher_base.trainable = False\n teacher.trainable = False\n else:\n teacher = None\n teacher_flags = None\n\n base_model = model\n\n logging.info(model.summary())\n\n # save model summary\n utils.save_model_summary(model, flags.train_dir)\n\n # save model and data flags\n with open(os.path.join(flags.train_dir, 'flags.txt'), 'wt') as f:\n pprint.pprint(flags, stream=f)\n\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=flags.label_smoothing)\n metrics = ['accuracy']\n\n if flags.optimizer == 'adam':\n optimizer = tf.keras.optimizers.Adam(epsilon=flags.optimizer_epsilon)\n elif flags.optimizer == 'momentum':\n optimizer = tf.keras.optimizers.SGD(momentum=0.9)\n elif flags.optimizer == 'novograd':\n optimizer = tfa.optimizers.NovoGrad(\n lr=0.05,\n beta_1=flags.novograd_beta_1,\n beta_2=flags.novograd_beta_2,\n weight_decay=flags.novograd_weight_decay,\n grad_averaging=bool(flags.novograd_grad_averaging))\n elif flags.optimizer == 'adamw':\n # Exclude some layers for weight decay\n exclude = [\"pos_emb\", \"class_emb\", \"layer_normalization\", \"bias\"]\n optimizer = AdamWeightDecay(learning_rate=0.05, weight_decay_rate=flags.l2_weight_decay, exclude_from_weight_decay=exclude)\n else:\n raise ValueError('Unsupported optimizer:%s' % flags.optimizer)\n\n loss_weights = [ 0.5, 0.5, 0.0 ] if teacher else [ 1. 
] # equally weight losses form label and teacher, ignore ensemble output\n model.compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights, metrics=metrics)\n\n train_writer = tf.summary.FileWriter(flags.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.summary.FileWriter(flags.summaries_dir + '/validation')\n\n sess.run(tf.global_variables_initializer())\n\n if flags.start_checkpoint:\n model.load_weights(flags.start_checkpoint).expect_partial()\n logging.info('Weights loaded from %s', flags.start_checkpoint)\n\n if teacher_flags and teacher_flags.start_checkpoint:\n # Load weights into teacher base as this is the actual model that was saved, teacher includes hard label head\n teacher_base.load_weights(teacher_flags.start_checkpoint).assert_existing_objects_matched()\n logging.info('Distillation teacher weights loaded from %s', teacher_flags.start_checkpoint)\n\n start_step = 0\n\n logging.info('Training from step: %d ', start_step)\n\n # Save graph.pbtxt.\n tf.train.write_graph(sess.graph_def, flags.train_dir, 'graph.pbtxt')\n\n # Save list of words.\n with tf.io.gfile.GFile(os.path.join(flags.train_dir, 'labels.txt'), 'w') as f:\n f.write('\\n'.join(audio_processor.words_list))\n\n best_accuracy = 0.0\n\n # prepare parameters for exp learning rate decay\n training_steps_max = np.sum(training_steps_list)\n lr_init = learning_rates_list[0]\n exp_rate = -np.log(learning_rates_list[-1] / lr_init)/training_steps_max\n mode = 'training'\n\n if flags.lr_schedule == 'cosine':\n # Currently, no restarts are performed, so it is just a cosine decay over the entire\n # training process. I think this is how DeiT does it.\n lr_init = lr_init * flags.batch_size / 512\n num_train = audio_processor.set_size(mode)\n warmup_steps = int((num_train / flags.batch_size) * flags.warmup_epochs)\n first_decay_steps=training_steps_max\n\n # Training loop.\n for training_step in range(start_step, training_steps_max + 1):\n if training_step > 0:\n offset = (training_step -\n 1) * flags.batch_size if flags.pick_deterministically else 0\n\n # Pull the audio samples we'll use for training.\n train_fingerprints, train_ground_truth = audio_processor.get_data(\n flags.batch_size, offset, flags, flags.background_frequency,\n flags.background_volume, time_shift_samples, mode,\n flags.resample, flags.volume_resample, sess)\n\n if flags.lr_schedule == 'exp':\n learning_rate_value = lr_init * np.exp(-exp_rate * training_step)\n elif flags.lr_schedule == 'linear':\n # Figure out what the current learning rate is.\n training_steps_sum = 0\n for i in range(len(training_steps_list)):\n training_steps_sum += training_steps_list[i]\n if training_step <= training_steps_sum:\n learning_rate_value = learning_rates_list[i]\n break\n elif flags.lr_schedule == 'cosine':\n learning_rate_value = lr_init * min(1, float(training_step) / max(1, warmup_steps)) * (math.cos(math.pi * training_step / training_steps_max) + 1) / 2.\n else:\n raise ValueError('Wrong lr_schedule: %s' % flags.lr_schedule)\n\n tf.keras.backend.set_value(model.optimizer.learning_rate, learning_rate_value)\n\n one_hot_labels = tf.keras.utils.to_categorical(train_ground_truth, num_classes=flags.label_count)\n\n if teacher:\n teacher_labels = teacher.predict_on_batch(train_fingerprints)\n one_hot_labels = [ one_hot_labels, teacher_labels, one_hot_labels ] # third is for the ensemble output, gradient is unused\n\n result = model.train_on_batch(train_fingerprints, one_hot_labels)\n\n if teacher:\n loss_total, loss_label, loss_teacher, loss_average, 
acc_label, acc_teacher, acc_ensemble = result\n differences = (teacher_labels != one_hot_labels).astype(dtype=int).sum()\n logging.info(\n 'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f, teacher acc %.2f%% (%d diff), teacher cross entropy %f, ensemble acc %.2f%%',\n *(training_step, learning_rate_value, acc_label * 100, loss_total, acc_teacher * 100, differences, loss_teacher, acc_ensemble * 100))\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),\n tf.Summary.Value(tag='teacher_accuracy', simple_value=acc_teacher),\n tf.Summary.Value(tag='ensemble_accuracy', simple_value=acc_ensemble),\n ])\n else:\n loss_label, acc_label = result\n logging.info(\n 'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f',\n *(training_step, learning_rate_value, acc_label * 100, loss_label))\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),\n ])\n\n train_writer.add_summary(summary, training_step)\n\n is_last_step = (training_step == training_steps_max)\n if (training_step % flags.eval_step_interval) == 0 or is_last_step:\n set_size = audio_processor.set_size('validation')\n set_size = int(set_size / flags.batch_size) * flags.batch_size\n total_accuracy = 0.0\n count = 0.0\n for i in range(0, set_size, flags.batch_size):\n validation_fingerprints, validation_ground_truth = audio_processor.get_data(\n flags.batch_size, i, flags, 0.0,\n 0.0, 0, 'validation',\n 0.0, 0.0, sess)\n\n one_hot_labels = tf.keras.utils.to_categorical(validation_ground_truth, num_classes=flags.label_count)\n if teacher:\n one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n result = model.test_on_batch(validation_fingerprints,\n one_hot_labels)\n\n if teacher:\n loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_ensemble),\n tf.Summary.Value(tag='label_head_accuracy', simple_value=acc_label),\n tf.Summary.Value(tag='distill_head_accuracy', simple_value=acc_teacher),\n ])\n accuracy = acc_ensemble\n else:\n loss_label, acc_label = result\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),])\n accuracy = acc_label\n\n validation_writer.add_summary(summary, training_step)\n\n total_accuracy += accuracy\n count = count + 1.0\n\n total_accuracy = total_accuracy / count\n logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)',\n *(training_step, total_accuracy * 100, set_size))\n\n # Save the model checkpoint when validation accuracy improves\n if total_accuracy >= best_accuracy:\n best_accuracy = total_accuracy\n # overwrite the best model weights\n model.save_weights(flags.train_dir + 'best_weights')\n logging.info('So far the best validation accuracy is %.2f%%',\n (best_accuracy * 100))\n\n tf.keras.backend.set_learning_phase(0)\n set_size = audio_processor.set_size('testing')\n set_size = int(set_size / flags.batch_size) * flags.batch_size\n logging.info('set_size=%d', set_size)\n total_accuracy = 0.0\n count = 0.0\n\n for i in range(0, set_size, flags.batch_size):\n test_fingerprints, test_ground_truth = audio_processor.get_data(\n flags.batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)\n\n one_hot_labels = tf.keras.utils.to_categorical(test_ground_truth, num_classes=flags.label_count)\n if teacher:\n one_hot_labels = [ one_hot_labels, one_hot_labels, 
one_hot_labels ]\n result = model.test_on_batch(test_fingerprints, one_hot_labels)\n\n total_accuracy += result[-1] if teacher else result[1]\n count = count + 1.0\n total_accuracy = total_accuracy / count\n\n logging.info('Final test accuracy = %.2f%% (N=%d)',\n *(total_accuracy * 100, set_size))\n with open(os.path.join(flags.train_dir, 'accuracy_last.txt'), 'wt') as fd:\n fd.write(str(total_accuracy * 100))\n model.save_weights(flags.train_dir + 'last_weights')\n\nif __name__ == '__main__':\n flags = model_flags.update_flags(None)\n train(flags)"
] | [
[
"tensorflow.compat.v1.keras.backend.set_learning_phase",
"tensorflow.compat.v1.keras.utils.to_categorical",
"tensorflow.compat.v1.keras.losses.CategoricalCrossentropy",
"tensorflow.compat.v1.keras.backend.set_value",
"numpy.exp",
"tensorflow.compat.v1.math.argmax",
"tensorflow.compat.v1.Summary.Value",
"tensorflow.compat.v1.keras.backend.set_session",
"numpy.log",
"tensorflow.compat.v1.keras.optimizers.Adam",
"tensorflow.compat.v1.keras.models.Sequential",
"numpy.sum",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.keras.optimizers.SGD",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.train.write_graph",
"tensorflow.compat.v1.reset_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
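Note on the record above: the 'cosine' branch of its training loop combines a linear warmup with a single cosine decay over the whole run. A minimal, self-contained sketch of that schedule follows; the function name warmup_cosine_lr and the example step counts are illustrative and not part of the original file.

import math

def warmup_cosine_lr(step, total_steps, warmup_steps, lr_init):
    # Linear warmup to lr_init, then one cosine decay towards zero,
    # mirroring the formula used in the 'cosine' lr_schedule branch above.
    warmup = min(1.0, float(step) / max(1, warmup_steps))
    cosine = (math.cos(math.pi * step / total_steps) + 1.0) / 2.0
    return lr_init * warmup * cosine

if __name__ == "__main__":
    for s in (0, 500, 1000, 10000, 20000):
        print(s, warmup_cosine_lr(s, total_steps=20000, warmup_steps=1000, lr_init=1e-3))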
paudetseis/OBStools | [
"c6c02d8864c25a14f22d1fae17ff5ad911b9ff00"
] | [
"obstools/scripts/atacr_clean_spectra.py"
] | [
"#!/usr/bin/env python\n\n# Copyright 2019 Pascal Audet & Helen Janiszewski\n#\n# This file is part of OBStools.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# Import modules and functions\nimport numpy as np\nimport pickle\nimport stdb\nfrom obstools.atacr import StaNoise, Power, Cross, Rotation\nfrom obstools.atacr import utils, plotting\nfrom pathlib import Path\n\nfrom argparse import ArgumentParser\nfrom os.path import exists as exist\nfrom obspy import UTCDateTime\nfrom numpy import nan\n\n\ndef get_cleanspec_arguments(argv=None):\n \"\"\"\n Get Options from :class:`~optparse.OptionParser` objects.\n\n Calling options for the script `obs_clean_spectra.py` that accompany this\n package.\n\n \"\"\"\n\n parser = ArgumentParser(\n usage=\"%(prog)s [options] <indb>\",\n description=\"Script used \"\n \"to extract daily spectra calculated from \" +\n \"`obs_daily_spectra.py` and flag days for outlier \" +\n \"PSDs and calculate spectral averages of the \" +\n \"corresponding Fourier transforms over the entire \" +\n \"time period specified. The stations are processed \" +\n \"one by one and the data are stored to disk.\")\n parser.add_argument(\n \"indb\",\n help=\"Station Database to process from.\",\n type=str)\n\n # General Settings\n parser.add_argument(\n \"--keys\",\n action=\"store\",\n type=str,\n dest=\"stkeys\",\n default=\"\",\n help=\"Specify a comma separated list of station \" +\n \"keys for which to perform the analysis. These must \" +\n \"be contained within the station database. Partial \" +\n \"keys will be used to match against those in the \" +\n \"dictionary. For instance, providing IU will match \" +\n \"with all stations in the IU network. \" +\n \"[Default processes all stations in the database]\")\n parser.add_argument(\n \"-O\", \"--overwrite\",\n action=\"store_true\",\n dest=\"ovr\",\n default=False,\n help=\"Force the overwriting of pre-existing data. \" +\n \"[Default False]\")\n\n # Event Selection Criteria\n DaysGroup = parser.add_argument_group(\n title=\"Time Search Settings\",\n description=\"Time settings associated with \" +\n \"searching for day-long seismograms\")\n DaysGroup.add_argument(\n \"--start\",\n action=\"store\",\n type=str,\n dest=\"startT\",\n default=\"\",\n help=\"Specify a UTCDateTime compatible string \" +\n \"representing the start day for the data search. \" +\n \"This will override any station start times. 
\" +\n \"[Default start date of each station in database]\")\n DaysGroup.add_argument(\n \"--end\",\n action=\"store\",\n type=str,\n dest=\"endT\",\n default=\"\",\n help=\"Specify a UTCDateTime compatible string \" +\n \"representing the start time for the data search. \" +\n \"This will override any station end times. \" +\n \"[Default end date of each station in database]\")\n\n # Constants Settings\n ConstGroup = parser.add_argument_group(\n title='Parameter Settings',\n description=\"Miscellaneous default values \" +\n \"and settings\")\n ConstGroup.add_argument(\n \"--freq-band\",\n action=\"store\",\n type=str,\n dest=\"pd\",\n default=None,\n help=\"Specify comma-separated frequency limits \" +\n \"(float, in Hz) over which to calculate spectral \" +\n \"features used in flagging the days/windows. \" +\n \"[Default 0.004,2.0]\")\n ConstGroup.add_argument(\n \"--tolerance\",\n action=\"store\",\n type=float,\n dest=\"tol\",\n default=1.5,\n help=\"Specify parameter for tolerance threshold. \" +\n \"If spectrum > std*tol, window is flagged as bad. \" +\n \"[Default 1.5]\")\n ConstGroup.add_argument(\n \"--alpha\",\n action=\"store\",\n type=float,\n dest=\"alpha\",\n default=0.05,\n help=\"Confidence level for f-test, for iterative \" +\n \"flagging of windows. [Default 0.05, or 95 percent confidence]\")\n\n # Constants Settings\n FigureGroup = parser.add_argument_group(\n title='Figure Settings',\n description=\"Flags for plotting figures\")\n FigureGroup.add_argument(\n \"--figQC\",\n action=\"store_true\",\n dest=\"fig_QC\",\n default=False,\n help=\"Plot Quality-Control figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=\"Plot intermediate steps for debugging. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figAverage\",\n action=\"store_true\",\n dest=\"fig_average\",\n default=False,\n help=\"Plot daily average figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figCoh\",\n action=\"store_true\",\n dest=\"fig_coh_ph\",\n default=False,\n help=\"Plot Coherence and Phase figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figCross\",\n action=\"store_true\",\n dest=\"fig_av_cross\",\n default=False,\n help=\"Plot cross-spectra figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--save-fig\",\n action=\"store_true\",\n dest=\"saveplot\",\n default=False,\n help=\"Set this option if you wish to save the figure(s). [Default \" +\n \"does not save figure]\")\n FigureGroup.add_argument(\n \"--format\",\n action=\"store\",\n type=str,\n dest=\"form\",\n default=\"png\",\n help=\"Specify format of figure. Can be any one of the valid\" +\n \"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. 
[Default 'png']\")\n\n args = parser.parse_args(argv)\n\n # Check inputs\n if not exist(args.indb):\n parser.error(\"Input file \" + args.indb + \" does not exist\")\n\n # create station key list\n if len(args.stkeys) > 0:\n args.stkeys = args.stkeys.split(',')\n\n # construct start time\n if len(args.startT) > 0:\n try:\n args.startT = UTCDateTime(args.startT)\n except Exception:\n parser.error(\n \"Error: Cannot construct UTCDateTime from start time: \" +\n args.startT)\n else:\n args.startT = None\n\n # construct end time\n if len(args.endT) > 0:\n try:\n args.endT = UTCDateTime(args.endT)\n except Exception:\n parser.error(\n \"Error: Cannot construct UTCDateTime from end time: \" +\n args.endT)\n else:\n args.endT = None\n\n if args.pd is None:\n args.pd = [0.004, 2.0]\n else:\n args.pd = [float(val) for val in args.pd.split(',')]\n args.pd = sorted(args.pd)\n if (len(args.pd)) != 2:\n raise(Exception(\n \"Error: --freq-band should contain 2 \" +\n \"comma-separated floats\"))\n\n return args\n\n\ndef main(args=None):\n\n if args is None:\n # Run Input Parser\n args = get_cleanspec_arguments()\n\n # Load Database\n # stdb>0.1.3\n try:\n db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)\n\n # stdb=0.1.3\n except Exception:\n db = stdb.io.load_db(fname=args.indb)\n\n # Construct station key loop\n allkeys = db.keys()\n sorted(allkeys)\n\n # Extract key subset\n if len(args.stkeys) > 0:\n stkeys = []\n for skey in args.stkeys:\n stkeys.extend([s for s in allkeys if skey in s])\n else:\n stkeys = db.keys()\n sorted(stkeys)\n\n # Loop over station keys\n for stkey in list(stkeys):\n\n # Extract station information from dictionary\n sta = db[stkey]\n\n # Path where spectra are located\n specpath = Path('SPECTRA') / stkey\n if not specpath.is_dir():\n raise(Exception(\n \"Path to \" + str(specpath) +\n \" doesn`t exist - aborting\"))\n\n # Path where average spectra will be saved\n avstpath = Path('AVG_STA') / stkey\n if not avstpath.is_dir():\n print(\"Path to \"+str(avstpath)+\" doesn`t exist - creating it\")\n avstpath.mkdir(parents=True)\n\n # Path where plots will be saved\n if args.saveplot:\n plotpath = avstpath / 'PLOTS'\n if not plotpath.is_dir():\n plotpath.mkdir(parents=True)\n else:\n plotpath = False\n\n # Get catalogue search start time\n if args.startT is None:\n tstart = sta.startdate\n else:\n tstart = args.startT\n\n # Get catalogue search end time\n if args.endT is None:\n tend = sta.enddate\n else:\n tend = args.endT\n\n if tstart > sta.enddate or tend < sta.startdate:\n continue\n\n # Temporary print locations\n tlocs = sta.location\n if len(tlocs) == 0:\n tlocs = ['']\n for il in range(0, len(tlocs)):\n if len(tlocs[il]) == 0:\n tlocs[il] = \"--\"\n sta.location = tlocs\n\n # Update Display\n print(\"\\n|===============================================|\")\n print(\"|===============================================|\")\n print(\"| {0:>8s} |\".format(\n sta.station))\n print(\"|===============================================|\")\n print(\"|===============================================|\")\n print(\"| Station: {0:>2s}.{1:5s} |\".format(\n sta.network, sta.station))\n print(\"| Channel: {0:2s}; Locations: {1:15s} |\".format(\n sta.channel, \",\".join(tlocs)))\n print(\"| Lon: {0:7.2f}; Lat: {1:6.2f} |\".format(\n sta.longitude, sta.latitude))\n print(\"| Start time: {0:19s} |\".format(\n sta.startdate.strftime(\"%Y-%m-%d %H:%M:%S\")))\n print(\"| End time: {0:19s} |\".format(\n sta.enddate.strftime(\"%Y-%m-%d %H:%M:%S\")))\n 
print(\"|-----------------------------------------------|\")\n\n # Filename for output average spectra\n dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'\n dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'\n fileavst = avstpath / (dstart+dend+'avg_sta.pkl')\n\n if fileavst.exists():\n if not args.ovr:\n print(\"* -> file \"+str(fileavst)+\" exists - continuing\")\n continue\n\n # Containers for power and cross spectra\n coh_all = []\n ph_all = []\n coh_12_all = []\n coh_1Z_all = []\n coh_1P_all = []\n coh_2Z_all = []\n coh_2P_all = []\n coh_ZP_all = []\n ph_12_all = []\n ph_1Z_all = []\n ph_1P_all = []\n ph_2Z_all = []\n ph_2P_all = []\n ph_ZP_all = []\n ad_12_all = []\n ad_1Z_all = []\n ad_1P_all = []\n ad_2Z_all = []\n ad_2P_all = []\n ad_ZP_all = []\n nwins = []\n\n t1 = tstart\n\n # Initialize StaNoise object\n stanoise = StaNoise()\n\n # Loop through each day withing time range\n while t1 < tend:\n\n year = str(t1.year).zfill(4)\n jday = str(t1.julday).zfill(3)\n\n tstamp = year+'.'+jday+'.'\n filespec = specpath / (tstamp + 'spectra.pkl')\n\n # Load file if it exists\n if filespec.exists():\n print(\"\\n\"+\"*\"*60)\n print('* Calculating noise spectra for key ' +\n stkey+' and day '+year+'.'+jday)\n print(\"* -> file \"+str(filespec)+\" found - loading\")\n file = open(filespec, 'rb')\n daynoise = pickle.load(file)\n file.close()\n stanoise += daynoise\n else:\n t1 += 3600.*24.\n continue\n\n coh_all.append(daynoise.rotation.coh)\n ph_all.append(daynoise.rotation.ph)\n\n # Coherence\n coh_12_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c12,\n daynoise.power.c11,\n daynoise.power.c22), 50))\n coh_1Z_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c1Z,\n daynoise.power.c11,\n daynoise.power.cZZ), 50))\n coh_1P_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c1P,\n daynoise.power.c11,\n daynoise.power.cPP), 50))\n coh_2Z_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c2Z,\n daynoise.power.c22,\n daynoise.power.cZZ), 50))\n coh_2P_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c2P,\n daynoise.power.c22,\n daynoise.power.cPP), 50))\n coh_ZP_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.cZP,\n daynoise.power.cZZ,\n daynoise.power.cPP), 50))\n\n # Phase\n try:\n ph_12_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c12))\n except Exception:\n ph_12_all.append(None)\n try:\n ph_1Z_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c1Z))\n except Exception:\n ph_1Z_all.append(None)\n try:\n ph_1P_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c1P))\n except Exception:\n ph_1P_all.append(None)\n try:\n ph_2Z_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c2Z))\n except Exception:\n ph_2Z_all.append(None)\n try:\n ph_2P_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c2P))\n except Exception:\n ph_2P_all.append(None)\n try:\n ph_ZP_all.append(\n 180./np.pi*utils.phase(daynoise.cross.cZP))\n except Exception:\n ph_ZP_all.append(None)\n\n # Admittance\n ad_12_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c12, daynoise.power.c11), 50))\n ad_1Z_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c1Z, daynoise.power.c11), 50))\n ad_1P_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c1P, daynoise.power.c11), 50))\n ad_2Z_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c2Z, daynoise.power.c22), 50))\n ad_2P_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c2P, daynoise.power.c22), 50))\n 
ad_ZP_all.append(utils.smooth(utils.admittance(\n daynoise.cross.cZP, daynoise.power.cZZ), 50))\n\n t1 += 3600.*24.\n\n # Convert to numpy arrays\n coh_all = np.array(coh_all)\n ph_all = np.array(ph_all)\n coh_12_all = np.array(coh_12_all)\n coh_1Z_all = np.array(coh_1Z_all)\n coh_1P_all = np.array(coh_1P_all)\n coh_2Z_all = np.array(coh_2Z_all)\n coh_2P_all = np.array(coh_2P_all)\n coh_ZP_all = np.array(coh_ZP_all)\n ph_12_all = np.array(ph_12_all)\n ph_1Z_all = np.array(ph_1Z_all)\n ph_1P_all = np.array(ph_1P_all)\n ph_2Z_all = np.array(ph_2Z_all)\n ph_2P_all = np.array(ph_2P_all)\n ph_ZP_all = np.array(ph_ZP_all)\n ad_12_all = np.array(ad_12_all)\n ad_1Z_all = np.array(ad_1Z_all)\n ad_1P_all = np.array(ad_1P_all)\n ad_2Z_all = np.array(ad_2Z_all)\n ad_2P_all = np.array(ad_2P_all)\n ad_ZP_all = np.array(ad_ZP_all)\n\n # Store transfer functions as objects for plotting\n coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,\n coh_2Z_all, coh_2P_all, coh_ZP_all)\n ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,\n ph_2Z_all, ph_2P_all, ph_ZP_all)\n ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,\n ad_2Z_all, ad_2P_all, ad_ZP_all)\n\n # Quality control to identify outliers\n stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,\n fig_QC=args.fig_QC, debug=args.debug,\n save=plotpath, form=args.form)\n\n # Average spectra for good days\n stanoise.average_sta_spectra(\n fig_average=args.fig_average,\n save=plotpath, form=args.form)\n\n if args.fig_av_cross:\n fname = stkey + '.' + 'av_coherence'\n plot = plotting.fig_av_cross(\n stanoise.f, coh, stanoise.gooddays,\n 'Coherence', stanoise.ncomp, key=stkey, lw=0.5)\n # if plotpath.is_dir():\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n fname = stkey + '.' + 'av_admittance'\n plot = plotting.fig_av_cross(\n stanoise.f, ad, stanoise.gooddays,\n 'Admittance', stanoise.ncomp, key=stkey, lw=0.5)\n\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n fname = stkey + '.' + 'av_phase'\n plot = plotting.fig_av_cross(\n stanoise.f, ph, stanoise.gooddays,\n 'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)\n\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n if args.fig_coh_ph and stanoise.direc is not None:\n fname = stkey + '.' + 'coh_ph'\n plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n # Save to file\n stanoise.save(fileavst)\n\n\nif __name__ == \"__main__\":\n\n # Run main program\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
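Note on the record above: the script accumulates smoothed coherence, admittance, and phase between pairs of spectral components before averaging. A rough sketch of the textbook definitions these quantities conventionally follow is given below; the helper names and array handling are assumptions, and the actual obstools.atacr.utils implementation may apply additional smoothing or windowing.

import numpy as np

def coherence(Gxy, Gxx, Gyy):
    # Magnitude-squared coherence between components x and y
    return np.abs(Gxy) ** 2 / (Gxx * Gyy)

def admittance(Gxy, Gxx):
    # Transfer-function amplitude of y with respect to x
    return np.abs(Gxy) / Gxx

def phase(Gxy):
    # Cross-spectral phase in radians; the script converts to degrees via 180/pi
    return np.angle(Gxy)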
tempoCollaboration/OQuPy | [
"a389a161991a59259e5df47d8e0f405fcac75fe5"
] | [
"oqupy/backends/tempo_backend.py"
] | [
"# Copyright 2020 The TEMPO Collaboration\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule for tempo and mean-field tempo backend.\n\"\"\"\n\nfrom typing import Callable, Dict, Optional, Tuple\nfrom copy import copy\n\nfrom numpy import ndarray, moveaxis, dot\n\nfrom oqupy import operators\nfrom oqupy.config import TEMPO_BACKEND_CONFIG\nfrom oqupy.backends import node_array as na\nfrom oqupy.util import create_delta\n\nclass BaseTempoBackend:\n \"\"\"\n Backend class for TEMPO.\n\n Parameters\n ----------\n initial_state: ndarray\n The initial density matrix (as a vector).\n influence: callable(int) -> ndarray\n Callable that takes an integer `step` and returns the influence super\n operator of that `step`.\n unitary_transform: ndarray\n Unitary that transforms the coupling operator into a diagonal form.\n sum_north: ndarray\n The summing vector for the north legs.\n sum_west: ndarray\n The summing vector for the west legs.\n dkmax: int\n Number of influences to include. If ``dkmax == None`` then all\n influences are included.\n epsrel: float\n Maximal relative SVD truncation error.\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Optional[Dict] = None):\n \"\"\"Create a TempoBackend object. \"\"\"\n self._initial_state = initial_state\n self._influence = influence\n self._unitary_transform = unitary_transform\n self._sum_north = sum_north\n self._sum_west = sum_west\n self._dkmax = dkmax\n self._epsrel = epsrel\n self._step = None\n self._state = None\n self._config = TEMPO_BACKEND_CONFIG if config is None else config\n self._mps = None\n self._mpo = None\n self._super_u = None\n self._super_u_dagg = None\n self._sum_north_na = None\n\n @property\n def step(self) -> int:\n \"\"\"The current step in the TEMPO computation. 
\"\"\"\n return self._step\n\n def _initialize_mps_mpo(self) :\n \"\"\"ToDo\"\"\"\n self._initial_state = copy(self._initial_state).reshape(-1)\n\n self._super_u = operators.left_right_super(\n self._unitary_transform,\n self._unitary_transform.conjugate().T)\n self._super_u_dagg = operators.left_right_super(\n self._unitary_transform.conjugate().T,\n self._unitary_transform)\n\n self._sum_north_na = na.NodeArray([self._sum_north],\n left=False,\n right=False,\n name=\"Sum north\")\n influences = []\n if self._dkmax is None:\n dkmax_pre_compute = 1\n else:\n dkmax_pre_compute = self._dkmax + 1\n\n for i in range(dkmax_pre_compute):\n infl = self._influence(i)\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n if i == 0:\n tmp = dot(moveaxis(infl_four_legs, 1, -1),\n self._super_u_dagg)\n tmp = moveaxis(tmp, -1, 1)\n tmp = dot(tmp, self._super_u.T)\n infl_four_legs = tmp\n influences.append(infl_four_legs)\n\n self._mps = na.NodeArray([self._initial_state],\n left=False,\n right=False,\n name=\"Thee MPS\")\n self._mpo = na.NodeArray(list(reversed(influences)),\n left=True,\n right=True,\n name=\"Thee Time Evolving MPO\")\n\n\n def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:\n \"\"\"\n Takes a step in the TEMPO tensor network computation.\n\n For example, for at step 4, we start with:\n\n A ... self._mps\n B ... self._mpo\n w ... self._sum_west\n n ... self._sum_north_array\n p1 ... prop_1\n p2 ... prop_2\n\n n n n n\n | | | |\n\n | | | | |\n w~~ ~~B~~B~~B~~B~~ ~~p2\n | | | |\n p1\n | | | |\n A~~A~~A~~A\n\n return:\n step = 4\n state = contraction of A,B,w,n,p1\n\n effects:\n self._mpo will grow to the left with the next influence functional\n self._mps will be contraction of A,B,w,p1,p2\n\n Returns\n -------\n step: int\n The current step count.\n state: ndarray\n Density matrix at the current step.\n\n \"\"\"\n prop_1_na = na.NodeArray([prop_1.T],\n left=False,\n right=False,\n name=\"first half-step\")\n prop_2_na = na.NodeArray([prop_2.T],\n left=True,\n right=False,\n name=\"second half-step\")\n\n if self._dkmax is None:\n mpo = self._mpo.copy()\n infl = self._influence(len(mpo))\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n infl_na = na.NodeArray([infl_four_legs],\n left=True,\n right=True)\n self._mpo = na.join(infl_na,\n self._mpo,\n name=\"The Time Evolving MPO\",\n copy=False)\n elif current_step <= self._dkmax:\n _, mpo = na.split(self._mpo,\n int(0 - current_step),\n copy=True)\n else: # current_step > self._dkmax\n mpo = self._mpo.copy()\n infl = self._influence(self._dkmax-current_step)\n if infl is not None:\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n infl_na = na.NodeArray([infl_four_legs],\n left=True,\n right=True)\n _, mpo = na.split(self._mpo,\n index=1,\n copy=True)\n mpo = na.join(infl_na,\n mpo,\n name=\"Thee Time Evolving MPO\",\n copy=False)\n\n mpo.name = \"temporary MPO\"\n mpo.apply_vector(self._sum_west, left=True)\n\n self._mps.zip_up(prop_1_na,\n axes=[(0,0)],\n left_index=-1,\n right_index=-1,\n direction=\"left\",\n max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True,\n copy=False)\n\n if len(self._mps) != len(mpo):\n self._mps.contract(self._sum_north_na,\n axes=[(0,0)],\n left_index=0,\n right_index=0,\n direction=\"right\",\n copy=True)\n\n self._mps.zip_up(mpo,\n axes=[(0, 0)],\n left_index=0,\n right_index=-1,\n direction=\"right\",\n max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True,\n copy=False)\n\n self._mps.svd_sweep(from_index=-1,\n to_index=0,\n 
max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True)\n\n self._mps = na.join(self._mps,\n prop_2_na,\n copy=False,\n name=f\"The MPS ({current_step})\")\n\n tmp_mps = self._mps.copy()\n for _ in range(len(tmp_mps)-1):\n tmp_mps.contract(self._sum_north_na,\n axes=[(0,0)],\n left_index=0,\n right_index=0,\n direction=\"right\",\n copy=True)\n\n assert len(tmp_mps) == 1\n assert not tmp_mps.left\n assert not tmp_mps.right\n assert tmp_mps.rank == 1\n state = tmp_mps.nodes[0].get_tensor()\n\n return state\n\nclass TempoBackend(BaseTempoBackend):\n \"\"\"\n ToDo\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n propagators: Callable[[int], Tuple[ndarray, ndarray]],\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Optional[Dict] = None):\n \"\"\"Create a TempoBackend object. \"\"\"\n super().__init__(\n initial_state,\n influence,\n unitary_transform,\n sum_north,\n sum_west,\n dkmax,\n epsrel,\n config)\n self._propagators = propagators\n\n def initialize(self)-> Tuple[int, ndarray]:\n \"\"\"\n ToDo\n \"\"\"\n self._step = 0\n self._initialize_mps_mpo()\n self._state = self._initial_state\n return self._step, copy(self._state)\n\n def compute_step(self) -> Tuple[int, ndarray]:\n \"\"\"\n ToDo\n \"\"\"\n self._step += 1\n prop_1, prop_2 = self._propagators(self._step-1)\n self._state = self._compute_system_step(self._step, prop_1, prop_2)\n return self._step, copy(self._state)\n\n\nclass TempoWithFieldBackend(BaseTempoBackend):\n \"\"\"\n backend for tensor network tempo with coherent field evolution.\n Note the only difference from TensorNetworkTempoBackend in the\n signature is the addition of the initial_field and compute_field\n parameters, and the change of the propagator signature.\n\n Parameters\n ----------\n initial_state: ndarray\n The initial density matrix (as a vector).\n initial_field: complex\n The initial field value.\n influence: callable(int) -> ndarray\n Callable that takes an integer `step` and returns the influence super\n operator of that `step`.\n unitary_transform: ndarray\n Unitary that transforms the coupling operator into a diagonal form.\n propagators: callable(int, ndarray, complex) -> ndarray, ndarray\n Callable that takes an integer `step`, an ndarray `state` and a complex\n `field` and returns the first and second half of the system propagator\n of that `step`.\n compute_field: callable(int, ndarray, complex, ndarray) -> complex\n Callable that takes an integer `step`, a complex `field` (the current\n value of the field) and two ndarrays for (respectively) the current and\n next density matrix as vectors, and returns the next field value.\n sum_north: ndarray\n The summing vector for the north legs.\n sum_west: ndarray\n The summing vector for the west legs.\n dkmax: int\n Number of influences to include. 
If ``dkmax == -1`` then all influences\n are included.\n epsrel: float\n Maximal relative SVD truncation error.\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n initial_field: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n propagators: Callable[[int, ndarray, complex],\n Tuple[ndarray, ndarray]],\n compute_field: Callable[[float, ndarray, complex], complex],\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Dict):\n # Field specific variables\n self._initial_field = initial_field\n self._compute_field = compute_field\n self._field = initial_field\n self._propagators = propagators\n \"\"\"Create a TempoWithFieldBackend object. \"\"\"\n super().__init__(initial_state,\n influence,\n unitary_transform,\n sum_north,\n sum_west,\n dkmax,\n epsrel,\n config)\n\n def initialize(self) -> Tuple[int, ndarray, complex]:\n \"\"\"See BaseBackend.initialize() for main docstring.\"\"\"\n self._step = 0\n self._initialize_mps_mpo()\n self._state = self._initial_state\n self._field = self._initial_field\n return self._step, copy(self._state), self._field\n\n def compute_step(self) -> Tuple[int, ndarray, complex]:\n \"\"\"\n ToDo\n \"\"\"\n current_step = self._step\n next_step = current_step + 1\n current_state = copy(self._state)\n current_field = self._field\n prop_1, prop_2 = self._propagators(current_step, current_state,\n current_field)\n next_state = self._compute_system_step(next_step, prop_1, prop_2)\n next_field = self._compute_field(current_step, current_state,\n current_field, next_state)\n self._state = next_state\n self._field = next_field\n self._step = next_step\n\n return self._step, copy(self._state), self._field\n"
] | [
[
"numpy.dot",
"numpy.moveaxis"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
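Note on the record above: a hypothetical driver loop for TempoBackend, assuming the influence functional, propagators, and summing vectors have already been prepared by OQuPy's higher-level TEMPO machinery. The wrapper name run_tempo and the num_steps argument are illustrative, not part of the original module; only the constructor order and the initialize()/compute_step() return values shown in the record are relied on.

from oqupy.backends.tempo_backend import TempoBackend

def run_tempo(initial_state, influence, unitary_transform, propagators,
              sum_north, sum_west, num_steps, dkmax=None, epsrel=1.0e-7):
    # Step the backend and collect the density matrices (as vectors) it returns.
    backend = TempoBackend(initial_state, influence, unitary_transform,
                           propagators, sum_north, sum_west, dkmax, epsrel)
    step, state = backend.initialize()
    states = [state]
    while step < num_steps:
        step, state = backend.compute_step()
        states.append(state)
    return states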
j-chan-hkust/deep_testing_of_advanced_learning_systems | [
"ec535e2b4dc489d407b664a138d3f5262b71d21e",
"ec535e2b4dc489d407b664a138d3f5262b71d21e"
] | [
"2_data_collection/CIFAR_10/vgg16_CIFAR10.py",
"4_Coverage_Evaluation/MNIST/utils.py"
] | [
"from __future__ import print_function\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras import optimizers\nimport numpy as np\nfrom keras.layers.core import Lambda\nfrom keras import backend as K\nfrom keras import regularizers\n\nclass cifar10vgg:\n def __init__(self,train=True):\n self.num_classes = 10\n self.weight_decay = 0.0005\n self.x_shape = [32,32,3]\n\n self.model = self.build_model()\n if train:\n self.model = self.train(self.model)\n else:\n self.model.load_weights('cifar10vgg.h5')\n\n\n def build_model(self):\n # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.\n\n model = Sequential()\n weight_decay = self.weight_decay\n\n model.add(Conv2D(64, (3, 3), padding='same',\n input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 
2)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(Dropout(0.5))\n model.add(Dense(self.num_classes))\n model.add(Activation('softmax'))\n return model\n\n\n def normalize(self,X_train,X_test):\n #this function normalize inputs for zero mean and unit variance\n # it is used when training a model.\n # Input: training set and test set\n # Output: normalized training set and test set according to the trianing set statistics.\n mean = np.mean(X_train,axis=(0,1,2,3))\n std = np.std(X_train, axis=(0, 1, 2, 3))\n X_train = (X_train-mean)/(std+1e-7)\n X_test = (X_test-mean)/(std+1e-7)\n return X_train, X_test\n\n def normalize_production(self,x):\n #this function is used to normalize instances in production according to saved training set statistics\n # Input: X - a training set\n # Output X - a normalized training set according to normalization constants.\n\n #these values produced during first training and are general for the standard cifar10 training set normalization\n mean = 120.707\n std = 64.15\n return (x-mean)/(std+1e-7)\n\n def predict(self,x,normalize=True,batch_size=50):\n if normalize:\n x = self.normalize_production(x)\n return self.model.predict(x,batch_size)\n\n def train(self,model):\n\n model.load_weights(\"cifar10vgg.h5\")\n #training parameters\n batch_size = 128\n maxepoches = 250\n learning_rate = 0.01\n lr_decay = 1e-6\n lr_drop = 20\n # The data, shuffled and split between train and test sets:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train, x_test = self.normalize(x_train, x_test)\n\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n y_test = keras.utils.to_categorical(y_test, self.num_classes)\n\n def lr_scheduler(epoch):\n return learning_rate * (0.5 ** (epoch // lr_drop))\n reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)\n\n #data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n\n\n #optimization details\n sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])\n\n\n # training process in a for loop with learning rate drop every 25 epoches.\n\n historytemp = model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=batch_size),\n steps_per_epoch=x_train.shape[0] // batch_size,\n epochs=maxepoches,\n validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)\n model.save_weights('cifar10vgg.h5')\n return model\n\nif __name__ == '__main__':\n\n\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train = 
x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n\n model = cifar10vgg()\n\n predicted_x = model.predict(x_test)\n residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)\n\n loss = sum(residuals)/len(residuals)\n print(\"the validation 0/1 loss is: \",loss)\n",
"# some utils taken from the DeepXplore Implementation\n\nimport random\nfrom collections import defaultdict\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.preprocessing import image\nfrom keras import models, layers, activations\n\nfrom scipy.spatial.distance import mahalanobis\nfrom numpy.linalg import inv\nfrom itertools import combinations\n\n\n#loads a mnist image\ndef preprocess_image(img_path):\n img = image.load_img(img_path, target_size=(28, 28), grayscale=True)\n input_img_data = image.img_to_array(img)\n input_img_data = input_img_data.reshape(1, 28, 28, 1)\n\n input_img_data = input_img_data.astype('float32')\n input_img_data /= 255\n # input_img_data = preprocess_input(input_img_data) # final input shape = (1,224,224,3)\n return input_img_data\n\ndef init_neuron_cov_dict(model, model_layer_dict):\n for layer in model.layers:\n if 'flatten' in layer.name or 'input' in layer.name:\n continue\n for index in range(layer.output_shape[-1]):\n model_layer_dict[(layer.name, index)] = False\n\n\ndef neuron_to_cover(model_layer_dict):\n not_covered = [(layer_name, index) for (layer_name, index), v in model_layer_dict.items() if not v]\n if not_covered:\n layer_name, index = random.choice(not_covered)\n else:\n layer_name, index = random.choice(model_layer_dict.keys())\n return layer_name, index\n\n\ndef get_neuron_coverage(model_layer_dict):\n covered_neurons = len([v for v in model_layer_dict.values() if v])\n total_neurons = len(model_layer_dict)\n return covered_neurons, total_neurons, covered_neurons / float(total_neurons)\n\n\ndef update_neuron_coverage(input_data, model, model_layer_dict, threshold=0):\n layer_names = [layer.name for layer in model.layers if\n 'flatten' not in layer.name and 'input' not in layer.name]\n\n intermediate_layer_model = Model(inputs=model.input,\n outputs=[model.get_layer(layer_name).output for layer_name in layer_names])\n intermediate_layer_outputs = intermediate_layer_model.predict(input_data)\n\n for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):\n scaled = scale(intermediate_layer_output[0])\n for num_neuron in range(scaled.shape[-1]):\n if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:\n model_layer_dict[(layer_names[i], num_neuron)] = True\n print(\"new coverage found\")\n\n\n#To test\n\n#gets the distance of the points in standard deviations\n#note that it assumes that the points are normally distributed\ndef distance(point, mean, covarianceMatrix):\n return mahalanobis(point, mean, inv(covarianceMatrix))\n\n# an adaptation of some code from deepXplore\n# initializes a dictionary that will store which qudrants have been covered\n# model - the model we are looking to covered\n# layer_index - the layer we are exploring\n# group_size - size of the group of neurons we are analyzing\n# model_layer_dict - the object we want to initialize\ndef init_orthant_cov_dict(model, layer_index, group_size, model_layer_dict):\n layer = model.layers[layer_index]\n # some error handling\n if 'flatten' in layer.name or 'input' in layer.name:\n print(\"error in init_dict: layer_index points to the wrong layer\")\n # we initialize each combination\n for neuron_group in combinations(range(layer.output_shape[-1]), group_size): # layer.output_shape[-1] returns the number of total_neurons\n for orthant in range(2^group_size-1):\n model_layer_dict[(neuron_group, orthant)] = False\n\ndef get_orthant_coverage(model_layer_dict):\n covered_orthants 
= len([v for v in model_layer_dict.values() if v])\n total_orthants = len(model_layer_dict)\n return covered_orthants, total_orthants, covered_orthants / float(total_orthants)\n\n#this is meant to pick a orthant that is not covered\n# we actually don't need to use this just yet, maybe if I decide to implement for DeepXplore\ndef next_orthant_to_cover(model_layer_dict):\n not_covered = [(neuron_group, orthant) for (neuron_group, orthant), v in model_layer_dict.items() if not v]\n if not_covered:\n neuron_group, orthant = random.choice(not_covered)\n else:\n neuron_group, orthant = random.choice(model_layer_dict.keys())\n return neuron_group, orthant\n\n\n# creates a shortened model that ends at the nth layer, and has no activation function\n# same code as from collect_data\ndef create_shortened_model(model, layer_depth):\n # we get the neuron output for the penultimate layer for each neuron\n\n # implemented with help from the suggestion at: https://stackoverflow.com/questions/45492318/keras-retrieve-value-of-node-before-activation-function\n # we recreate the model, delete layers up to and including the layer we want to analyze, add a blank layer with no activation, and then import the old weights to this layer.\n\n #make a new model\n\n # some simple input checks\n if(layer_depth < 0):\n println ('layer depth must be positive!')\n sys.exit()\n\n if(layer_depth > len(model.layers)):\n println ('layer depth too large!')\n sys.exit()\n\n # save the original weights\n wgts = model.layers[layer_depth].get_weights()\n nthLayerNeurons = model.layers[layer_depth].output_shape[1]\n\n #remove layers up to the nth layer\n for i in range(len(model.layers)-layer_depth):\n model.pop()\n model.summary\n # add new layer with no activation\n model.add(layers.Dense(nthLayerNeurons,activation = None))\n\n # with the new layer, load the previous weights\n model.layers[layer_depth].set_weights(wgts)\n\n # get the output of this new model.\n return Model(inputs=model.input, outputs=model.layers[layer_depth].output )\n\n#this code updates the coverage given a certain input\ndef update_orthant_coverage(input_data, shortened_model, model_layer_dict, mean_vector, covariance_matrix, group_size=1, sd_threshold=1):\n\n layer_outputs = shortened_model.predict(input_data) #get the output\n # the reason that we use layer_outputs[0] is change it into a single row, rather than an array with a row.\n\n for neuron_group in combinations(range(layer_outputs.shape[-1]),group_size):\n group_output = np.asarray([layer_outputs[0][i] for i in neuron_group]) #get a list of the outputs\n\n # we do binary addition to get the correct orthant index.\n # for example, if we only have a 2 variables, we have 4 quadrants. 
we need to classify into 0,1,2,3 index\n #init the tools to find which orthant is being explored\n orthant = 0\n add = int(1)\n for neuron_index in neuron_group:\n if layer_outputs[0][neuron_index] > mean_vector[neuron_index]:\n orthant += add\n add *= 2\n\n if model_layer_dict[(neuron_group,orthant)] == True:\n continue #don't do the expensive action of loading the group cov, group mean, and calculating the distance\n\n group_mean = np.asarray([mean_vector[i] for i in neuron_group]) #list of mean\n #initialize the group numpy array for later calculation\n group_cov_matrix = np.asarray([[covariance_matrix[j][i] for i in neuron_group] for j in neuron_group]) #dont ask me why\n\n if(distance(group_output, group_mean, group_cov_matrix)>sd_threshold):\n model_layer_dict[(neuron_group,orthant)] = True\n\n# just a simple check if we have full coverage works for any coverage\ndef full_coverage(model_layer_dict):\n if False in model_layer_dict.values():\n return False\n return True\n\n\n# from here on is code from deepxplore\n\n# util function to convert a tensor into a valid image\ndef deprocess_image(x):\n x *= 255\n x = np.clip(x, 0, 255).astype('uint8')\n return x.reshape(x.shape[1], x.shape[2]) # original shape (1,img_rows, img_cols,1)\n\n\ndef normalize(x):\n # utility function to normalize a tensor by its L2 norm\n return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)\n\n\ndef constraint_occl(gradients, start_point, rect_shape):\n new_grads = np.zeros_like(gradients)\n new_grads[:, start_point[0]:start_point[0] + rect_shape[0],\n start_point[1]:start_point[1] + rect_shape[1]] = gradients[:, start_point[0]:start_point[0] + rect_shape[0],\n start_point[1]:start_point[1] + rect_shape[1]]\n return new_grads\n\n\ndef constraint_light(gradients):\n new_grads = np.ones_like(gradients)\n grad_mean = np.mean(gradients)\n return grad_mean * new_grads\n\n\ndef constraint_black(gradients, rect_shape=(6, 6)):\n start_point = (\n random.randint(0, gradients.shape[1] - rect_shape[0]), random.randint(0, gradients.shape[2] - rect_shape[1]))\n new_grads = np.zeros_like(gradients)\n patch = gradients[:, start_point[0]:start_point[0] + rect_shape[0], start_point[1]:start_point[1] + rect_shape[1]]\n if np.mean(patch) < 0:\n new_grads[:, start_point[0]:start_point[0] + rect_shape[0],\n start_point[1]:start_point[1] + rect_shape[1]] = -np.ones_like(patch)\n return new_grads\n\n\ndef init_coverage_tables(model1, model1_layer_index, model2, model2_layer_index, model3, model3_layer_index, group_size = 1):\n model_layer_dict1 = defaultdict(bool)\n model_layer_dict2 = defaultdict(bool)\n model_layer_dict3 = defaultdict(bool)\n init_dict(model1, model1_layer_index, group_size, model_layer_dict1)\n init_dict(model2, model2_layer_index, group_size, model_layer_dict2)\n init_dict(model3, model3_layer_index, group_size, model_layer_dict3)\n return model_layer_dict1, model_layer_dict2, model_layer_dict3\n\ndef init_neuron_coverage_table(model1):\n model_layer_dict1 = defaultdict(bool)\n init_neuron_cov_dict(model1, model_layer_dict1)\n return model_layer_dict1\n\ndef init_orthant_coverage_table(model1, layer_index, group_size):\n model_layer_dict1 = defaultdict(bool)\n init_orthant_cov_dict(model1, layer_index, group_size, model_layer_dict1)\n return model_layer_dict1\n\ndef scale(intermediate_layer_output, rmax=1, rmin=0):\n X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (\n intermediate_layer_output.max() - intermediate_layer_output.min())\n X_scaled = X_std * (rmax - rmin) + rmin\n return 
X_scaled\n\n\ndef fired(model, layer_name, index, input_data, threshold=0):\n intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n intermediate_layer_output = intermediate_layer_model.predict(input_data)[0]\n scaled = scale(intermediate_layer_output)\n if np.mean(scaled[..., index]) > threshold:\n return True\n return False\n\n\ndef diverged(predictions1, predictions2, predictions3, target):\n # if predictions2 == predictions3 == target and predictions1 != target:\n if not predictions1 == predictions2 == predictions3:\n return True\n return False\n"
] | [
[
"numpy.std",
"numpy.argmax",
"numpy.mean"
],
[
"numpy.ones_like",
"numpy.clip",
"numpy.linalg.inv",
"numpy.asarray",
"numpy.zeros_like",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
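Note on the record above: update_orthant_coverage in the second file maps the sign pattern of a neuron group's outputs (relative to the training-set mean vector) onto an integer orthant index by binary addition. A self-contained illustration of that indexing trick follows; the function name and sample values are made up for the example.

import numpy as np

def orthant_index(outputs, means):
    # Bit i is set when neuron i fires above its mean, so a group of k
    # neurons maps onto an index in [0, 2**k), one per orthant.
    idx, add = 0, 1
    for out, mu in zip(outputs, means):
        if out > mu:
            idx += add
        add *= 2
    return idx

print(orthant_index(np.array([0.9, 0.1, 0.7]), np.array([0.5, 0.5, 0.5])))  # -> 5 (binary 101)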
kul-group/MAZE-sim | [
"0f85e74bf93f9242a73bcfaa20a593ae966f38fa",
"0f85e74bf93f9242a73bcfaa20a593ae966f38fa"
] | [
"scraps/forcefield_v2.py",
"demos/double_defect_maker.py"
] | [
"from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer\nfrom maze.io_zeolite import read_vasp\nfrom maze.zeolite import PerfectZeolite, Zeolite\nfrom ase.neighborlist import natural_cutoffs, NeighborList\nimport os\nfrom pathlib import Path\nfrom ase.io import write, read, gromacs, proteindatabank\nfrom ase.visualize import view\nimport copy\nimport shutil\nfrom glob import glob\nfrom ase.constraints import FixAtoms\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nfrom ase.geometry.analysis import Analysis\nimport numpy as np\nfrom itertools import permutations\nfrom lxml import etree\nfrom contextlib import closing\nfrom collections import OrderedDict\nfrom scipy.optimize import least_squares, minimize\nimport matplotlib.pyplot as plt\nfrom statistics import mode\nimport pickle\nimport time\nfrom ase.data import atomic_masses, atomic_numbers\n\n\ndef get_EF_atom_indices(atoms):\n \"\"\"\n for index tracking, to ensure we are comparing the DFT and FF forces on the same EF atoms after before and after\n scooping out the smaller cluster.\n alse used for recentering the cluster based on the EF-O atom\n \"\"\"\n TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']\n index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]\n index_Al = [a.index for a in atoms if a.symbol == 'Al']\n nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)\n nl.update(atoms)\n Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))\n Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']\n\n TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))\n centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]\n return index_EF_TM + centering_o\n\n\ndef get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):\n \"\"\" #TODO: check whether capping is necessary\n Inconsistent capping (remove all caps for now, does not need this cluster to be physical)\n Possible fix: change mult in neighbor list\n\n Extract smaller cluster containing the extra-framework atoms and cap all the O. Then the capped cluster is moved\n to the center of the cell to avoid boundary issue.\n Save cluster in both .traj file and .pdb format.\n :param atoms:\n :param folder_path:\n :param file_name:\n :param save_traj: if True, save clusters into .traj as well, for later comparison and trouble shooting\n :param EF_O_index: if not none, will use this value, else, will find the index using Extraframework code\n :return: 1. EF-cluster including 13 atoms, index of the EF atoms in original zeolite, index of the EF atoms in\n the current cluster (the later two output index lists share the ordering)\n \"\"\"\n EFMaker = ExtraFrameworkAnalyzer(atoms)\n cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]\n\n cluster_EF_index = get_EF_atom_indices(cluster)\n centering_pos = cluster.get_positions()[cluster_EF_index[-1]]\n recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]\n # FIXME: recentering doesn't work well for very small unit cells. eg. 
SOD\n # cluster = Zeolite(cluster).cap_atoms()\n\n proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)\n if save_traj is True:\n write(folder_path + '/%s.traj' % file_name, recentered_cluster)\n\n return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index\n\n\ndef label_pdb(folder_path, file_name, del_unlabeled_pdb):\n \"\"\"\n Relabeling the Atom name in proteindatabank file. (required step for openMM)\n The same atom type connecting to different neighboring types are treated differently due to differences in their\n chemical environments, and is therefore named separately.\n :param folder_path:\n :param file_name:\n :param del_unlabeled_pdb:\n \"\"\"\n filein = open(folder_path + '/%s.pdb' % file_name, 'r')\n fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')\n\n name_list = []\n for line in filein.readlines():\n if line.startswith('ATOM') or line.startswith('HETATM'):\n name = line[12:16].strip()\n name_list.append(name)\n name = name + str(name_list.count(name))\n name = name.rjust(4)\n line = line.replace(line[12:16], name, 1)\n # only replacing the first occurrence of line[12:16], atomic symbols are maintained\n fileout.writelines(line)\n\n filein.close()\n fileout.close()\n if del_unlabeled_pdb is True:\n os.remove(folder_path + '/%s.pdb' % file_name)\n\n\ndef get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):\n \"\"\"\n Using ase.geometry.analysis.Analysis to get all bonds, then remove the repeated ones.\n Function also allows removing certain bonding pair defined by user (excluded_pair).\n Or removing pairs including certain atomic indices (excluded_index).\n :param cluster:\n :param mult:\n :param excluded_index: list of integers\n :param excluded_pair: list of lists\n :return: full bonding list, shortened list.\n If both excluded_index and excluded_pair are None, bonding list == shortened list\n \"\"\"\n if excluded_index is None:\n excluded_index = []\n if excluded_pair is None:\n excluded_pair = []\n\n nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n bond_list, shortened_list = [], []\n for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):\n for index in indices:\n if [count, index] not in bond_list and [index, count] not in bond_list:\n bond_list.append([count, index])\n\n for bond in bond_list:\n if all(single_index not in bond for single_index in excluded_index) and \\\n all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):\n shortened_list.append(bond)\n\n return bond_list, shortened_list\n\n\ndef get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):\n \"\"\"\n #TODO: consider combining get_bonds and get_angles function\n ase.geometry.analysis.Analysis.unique_angles function does not work, return all angles.\n three-body interactions.\n :param excluded_pair: excluding all [particle1, particle2, particle3] lists involving the excluded pair\n \"\"\"\n if excluded_index is None:\n excluded_index = []\n if excluded_pair is None:\n excluded_pair = []\n\n nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n angle_list, shortened_list = [], []\n for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):\n for index in indices:\n if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):\n angle_list.append([count, index[0], index[1]])\n\n for 
angle in angle_list:\n if all(single_index not in angle for single_index in excluded_index) and \\\n all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):\n shortened_list.append(angle)\n\n return angle_list, shortened_list\n\n\ndef write_xml(atoms, bonds, save_as):\n # on-the-fly generation of force field xml file, matching atoms and bonds with pdb file\n root = etree.Element('ForceField')\n\n xml_section = etree.SubElement(root, \"AtomTypes\")\n for atom in atoms:\n element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))\n # properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}\n if element_type == 'Cu' or atom.name == 'O9':\n atomic_mass = atomic_masses[atomic_numbers[element_type]]\n else:\n atomic_mass = 0.0\n properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}\n etree.SubElement(xml_section, 'Type', **properties)\n\n xml_section = etree.SubElement(root, 'Residues')\n xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')\n for atom in atoms:\n etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)\n for bond in bonds:\n etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)\n\n tree = etree.ElementTree(root)\n xml = etree.tostring(tree, pretty_print=True).decode('utf-8')\n\n with closing(open(save_as, 'w')) as f:\n f.write(xml)\n\n\ndef check_atom_types(cluster, index):\n \"\"\" assign atom types, same element connected to different neighbors are assigned into different classes.\n For example, extra-framework O (in Cu-O-Cu) is in a different class from framework O (Si-O-Si). Each class\n assignment is unique (each atom belongs to one class and one class only).\n O_EF: extra-framework O\n O-Cu: framework O, connecting to one T-site(Al) and Cu\n O-H: framework O, connecting to one T-site(Al) and H (capping)\n \"\"\"\n nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']\n class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']\n class_H = [atom.index for atom in cluster if atom.symbol == 'H']\n class_O_EF = [get_EF_atom_indices(cluster)[-1]]\n class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and\n all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]\n class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]\n\n if index in class_Al:\n return 'Al'\n if index in class_Cu:\n return 'Cu'\n if index in class_H:\n return 'H'\n if index in class_O_EF:\n return 'O-EF'\n if index in class_O_Cu:\n return 'O-Cu'\n if index in class_O_H:\n return 'O-H'\n else:\n return 'None'\n\n\ndef get_property_types(cluster, property_list):\n \"\"\" assign all bonding pairs or angles into different types based on differences in atom types. For example,\n O(extra-framework)-Cu is different from O(framework)-Cu.\n :param property_list: bond or angle index list of the cluster of interests\n :return type_dict: return a dictionary of all unique bond-pairs or angle types, with \"keys\" being integers starting\n from 0, and \"values\" being a list of two atom types string for bonds or three atom types string for angles.\n eg. 
{0: [AtomClass1, AtomClass2], 1: [AtomClass1, AtomClass3], ...} for bonds\n Note: Bond types such as [AtomClass1, AtomClass2] and [AtomClass2, AtomClass1] are considered the same. Same rules\n also apply for angles.\n :return whole_type_list: return the entire list of bond or angle types assignment of the input.\n len(whole_type_list) = len(my_list)\n \"\"\"\n type_dict, repeated_list, whole_type_list, count = {}, [], [], 0\n\n for items in property_list:\n my_list = []\n for val in items:\n my_list.append(check_atom_types(cluster, val))\n whole_type_list.append(my_list)\n if all(list(pair) not in repeated_list for pair in list(permutations(my_list))):\n repeated_list.append(my_list)\n type_dict[count] = my_list\n count += 1\n\n return type_dict, whole_type_list\n\n\ndef _get_index_dict(type_dict, whole_type_list, index_list):\n \"\"\" assign bond pairs or angles indices into different bond or angle types, all the pairs or angles within the same\n types will share the same set of force field parameters.\n :param type_dict:\n :param whole_type_list:\n :param index_list:\n :return index_dict: return a dictionary of all bond-pairs or angle indices for each unique bond or angle type,\n using the the same keys as type_dict.\n \"\"\"\n index_dict = {}\n for key, value in type_dict.items():\n temp_list = []\n for count, items in enumerate(whole_type_list):\n if any(list(pair) == value for pair in list(permutations(items))):\n temp_list.append(index_list[count])\n index_dict[key] = temp_list\n\n return index_dict\n\n\ndef get_type_index_pair(type_dict, whole_type_list, index_list):\n \"\"\" write bond_type and bond_index into a single dictionary; can use tuples as dictionary key, not lists\n :param type_dict:\n :param whole_type_list:\n :param index_list:\n \"\"\"\n bond_index_dict = _get_index_dict(type_dict, whole_type_list, index_list)\n type_index_dict = {}\n for key, value in type_dict.items():\n type_index_dict[tuple(value)] = bond_index_dict[key]\n return type_index_dict\n\n\ndef pretty_print(my_dict):\n \"\"\" for better visualization of the bond (or angle) types and bond (or angle) indices that belong to certain types.\n \"\"\"\n for key, value in my_dict.items():\n print(key, '-->', value)\n\n\ndef shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,\n include_property_type=None, case=0):\n \"\"\"\n allow excluding certain property types or only including certain types\n \"\"\"\n\n if exclude_atom_type is not None and exclude_property_type is None:\n case = 1\n if exclude_property_type is not None and exclude_atom_type is None:\n case = 2\n if exclude_property_type is not None and exclude_atom_type is not None:\n case = 3\n if include_property_type is not None:\n case = 4\n\n shortened_list = []\n for type_list, index_list in type_index_dict.items():\n if case == 1 and all(single_type not in type_list for single_type in exclude_atom_type):\n shortened_list.extend(index_list)\n elif case == 2 and all(list(value) not in exclude_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n elif case == 3 and all(single_type not in type_list for single_type in exclude_atom_type) and \\\n all(list(value) not in exclude_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n elif case == 4 and any(list(value) in include_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n\n return shortened_list\n\n\ndef 
set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):\n \"\"\" Feed pdb topology file and xml force field file into openMM, generate a system for the MD simulation/force\n calculation.\n :param folder_path:\n :param cluster_tag_number:\n :param shortened_bond_list:\n :return pdb:\n :return system:\n \"\"\"\n pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)\n atoms = list(pdb.topology.atoms())\n\n for index in shortened_bond_list:\n pdb.topology.addBond(atoms[index[0]], atoms[index[1]])\n bonds = list(pdb.topology.bonds())\n\n write_xml(atoms, bonds, folder_path + '/forcefield.xml')\n FF = ForceField(folder_path + '/forcefield.xml')\n system = FF.createSystem(pdb.topology)\n return pdb, system\n\n\ndef custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,\n angle_type_index_dict=None, angle_param_dict=None):\n \"\"\" #todo: add argument allowing this custom function to be fed in as an input (more flexible used-designed ff)\n :param bond_list: list to be included into force field\n :param angle_list:\n :param bond_type_index_dict: {(type): [index], ...}\n :param angle_type_index_dict:\n :param bond_param_dict: {(type): [param], ...} Note: parameters here uses the standard units, kJ, nm, ...\n :param angle_param_dict:\n :return system: openMM system with custom forces added onto it\n \"\"\"\n force = CustomBondForce(\"D*(1-exp(-alpha*(r-r0)))^2\") # Morse bond\n force.addPerBondParameter(\"D\")\n force.addPerBondParameter(\"alpha\")\n force.addPerBondParameter(\"r0\")\n force.setUsesPeriodicBoundaryConditions(periodic=True)\n\n for bond in bond_list:\n for my_type, my_index in bond_type_index_dict.items():\n if any(list(val) in my_index for val in list(permutations(bond))):\n try:\n force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))\n except:\n my_type = tuple(reversed(my_type))\n force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))\n # note: consider updating the info_dict to make it order insensitive\n system.addForce(force)\n\n force = HarmonicAngleForce() # Harmonic angle\n force.setUsesPeriodicBoundaryConditions(periodic=True) # adding periodic conditions\n\n for angle in angle_list:\n for my_type, my_index in angle_type_index_dict.items():\n if any(list(val) in my_index for val in list(permutations(angle))):\n type_tag = [tuple(val) for val in list(angle_param_dict.keys()) if val in list(permutations(my_type))]\n force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]), *angle_param_dict.get(type_tag[0]))\n system.addForce(force)\n\n # assert(system.usesPeriodicBoundaryConditions() == True)\n return system\n\n\ndef get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,\n angle_type_index_dict=None, angle_param_dict=None):\n \"\"\" forces for a single configuration\n use numb to keep track of individual configurations\n integrator used for advancing the equations of motion in MD\n doesn't matter what we pick here since we only need the forces on the initial structure, but do need to have it\n :return: forces values on atoms in units of eV/A\n \"\"\"\n system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,\n angle_type_index_dict, angle_param_dict)\n integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds) # randomly picked\n simulation = Simulation(pdb.topology, system, integrator)\n simulation.context.setPositions(pdb.positions)\n 
state = simulation.context.getState(getForces=True)\n forces = np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1 # convert forces from kJ/nm mol to eV/A\n\n return forces\n\n\n# NOTE: section below deals with multiple input structures for force field training\n\ndef get_EF_O_index(traj):\n \"\"\"\n get the mode of EF_O, and use that to extract the EF cluster for the force field training\n all EF atoms should have the same indices regardless of there is binds on the zeolite, as long as the zeolite\n framework is the same - (all EF atoms, aka. Cu-O-Cu insertion follows the same procedures)\n :param traj: traj of configurations containing all atoms, including both the zeolite backbone and EF atoms\n \"\"\"\n EF_O_index_list = []\n for atoms in traj:\n try:\n EFAnalyzer = ExtraFrameworkAnalyzer(atoms)\n EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])\n except:\n ...\n return mode(tuple(EF_O_index_list))\n\n\ndef prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,\n show_all=False):\n \"\"\"\n :param folder_path:\n :param sample_zeolite:\n :param traj_name:\n :param save_traj:\n :param del_unlabeled_pdb:\n :param show_all:\n \"\"\"\n if traj_name is not None:\n traj = read(folder_path + '/%s.traj' % traj_name, ':')\n output_dir = os.path.join(folder_path, traj_name)\n else:\n traj = read(folder_path + '/%s.traj' % sample_zeolite, ':')\n output_dir = os.path.join(folder_path, sample_zeolite)\n Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n cluster_traj, EF_O_index, EF_atoms_index, cluster_EF_index = [], get_EF_O_index(traj[0:100]), [], []\n for count, atoms in enumerate(traj):\n try:\n cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),\n save_traj, [EF_O_index])\n label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)\n cluster_traj.append(cluster)\n print(sample_zeolite, count)\n except:\n print(sample_zeolite, count, 'failed!')\n\n if show_all is True:\n view(cluster_traj)\n\n return EF_atoms_index, cluster_EF_index\n\n\ndef reformat_inputs(bond_param_dict, angle_param_dict):\n \"\"\" reformat input dict into lists\n :return bond_type: List[List[str]] eg. ['Cu', 'O']\n :return angle_type: List[List[str]] eg. 
['Cu', 'O', 'Cu']\n :return param_list: List[float], extend all parameters into a single list, since scipy.optimize.minimize can only\n take an 1D array as initial guess parameter\n \"\"\"\n bond_type, angle_type, param_list = [], [], []\n for types, indices in bond_param_dict.items():\n bond_type.append(list(types))\n param_list.extend([val for val in np.array(indices)])\n\n for types, indices in angle_param_dict.items():\n angle_type.append(list(types))\n param_list.extend([val for val in np.array(indices)])\n\n return bond_type, angle_type, param_list\n\n\ndef get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,\n bond_type_index_dict, angle_type_index_dict):\n \"\"\" To reduce computational cost, objects such as pdb, system, shortened_bond_list, bond_type_index_dict are kept\n fixed for each configuration during the optimization (only run once).\n \"\"\"\n\n shortened_bond_list = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)\n shortened_angle_list = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)\n pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list)\n\n return pdb, system, shortened_bond_list, shortened_angle_list\n\n\ndef get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, EF_index):\n \"\"\" openMM forces for multiple configuration based on the same set of parameters\n \"\"\"\n bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0\n for count, (types, indices) in enumerate(ini_bond_param_dict.items()):\n bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])\n number_of_bond_param += len(indices)\n\n for count, (types, indices) in enumerate(ini_angle_param_dict.items()):\n angle_param_dict[types] = list(\n param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])\n\n predicted_f = []\n my_dict = copy.deepcopy(info_dict)\n for config_tag, info_list in my_dict.items():\n ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,\n info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]\n predicted_f.append([force_list for force_list in ff_forces])\n\n return predicted_f\n\n\ndef get_DFT_forces_single(atoms, atom_index):\n \"\"\"\n reference DFT forces on single atoms\n \"\"\"\n f_vec = atoms.calc.results['forces'][atom_index] # self.atoms.get_forces()[atom_index]\n f_mag = np.linalg.norm(f_vec)\n return f_vec\n\n\ndef get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, EF_index):\n \"\"\"\n optimize force field parameters by minimizing this loss function (MSE), weighted by DFT electronic energies\n k (Boltzmann's constant) = 8.617e-5 eV/K\n T = 298 K\n \"\"\"\n predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, EF_index)\n residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)\n weighted_residue = residue * weights # 39 number of atoms\n print(np.mean(weighted_residue ** 2))\n return np.mean(weighted_residue ** 2)\n\n\ndef get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, 
EF_index):\n # todo: more flexible bond reformating and feeding\n bounds = ((-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf),\n (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (0, np.pi),\n (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf))\n res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},\n args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, EF_index))\n print(res.success)\n return res\n\n\ndef make_parity_plot(ff_forces, dft_forces, atom_name):\n \"\"\" plot FF forces vs. DFT forces\n \"\"\"\n plt.figure()\n fig, ax = plt.subplots()\n plt.plot(dft_forces, ff_forces, 'o')\n plt.xlabel('DFT_force', fontsize=18)\n plt.ylabel('FF_force', fontsize=18)\n lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n plt.title('Force fitting on %s' % atom_name, fontsize=18)\n plt.show()\n\n\ndef func():\n tic = time.perf_counter()\n zeolite = 'SOD'\n folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'\n # prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)\n \"\"\"\n ini_bond_param_dict = {('O-Cu', 'Cu'): [1.2, 4, 0.3], ('O-EF', 'Cu'): [1.2, 4, 0.2], ('Al', 'Cu'): [1.2, 4, 0.4]}\n ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.3, 10], ('O-Cu', 'Cu', 'O-EF'): [2.3, 10],\n ('Al', 'Cu', 'O-EF'): [2.3, 10]}\n \"\"\"\n ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],\n ('Al', 'Cu'): [-2.656, 4.608, 0.413]}\n ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],\n ('Al', 'Cu', 'O-EF'): [1.925, 1.673]}\n included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)\n\n # set up type_index_dict using a single set of data #fixme: randomly pick several initial clusters to built dict\n cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')\n bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)\n bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)\n angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)\n angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)\n bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)\n angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)\n\n numb_skip = 2000\n info_dict, output_path = {}, os.path.join(folder_path, traj_name)\n files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]\n for cluster_tag_number in np.arange(0, len(files), numb_skip):\n cluster_tag_number = int(cluster_tag_number)\n pdb, system, shortened_bond_list, shortened_angle_list = \\\n get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,\n bond_type_index_dict, angle_type_index_dict)\n info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]\n print(cluster_tag_number)\n\n with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:\n pickle.dump(info_dict, f)\n\n 
with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:\n EF_index_dict = pickle.load(f)\n\n traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)\n DFT_f = []\n for atoms in traj:\n DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])\n print(np.array(DFT_f).shape)\n\n ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']\n DFT_E = []\n for atoms in traj:\n DFT_E.append(atoms.calc.results['energy'])\n\n with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:\n info_dict = pickle.load(f)\n\n with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:\n cluster_EF_index_dict = pickle.load(f)\n\n my_dict = copy.deepcopy(info_dict) # important, need to keep openMM \"systems\" fixed\n weights = []\n for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):\n weights.extend([value, value, value, value, value, value, value, value, value])\n res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))\n\n print([np.around(float(val), decimals=3) for val in res.x])\n FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, cluster_EF_index_dict.get(zeolite))\n make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')\n\n force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}\n with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:\n pickle.dump(force_dict, f)\n\n toc = time.perf_counter()\n print(f\"Program terminated in {toc - tic:0.4f} seconds\")\n\n\nif __name__ == '__main__':\n # func()\n \n \"\"\" weighting factor for the loss function\n zeolite = 'SOD'\n folder_path, traj_name, numb_skip = '/Users/jiaweiguo/Box/openMM_FF', zeolite + '_md', 2000\n traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)\n ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']\n DFT_E = []\n for atoms in traj:\n DFT_E.append(atoms.calc.results['energy'])\n weight = np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298))\n plt.plot(DFT_E, weight, 'o')\n plt.xlabel('DFT electronic energies (eV)', fontsize=16)\n plt.ylabel('Boltzmann weighting', fontsize=16)\n plt.show()\n \"\"\"\n",
"from ase.io import read, write\nfrom ase import Atoms\nfrom maze import Zeotype, OpenDefect, ImperfectZeotype\nimport os\nfrom ase.visualize import view\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom typing import List\nimport numpy as np\n\n\n# %%\ndef defect_maker(cif_dir, zeolite_code, output_dir, savefiles=True):\n zeolite = Zeotype.build_from_cif_with_labels(cif_dir)\n open_defect = OpenDefect(zeolite)\n unique_t_site_indices = {}\n for site_name, value in zeolite._site_to_atom_indices.items():\n if 'T' in site_name:\n unique_t_site_indices[site_name] = value[1]\n\n # build dictionary of open defects\n unique_t_site_to_od = defaultdict(list)\n for site_name, t_site in unique_t_site_indices.items():\n for o_index in open_defect.neighbor_list.get_neighbors(t_site)[0]:\n for si_index in open_defect.neighbor_list.get_neighbors(o_index)[0]:\n if si_index == t_site:\n continue\n pos = open_defect.get_positions()[si_index]\n new_od = open_defect.delete_atoms([si_index]).cap_atoms()\n unique_t_site_to_od[site_name].append(new_od)\n\n # save T sites\n if savefiles: #I made this change\n for site_name, od_list in unique_t_site_to_od.items():\n output_dir2 = os.path.join(output_dir, zeolite_code, site_name)\n Path(output_dir2).mkdir(parents=True, exist_ok=True)\n for index, od in enumerate(od_list):\n output_filename = zeolite_code + '_' + site_name + '_' + str(index) + '.traj'\n my_path = os.path.join(output_dir2, output_filename)\n write(my_path, od)\n\n return unique_t_site_to_od # and most importantly I made this chnage\n\n\n# helper functions for second defect function\ndef find_removed_atoms(iz: ImperfectZeotype) -> List[int]:\n \"\"\"\n Finds the atoms removed from the iz by comparing it with parent MAZE-sim\n :param iz: imperfect MAZE-sim to check for missing atoms\n :return: list of indices of missing atoms (using parent indexing)\n \"\"\"\n missing_list = []\n for atom in iz.parent_zeotype:\n value = iz.index_mapper.get_index(iz.parent_zeotype.name, iz.name, atom.index)\n if value is None:\n missing_list.append(atom.index)\n\n return missing_list\n\ndef find_neighbor_si(z: Zeotype, first_si_index: int):\n \"\"\"\n Finds the first neighboring Si\n :param z: the MAZE-sim object\n :param first_si_index: the first Si index to find a neighbor too\n :return: the first neighboring si index\n \"\"\"\n z.update_nl()\n for o_index in z.neighbor_list.get_neighbors(first_si_index)[0]:\n for si_index in z.neighbor_list.get_neighbors(o_index)[0]:\n if si_index == first_si_index:\n continue\n return si_index\n\ndef find_index_common_oxygen(iz, site_1: int, site_2: int) -> int:\n \"\"\"\n Finds a common oxygen, if it exists, between two T sites\n :param iz: imperfect MAZE-sim (or subclass) containing T sites\n :param site_1: index of T site 1\n :param site_2: index of T site 2\n :return:\n \"\"\"\n iz.update_nl()\n nl1 = iz.neighbor_list.get_neighbors(site_1)[0]\n nl2 = iz.neighbor_list.get_neighbors(site_2)[0]\n # find common oxygen\n for i in nl1:\n if i in nl2:\n if iz[i].symbol == 'O':\n return i\n\n assert False, 'No middle oxygen found!!'\n\ndef remove_two_T_sites(iz, site_1: int, site_2: int) -> ImperfectZeotype:\n \"\"\"\n Removes two T sites that are adjacent to eachother\n :param iz: Impefect MAZE-sim with two T sites\n :param site_1: the index of the first site to remove\n :param site_2: the index of the second site to remove\n :return:\n \"\"\"\n indices_to_remove = [site_1, site_2, find_index_common_oxygen(iz, site_1, site_2)]\n return 
iz.delete_atoms(indices_to_remove)\n\ndef second_defect(od_dict, T_site_str, list_pos, cif_name, out_path, savefile: bool =True):\n \"\"\"\n Second defect creator\n :param od_dict: dictionary of open defects (this is to allow it to integrate with the other code better)\n :param T_site_str: The key for the open defects dictionary\n :param list_pos: The position in the list of od_dict[T_site_str]\n :param cif_name: the name of the cif file used to build the zeolite\n :param out_path: the output path\n :param savefile: bool if a file should be saved or not\n :return: an open defect with an additional adjacent site removed\n \"\"\"\n od = od_dict[T_site_str][list_pos] # get specific open defect\n complete_od = OpenDefect(od.parent_zeotype) # create an opendefect object without the removed index\n removed_atom_index = find_removed_atoms(od)[0]\n\n neighbor_si_index = find_neighbor_si(complete_od, removed_atom_index)\n new_od = remove_two_T_sites(complete_od, removed_atom_index, neighbor_si_index).cap_atoms()\n my_path = os.path.join(out_path, cif_name + '_' + str(neighbor_si_index) + '.traj')\n if savefile:\n write(my_path, new_od)\n return new_od\n\n\n# .add_atoms(Atoms('Ir', positions=[pos]), 'Ir')\n\n# %%'\n\n# %%\n\n\ndef insert_HM(cif_dir, out_path, cif_name, si_index):\n atoms = read(cif_dir + str(si_index) + '.traj', '-1')\n zeolite = Zeotype(atoms)\n open_defect = OpenDefect(zeolite)\n atoms_H = [a.index for a in open_defect if a.symbol in ['H']]\n pos_H = open_defect.get_positions()[atoms_H]\n cell_dim = atoms.get_cell_lengths_and_angles()[0:3]\n for row in pos_H:\n for index, item in enumerate(row):\n if item >= cell_dim[index]:\n item -= cell_dim[index]\n row[index] = item\n elif item < 0:\n item += cell_dim[index]\n row[index] = item\n else:\n continue\n pos = np.mean(pos_H, axis=0)\n new_od = open_defect.add_atoms(Atoms('Ir', positions=[pos]), 'Ir')\n # my_path = os.path.join(out_path, cif_name + '_' + str(si_index) + '_Ir.traj')\n # write(my_path, new_od)\n return np.mean(pos)\n\n\ndef insert_HM_2(open_defect, si_index):\n atoms_types, _ = open_defect.count_elements\n atoms_H = atoms_types['H']\n pos_H = open_defect.get_positions()[atoms_H]\n cell_dim = open_defect.get_cell_lengths_and_angles()[0:3]\n for row in pos_H:\n for index, item in enumerate(row):\n if item >= cell_dim[index]:\n item -= cell_dim[index]\n row[index] = item\n elif item < 0:\n item += cell_dim[index]\n row[index] = item\n else:\n continue\n pos = np.mean(pos_H, axis=0)\n new_od = open_defect.add_atoms(Atoms('Ir', positions=[pos]), 'Ir')\n return np.mean(pos)\n\n\nif __name__ == \"__main__\":\n #defect_maker('/Users/jiaweiguo/Desktop/0125Proposal/BEA.cif', 'BEA', '/Users/jiaweiguo/Desktop/0125Proposal')\n od_dict = defect_maker('/data/BEA.cif', 'BEA', '//data/test_output',\n savefiles=False)\n my_od = second_defect(od_dict, 'T3', 3, 'BEA', '//data/test_output', savefile=False)\n view(my_od)\n # second_defect(cif_dir, out_path, 'BEA_T1_3', 189)\n # second_defect(cif_dir, out_path, 'BEA_T1_3', 141)\n # second_defect(cif_dir, out_path, 'BEA_T1_3', 177)\n # cif_dir = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/BEA_T1_3.traj'\n # out_path = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/'\n # cif_dir = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/BEA_T1_3_'\n # out_path = '/Users/jiaweiguo/Desktop/0125Proposal/BEA/T1/'\n # insert_HM(cif_dir, out_path, 'BEA_T1_3', 141)\n # insert_HM(cif_dir, out_path, 'BEA_T1_3', 189)\n # insert_HM(cif_dir, out_path, 'BEA_T1_3', 177)\n #\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"numpy.linalg.norm",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"scipy.optimize.minimize",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
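A minimal sketch (placeholder data, not values from the record above): the scraps/forcefield_v2.py source in this record fits its Morse-bond and harmonic-angle parameters by minimizing a weighted mean-squared force residual with scipy.optimize.minimize using the Powell method and per-parameter bounds, which is why scipy.optimize.minimize and numpy.mean appear in its API list. Powell is derivative-free, which suits an objective evaluated through external force calls where analytic gradients are unavailable. The objective, starting point, weights, and bounds below are illustrative assumptions only, chosen to mirror the call pattern rather than the actual fit.

import numpy as np
from scipy.optimize import minimize

def residual(params, targets, weights):
    # weighted mean-squared difference, mirroring the loss shape used in the embedded script
    return np.mean(weights * (params - targets) ** 2)

targets = np.array([1.0, 2.0, 0.5])       # placeholder reference values (not DFT forces)
weights = np.array([1.0, 0.5, 2.0])       # placeholder Boltzmann-style weights
x0 = np.zeros(3)                          # placeholder initial parameter guess
bounds = ((-np.inf, np.inf), (0.0, np.inf), (0.0, np.pi))  # mix of free and bounded parameters

# Note: bounds with method='Powell' require a sufficiently recent SciPy (>= 1.5).
res = minimize(residual, x0, args=(targets, weights), method='Powell',
               bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000})
print(res.success, np.round(res.x, 3))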
Duncanswilson/keras | [
"32aa192548b6b59bf407e583fbd246ba9f5f5676"
] | [
"keras/layers/recurrent.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Recurrent layers and their base classes.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. import constraints\nfrom ..engine import Layer\nfrom ..engine import InputSpec\nfrom ..utils.generic_utils import has_arg\n\n# Legacy support.\nfrom ..legacy.layers import Recurrent\nfrom ..legacy import interfaces\n\n\nclass StackedRNNCells(Layer):\n \"\"\"Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n # Arguments\n cells: List of RNN cell instances.\n\n # Examples\n\n ```python\n cells = [\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n ]\n\n inputs = keras.Input((timesteps, input_dim))\n x = keras.layers.RNN(cells)(inputs)\n ```\n \"\"\"\n\n def __init__(self, cells, **kwargs):\n for cell in cells:\n if not hasattr(cell, 'call'):\n raise ValueError('All cells must have a `call` method. '\n 'received cells:', cells)\n if not hasattr(cell, 'state_size'):\n raise ValueError('All cells must have a '\n '`state_size` attribute. '\n 'received cells:', cells)\n self.cells = cells\n super(StackedRNNCells, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n # States are a flat list\n # in reverse order of the cell stack.\n # This allows to preserve the requirement\n # `stack.state_size[0] == output_dim`.\n # e.g. states of a 2-layer LSTM would be\n # `[h2, c2, h1, c1]`\n # (assuming one LSTM has states [h, c])\n state_size = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n state_size += list(cell.state_size)\n else:\n state_size.append(cell.state_size)\n return tuple(state_size)\n\n def call(self, inputs, states, **kwargs):\n # Recover per-cell states.\n nested_states = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n nested_states.append(states[:len(cell.state_size)])\n states = states[len(cell.state_size):]\n else:\n nested_states.append([states[0]])\n states = states[1:]\n nested_states = nested_states[::-1]\n\n # Call the cells in order and store the returned states.\n new_nested_states = []\n for cell, states in zip(self.cells, nested_states):\n inputs, states = cell.call(inputs, states, **kwargs)\n new_nested_states.append(states)\n\n # Format the new states as a flat list\n # in reverse cell order.\n states = []\n for cell_states in new_nested_states[::-1]:\n states += cell_states\n return inputs, states\n\n def build(self, input_shape):\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell.build(input_shape)\n if hasattr(cell.state_size, '__len__'):\n output_dim = cell.state_size[0]\n else:\n output_dim = cell.state_size\n input_shape = (input_shape[0], input_shape[1], output_dim)\n self.built = True\n\n def get_config(self):\n cells = []\n for cell in self.cells:\n cells.append({'class_name': cell.__class__.__name__,\n 'config': cell.get_config()})\n config = {'cells': cells}\n base_config = super(StackedRNNCells, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from . 
import deserialize as deserialize_layer\n cells = []\n for cell_config in config.pop('cells'):\n cells.append(deserialize_layer(cell_config,\n custom_objects=custom_objects))\n return cls(cells, **config)\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n trainable_weights += cell.trainable_weights\n return trainable_weights + weights\n return weights\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n # Returns\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.weights\n return K.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n # Arguments\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n num_param = len(cell.weights)\n weights = weights[:num_param]\n for sw, w in zip(cell.weights, weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)\n\n @property\n def losses(self):\n losses = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell_losses = cell.losses\n losses += cell_losses\n return losses\n\n def get_losses_for(self, inputs=None):\n losses = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell_losses = cell.get_losses_for(inputs)\n losses += cell_losses\n return losses\n\n\nclass RNN(Layer):\n \"\"\"Base class for recurrent layers.\n\n # Arguments\n cell: A RNN cell instance. A RNN cell is a class that has:\n - a `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - a `state_size` attribute. This can be a single integer\n (single state) in which case it is\n the size of the recurrent state\n (which should be the same as the size of the cell output).\n This can also be a list/tuple of integers\n (one size per state). In this case, the first entry\n (`state_size[0]`) should be the same as\n the size of the cell output.\n It is also possible for `cell` to be a list of RNN cell instances,\n in which cases the cells get stacked on after the other in the RNN,\n implementing an efficient stacked RNN.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n input_dim: dimensionality of the input (integer).\n This argument (or alternatively,\n the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.\n input_length: Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument)\n\n # Input shape\n 3D tensor with shape `(batch_size, timesteps, input_dim)`.\n\n # Output shape\n - if `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `(batch_size, units)`.\n - if `return_sequences`: 3D tensor with shape\n `(batch_size, timesteps, units)`.\n - else, 2D tensor with shape `(batch_size, units)`.\n\n # Masking\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an [Embedding](embeddings.md) layer with the `mask_zero` parameter\n set to `True`.\n\n # Note on using statefulness in RNNs\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - specify `stateful=True` in the layer constructor.\n - specify a fixed batch size for your model, by passing\n if sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - specify `shuffle=False` when calling fit().\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n # Note on specifying the initial state of RNNs\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n # Note on passing external constants to RNNs\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. 
an attention mechanism.\n\n # Examples\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n \"\"\"\n\n def __init__(self, cell,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if isinstance(cell, (list, tuple)):\n cell = StackedRNNCells(cell)\n if not hasattr(cell, 'call'):\n raise ValueError('`cell` should have a `call` method. '\n 'The RNN was passed:', cell)\n if not hasattr(cell, 'state_size'):\n raise ValueError('The RNN cell should have '\n 'an attribute `state_size` '\n '(tuple of integers, '\n 'one integer per RNN state).')\n super(RNN, self).__init__(**kwargs)\n self.cell = cell\n self.return_sequences = return_sequences\n self.return_state = return_state\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.unroll = unroll\n\n self.supports_masking = True\n self.input_spec = [InputSpec(ndim=3)]\n self.state_spec = None\n self._states = None\n self.constants_spec = None\n self._num_constants = None\n\n @property\n def states(self):\n if self._states is None:\n if isinstance(self.cell.state_size, int):\n num_states = 1\n else:\n num_states = len(self.cell.state_size)\n return [None for _ in range(num_states)]\n return self._states\n\n @states.setter\n def states(self, states):\n self._states = states\n\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n if hasattr(self.cell.state_size, '__len__'):\n state_size = self.cell.state_size\n else:\n state_size = [self.cell.state_size]\n output_dim = state_size[0]\n\n if self.return_sequences:\n output_shape = (input_shape[0], input_shape[1], output_dim)\n else:\n output_shape = (input_shape[0], output_dim)\n\n if self.return_state:\n state_shape = [(input_shape[0], dim) for dim in state_size]\n return [output_shape] + state_shape\n else:\n return output_shape\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None for _ in self.states]\n return [output_mask] + state_mask\n else:\n return output_mask\n\n def build(self, input_shape):\n # Note input_shape will be list of shapes of initial states and\n # constants if these are passed in __call__.\n if self._num_constants is not None:\n constants_shape = input_shape[-self._num_constants:]\n else:\n constants_shape = None\n\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n batch_size = input_shape[0] if self.stateful 
else None\n input_dim = input_shape[-1]\n self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))\n\n # allow cell (if layer) to build before we set or validate state_spec\n if isinstance(self.cell, Layer):\n step_input_shape = (input_shape[0],) + input_shape[2:]\n if constants_shape is not None:\n self.cell.build([step_input_shape] + constants_shape)\n else:\n self.cell.build(step_input_shape)\n\n # set or validate state_spec\n if hasattr(self.cell.state_size, '__len__'):\n state_size = list(self.cell.state_size)\n else:\n state_size = [self.cell.state_size]\n\n if self.state_spec is not None:\n # initial_state was passed in call, check compatibility\n if [spec.shape[-1] for spec in self.state_spec] != state_size:\n raise ValueError(\n 'An `initial_state` was passed that is not compatible with '\n '`cell.state_size`. Received `state_spec`={}; '\n 'however `cell.state_size` is '\n '{}'.format(self.state_spec, self.cell.state_size))\n else:\n self.state_spec = [InputSpec(shape=(None, dim))\n for dim in state_size]\n if self.stateful:\n self.reset_states()\n\n def get_initial_state(self, inputs):\n # build an all-zero tensor of shape (samples, output_dim)\n initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)\n initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)\n initial_state = K.expand_dims(initial_state) # (samples, 1)\n if hasattr(self.cell.state_size, '__len__'):\n return [K.tile(initial_state, [1, dim])\n for dim in self.cell.state_size]\n else:\n return [K.tile(initial_state, [1, self.cell.state_size])]\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n inputs, initial_state, constants = self._standardize_args(\n inputs, initial_state, constants)\n\n if initial_state is None and constants is None:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n # If any of `initial_state` or `constants` are specified and are Keras\n # tensors, then add them to the inputs and temporarily modify the\n # input_spec to include them.\n\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n self.state_spec = [InputSpec(shape=K.int_shape(state))\n for state in initial_state]\n additional_specs += self.state_spec\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n self.constants_spec = [InputSpec(shape=K.int_shape(constant))\n for constant in constants]\n self._num_constants = len(constants)\n additional_specs += self.constants_spec\n # at this point additional_inputs cannot be empty\n is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')\n for tensor in additional_inputs:\n if hasattr(tensor, '_keras_history') != is_keras_tensor:\n raise ValueError('The initial state or constants of an RNN'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors')\n\n if is_keras_tensor:\n # Compute the full input spec, including state and constants\n full_input = [inputs] + additional_inputs\n full_input_spec = self.input_spec + additional_specs\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(RNN, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n mask=None,\n training=None,\n initial_state=None,\n constants=None):\n # 
input shape: `(samples, time (padded with zeros), input_dim)`\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if isinstance(inputs, list):\n inputs = inputs[0]\n if initial_state is not None:\n pass\n elif self.stateful:\n initial_state = self.states\n else:\n initial_state = self.get_initial_state(inputs)\n\n if isinstance(mask, list):\n mask = mask[0]\n\n if len(initial_state) != len(self.states):\n raise ValueError('Layer has ' + str(len(self.states)) +\n ' states but was passed ' +\n str(len(initial_state)) +\n ' initial states.')\n input_shape = K.int_shape(inputs)\n timesteps = input_shape[1]\n if self.unroll and timesteps in [None, 1]:\n raise ValueError('Cannot unroll a RNN if the '\n 'time dimension is undefined or equal to 1. \\n'\n '- If using a Sequential model, '\n 'specify the time dimension by passing '\n 'an `input_shape` or `batch_input_shape` '\n 'argument to your first layer. If your '\n 'first layer is an Embedding, you can '\n 'also use the `input_length` argument.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a `shape` '\n 'or `batch_shape` argument to your Input layer.')\n\n kwargs = {}\n if has_arg(self.cell.call, 'training'):\n kwargs['training'] = training\n\n if constants:\n if not has_arg(self.cell.call, 'constants'):\n raise ValueError('RNN cell does not support constants')\n\n def step(inputs, states):\n constants = states[-self._num_constants:]\n states = states[:-self._num_constants]\n return self.cell.call(inputs, states, constants=constants,\n **kwargs)\n else:\n def step(inputs, states):\n return self.cell.call(inputs, states, **kwargs)\n\n last_output, outputs, states = K.rnn(step,\n inputs,\n initial_state,\n constants=constants,\n go_backwards=self.go_backwards,\n mask=mask,\n unroll=self.unroll,\n input_length=timesteps)\n if self.stateful:\n updates = []\n for i in range(len(states)):\n updates.append((self.states[i], states[i]))\n self.add_update(updates, inputs)\n\n if self.return_sequences:\n output = outputs\n else:\n output = last_output\n\n # Properly set learning phase\n if getattr(last_output, '_uses_learning_phase', False):\n output._uses_learning_phase = True\n for state in states:\n state._uses_learning_phase = True\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def _standardize_args(self, inputs, initial_state, constants):\n \"\"\"Standardize `__call__` to a single list of tensor inputs.\n\n When running a model loaded from file, the input tensors\n `initial_state` and `constants` can be passed to `RNN.__call__` as part\n of `inputs` instead of by the dedicated keyword arguments. 
This method\n makes sure the arguments are separated and that `initial_state` and\n `constants` are lists of tensors (or None).\n\n # Arguments\n inputs: tensor or list/tuple of tensors\n initial_state: tensor or list of tensors or None\n constants: tensor or list of tensors or None\n\n # Returns\n inputs: tensor\n initial_state: list of tensors or None\n constants: list of tensors or None\n \"\"\"\n if isinstance(inputs, list):\n assert initial_state is None and constants is None\n if self._num_constants is not None:\n constants = inputs[-self._num_constants:]\n inputs = inputs[:-self._num_constants]\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n def to_list_or_none(x):\n if x is None or isinstance(x, list):\n return x\n if isinstance(x, tuple):\n return list(x)\n return [x]\n\n initial_state = to_list_or_none(initial_state)\n constants = to_list_or_none(constants)\n\n return inputs, initial_state, constants\n\n def reset_states(self, states=None):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n batch_size = self.input_spec[0].shape[0]\n if not batch_size:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the batch size by passing a '\n '`batch_shape` argument to your Input layer.')\n # initialize state if None\n if self.states[0] is None:\n if hasattr(self.cell.state_size, '__len__'):\n self.states = [K.zeros((batch_size, dim))\n for dim in self.cell.state_size]\n else:\n self.states = [K.zeros((batch_size, self.cell.state_size))]\n elif states is None:\n if hasattr(self.cell.state_size, '__len__'):\n for state, dim in zip(self.states, self.cell.state_size):\n K.set_value(state, np.zeros((batch_size, dim)))\n else:\n K.set_value(self.states[0],\n np.zeros((batch_size, self.cell.state_size)))\n else:\n if not isinstance(states, (list, tuple)):\n states = [states]\n if len(states) != len(self.states):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(self.states)) + ' states, '\n 'but it received ' + str(len(states)) +\n ' state values. Input received: ' +\n str(states))\n for index, (value, state) in enumerate(zip(states, self.states)):\n if hasattr(self.cell.state_size, '__len__'):\n dim = self.cell.state_size[index]\n else:\n dim = self.cell.state_size\n if value.shape != (batch_size, dim):\n raise ValueError('State ' + str(index) +\n ' is incompatible with layer ' +\n self.name + ': expected shape=' +\n str((batch_size, dim)) +\n ', found shape=' + str(value.shape))\n # TODO: consider batch calls to `set_value`.\n K.set_value(state, value)\n\n def get_config(self):\n config = {'return_sequences': self.return_sequences,\n 'return_state': self.return_state,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful,\n 'unroll': self.unroll}\n if self._num_constants is not None:\n config['num_constants'] = self._num_constants\n\n cell_config = self.cell.get_config()\n config['cell'] = {'class_name': self.cell.__class__.__name__,\n 'config': cell_config}\n base_config = super(RNN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from . 
import deserialize as deserialize_layer\n cell = deserialize_layer(config.pop('cell'),\n custom_objects=custom_objects)\n num_constants = config.pop('num_constants', None)\n layer = cls(cell, **config)\n layer._num_constants = num_constants\n return layer\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n if isinstance(self.cell, Layer):\n return self.cell.trainable_weights\n return []\n\n @property\n def non_trainable_weights(self):\n if isinstance(self.cell, Layer):\n if not self.trainable:\n return self.cell.weights\n return self.cell.non_trainable_weights\n return []\n\n @property\n def losses(self):\n if isinstance(self.cell, Layer):\n return self.cell.losses\n return []\n\n def get_losses_for(self, inputs=None):\n if isinstance(self.cell, Layer):\n cell_losses = self.cell.get_losses_for(inputs)\n return cell_losses + super(RNN, self).get_losses_for(inputs)\n return super(RNN, self).get_losses_for(inputs)\n\n\nclass SimpleRNNCell(Layer):\n \"\"\"Cell class for SimpleRNN.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n super(SimpleRNNCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = 
regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n prev_output = states[0]\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training)\n\n dp_mask = self._dropout_mask\n rec_dp_mask = self._recurrent_dropout_mask\n\n if dp_mask is not None:\n h = K.dot(inputs * dp_mask, self.kernel)\n else:\n h = K.dot(inputs, self.kernel)\n if self.bias is not None:\n h = K.bias_add(h, self.bias)\n\n if rec_dp_mask is not None:\n prev_output *= rec_dp_mask\n output = h + K.dot(prev_output, self.recurrent_kernel)\n if self.activation is not None:\n output = self.activation(output)\n\n # Properly set learning phase on output tensor.\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n output._uses_learning_phase = True\n return output, [output]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(SimpleRNNCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SimpleRNN(RNN):\n \"\"\"Fully-connected RNN where the output is to be fed back to input.\n\n # Arguments\n units: Positive integer, dimensionality of the 
output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if 'implementation' in kwargs:\n kwargs.pop('implementation')\n warnings.warn('The `implementation` argument '\n 'in `SimpleRNN` has been deprecated. '\n 'Please remove it from your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. 
'\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = SimpleRNNCell(units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout)\n super(SimpleRNN, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(SimpleRNN, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(SimpleRNN, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config:\n config.pop('implementation')\n return cls(**config)\n\n\nclass GRUCell(Layer):\n \"\"\"Cell class for the GRU layer.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n 
activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. 
These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(GRUCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units * 3),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 3),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units * 3,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n self.kernel_z = self.kernel[:, :self.units]\n self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]\n self.kernel_r = self.kernel[:, self.units: self.units * 2]\n self.recurrent_kernel_r = self.recurrent_kernel[:,\n self.units:\n self.units * 2]\n self.kernel_h = self.kernel[:, self.units * 2:]\n self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]\n\n if self.use_bias:\n self.bias_z = self.bias[:self.units]\n self.bias_r = self.bias[self.units: self.units * 2]\n self.bias_h = self.bias[self.units * 2:]\n else:\n self.bias_z = None\n self.bias_r = None\n self.bias_h = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] # previous memory\n\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=3)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=3)\n\n # dropout matrices for input units\n dp_mask = 
self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n if self.implementation == 1:\n if 0. < self.dropout < 1.:\n inputs_z = inputs * dp_mask[0]\n inputs_r = inputs * dp_mask[1]\n inputs_h = inputs * dp_mask[2]\n else:\n inputs_z = inputs\n inputs_r = inputs\n inputs_h = inputs\n x_z = K.dot(inputs_z, self.kernel_z)\n x_r = K.dot(inputs_r, self.kernel_r)\n x_h = K.dot(inputs_h, self.kernel_h)\n if self.use_bias:\n x_z = K.bias_add(x_z, self.bias_z)\n x_r = K.bias_add(x_r, self.bias_r)\n x_h = K.bias_add(x_h, self.bias_h)\n\n if 0. < self.recurrent_dropout < 1.:\n h_tm1_z = h_tm1 * rec_dp_mask[0]\n h_tm1_r = h_tm1 * rec_dp_mask[1]\n h_tm1_h = h_tm1 * rec_dp_mask[2]\n else:\n h_tm1_z = h_tm1\n h_tm1_r = h_tm1\n h_tm1_h = h_tm1\n z = self.recurrent_activation(x_z + K.dot(h_tm1_z,\n self.recurrent_kernel_z))\n r = self.recurrent_activation(x_r + K.dot(h_tm1_r,\n self.recurrent_kernel_r))\n\n hh = self.activation(x_h + K.dot(r * h_tm1_h,\n self.recurrent_kernel_h))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n matrix_x = K.dot(inputs, self.kernel)\n if self.use_bias:\n matrix_x = K.bias_add(matrix_x, self.bias)\n if 0. < self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n matrix_inner = K.dot(h_tm1,\n self.recurrent_kernel[:, :2 * self.units])\n\n x_z = matrix_x[:, :self.units]\n x_r = matrix_x[:, self.units: 2 * self.units]\n recurrent_z = matrix_inner[:, :self.units]\n recurrent_r = matrix_inner[:, self.units: 2 * self.units]\n\n z = self.recurrent_activation(x_z + recurrent_z)\n r = self.recurrent_activation(x_r + recurrent_r)\n\n x_h = matrix_x[:, 2 * self.units:]\n recurrent_h = K.dot(r * h_tm1,\n self.recurrent_kernel[:, 2 * self.units:])\n hh = self.activation(x_h + recurrent_h)\n h = z * h_tm1 + (1 - z) * hh\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(GRUCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass GRU(RNN):\n \"\"\"Gated Recurrent Unit - Cho et al. 2014.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n # References\n - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)\n - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)\n - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n warnings.warn('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. '\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = GRUCell(units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(GRU, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(GRU, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n 
@property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(GRU, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\nclass LSTMCell(Layer):\n \"\"\"Cell class for the LSTM layer.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. 
These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(LSTMCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = (self.units, self.units)\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units * 4),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n if self.unit_forget_bias:\n def bias_initializer(_, *args, **kwargs):\n return K.concatenate([\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer((self.units * 2,), *args, **kwargs),\n ])\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(shape=(self.units * 4,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n self.kernel_i = self.kernel[:, :self.units]\n self.kernel_f = self.kernel[:, self.units: self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3:]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]\n self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]\n\n if self.use_bias:\n self.bias_i = self.bias[:self.units]\n self.bias_f = self.bias[self.units: self.units * 2]\n self.bias_c = self.bias[self.units * 2: self.units * 3]\n self.bias_o = self.bias[self.units * 3:]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n 
self.bias_o = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=4)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=4)\n\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel_i)\n x_f = K.dot(inputs_f, self.kernel_f)\n x_c = K.dot(inputs_c, self.kernel_c)\n x_o = K.dot(inputs_o, self.kernel_o)\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias_i)\n x_f = K.bias_add(x_f, self.bias_f)\n x_c = K.bias_add(x_c, self.bias_c)\n x_o = K.bias_add(x_o, self.bias_o)\n\n if 0 < self.recurrent_dropout < 1.:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(x_i + K.dot(h_tm1_i,\n self.recurrent_kernel_i))\n f = self.recurrent_activation(x_f + K.dot(h_tm1_f,\n self.recurrent_kernel_f))\n c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,\n self.recurrent_kernel_c))\n o = self.recurrent_activation(x_o + K.dot(h_tm1_o,\n self.recurrent_kernel_o))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0. 
< self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, :self.units]\n z1 = z[:, self.units: 2 * self.units]\n z2 = z[:, 2 * self.units: 3 * self.units]\n z3 = z[:, 3 * self.units:]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h, c]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(LSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass LSTM(RNN):\n \"\"\"Long-Short Term Memory layer - Hochreiter 1997.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n # References\n - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n warnings.warn('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. 
'\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = LSTMCell(units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n unit_forget_bias=unit_forget_bias,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(LSTM, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(LSTM, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 
'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(LSTM, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\ndef _generate_dropout_ones(inputs, dims):\n # Currently, CTNK can't instantiate `ones` with symbolic shapes.\n # Will update workaround once CTNK supports it.\n if K.backend() == 'cntk':\n ones = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))\n return K.tile(ones, (1, dims))\n else:\n return K.ones((K.shape(inputs)[0], dims))\n\n\ndef _generate_dropout_mask(ones, rate, training=None, count=1):\n def dropped_inputs():\n return K.dropout(ones, rate)\n\n if count > 1:\n return [K.in_train_phase(\n dropped_inputs,\n ones,\n training=training) for _ in range(count)]\n return K.in_train_phase(\n dropped_inputs,\n ones,\n training=training)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
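A minimal usage sketch of the recurrent-layer API defined in the Keras source dumped in the record above: wrapping a cell in the generic RNN container, the `return_state` behaviour of `RNN.call` (which returns `[output] + states`), and stateful mode with `reset_states`. It assumes the standalone Keras 2.x package; the model sizes and random training data are illustrative placeholders, not part of the dataset record.

import numpy as np
from keras.models import Sequential, Model
from keras.layers import Input, RNN, SimpleRNNCell, LSTM

timesteps, input_dim, units, batch = 10, 8, 16, 4

# 1) Generic RNN container wrapping a cell (equivalent to SimpleRNN).
model = Sequential()
model.add(RNN(SimpleRNNCell(units), input_shape=(timesteps, input_dim)))
model.compile(optimizer='adam', loss='mse')

# 2) return_state: the layer returns [output] + states, so LSTM yields (out, h, c).
inp = Input(shape=(timesteps, input_dim))
out, state_h, state_c = LSTM(units, return_state=True)(inp)
encoder = Model(inp, [out, state_h, state_c])

# 3) Stateful mode needs a fixed batch size; states carry over between batches
#    until reset_states() is called.
stateful = Sequential()
stateful.add(LSTM(units, stateful=True, return_sequences=True,
                  batch_input_shape=(batch, timesteps, input_dim)))
stateful.compile(optimizer='adam', loss='mse')
x = np.random.random((batch, timesteps, input_dim))
y = np.random.random((batch, timesteps, units))
stateful.fit(x, y, batch_size=batch, epochs=1, shuffle=False)
stateful.reset_states()  # clear the carried-over hidden/cell states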
Msegade/pyNastran | [
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab"
] | [
"pyNastran/dev/bdf_vectorized/cards/dynamic.py",
"pyNastran/bdf/cards/dmig.py",
"pyNastran/dev/bdf_vectorized/cards/elements/shell/pcomp_helper.py",
"pyNastran/op2/tables/oes_stressStrain/real/oes_bush1d.py",
"pyNastran/op2/tables/oes_stressStrain/random/oes_bars.py",
"pyNastran/op2/tables/oef_forces/oef_thermal_objects.py"
] | [
"# pylint: disable=C0103,R0902,R0904,R0914\n\"\"\"\nAll dynamic control cards are defined in this file. This includes:\n\n * FREQ\n * FREQ1\n * FREQ2 (not implemented)\n * FREQ3\n * FREQ4\n * FREQ5 (not implemented)\n * NLPCI\n * NLPARM\n * TSTEP\n * TSTEPNL\n\nAll cards are BaseCard objects.\n\n\"\"\"\nfrom math import log, exp, ceil\nimport numpy as np\nfrom numpy import unique, hstack\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default\nfrom pyNastran.bdf.cards.base_card import BaseCard\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double, double_or_blank,\n string_or_blank, blank, fields, components_or_blank\n)\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.bdf.bdf import BDF\n\n\nclass DELAY(BaseCard):\n type = 'DELAY'\n\n def __init__(self, sid, nodes, components, delays, comment=''):\n \"\"\"\n +-------+-----+-----------+-----+--------+------+-----+--------+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+===========+=====+========+======+=====+========+=====+\n | DELAY | SID | POINT ID1 | C1 | T1 | P2 | C2 | T2 | |\n +-------+-----+-----------+-----+--------+------+-----+--------+-----+\n \"\"\"\n if comment:\n self.comment = comment\n\n #: Identification number of DELAY entry. (Integer > 0)\n self.sid = sid\n #: Grid, extra, or scalar point identification number. (Integer > 0)\n self.nodes = nodes\n #: Component number. (Integers 1 through 6 for grid points; zero or blank for extra\n #: or scalar points)\n self.components = components\n #: Time delay (tau) for designated point Pi and component Ci. (Real)\n self.delays = delays\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DELAY card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nodes = [integer(card, 2, 'node')]\n components = [integer(card, 3, 'components')]\n delays = [double_or_blank(card, 4, 'delay')]\n assert components[0] in [0, 1, 2, 3, 4, 5, 6], components\n if card.field(5):\n nodes.append(integer(card, 5, 'node'))\n components.append(integer(card, 6, 'components'))\n delays.append(double_or_blank(card, 7, 'delay'))\n assert components[1] in [0, 1, 2, 3, 4, 5, 6], components\n return DELAY(sid, nodes, components, delays, comment=comment)\n\n def add(self, delay):\n assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)\n if delay.comment:\n if hasattr('_comment'):\n self._comment += delay.comment\n else:\n self._comment = delay.comment\n self.nodes += delay.nodes\n self.components += delay.components\n self.delays += delay.delays\n\n def get_delay_at_freq(self, freq):\n return self.nodes, self.components, self.delays\n\n #def cross_reference(self, model: BDF) -> None:\n #\"\"\"\n #Cross links the card so referenced cards can be extracted directly\n\n #Parameters\n #----------\n #model : BDF()\n #the BDF object\n #\"\"\"\n #msg = ', which is required by DELAY sid=%s' % self.sid\n #self.nodes_ref = model.Node(self.node_ids, msg=msg)\n\n #@property\n #def node_id1(self):\n #if isinstance(self.nodes[0], integer_types):\n #return self.nodes[0]\n #return self.nodes_ref[0].nid\n\n #@property\n #def node_id2(self):\n #if isinstance(self.nodes[1], integer_types):\n #return self.nodes[1]\n #return 
self.nodes_ref[1].nid\n\n @property\n def node_ids(self):\n node_ids = [self.node_id1]\n if len(self.components) == 2:\n node_ids.append(self.node_id2)\n return node_ids\n\n def raw_fields(self):\n list_fields = ['DELAY', self.sid]\n for nid, comp, delay in zip(self.node_ids, self.components, self.delays):\n if isinstance(nid, integer_types):\n nidi = nid\n else:\n nidi = nid.nid\n list_fields += [nidi, comp, delay]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n msg = self.comment\n node_ids = self.node_ids\n if size == 8:\n for nid, comp, delay in zip(node_ids, self.components, self.delays):\n msg += print_card_8(['DELAY', self.sid, nid, comp, delay])\n else:\n for nid, comp, delay in zip(node_ids, self.components, self.delays):\n msg += print_card_16(['DELAY', self.sid, nid, comp, delay])\n return msg\n\n\nclass DPHASE(BaseCard):\n type = 'DPHASE'\n\n def __init__(self, sid, nodes, components, phase_leads, comment=''):\n \"\"\"\n +--------+-----+-----------+-----+------+------+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+===========+=====+======+======+=====+=====+=====+\n | DPHASE | SID | POINT ID1 | C1 | TH1 | P2 | C2 | TH2 | |\n +--------+-----+-----------+-----+------+------+-----+-----+-----+\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n self.nodes = nodes\n self.components = components\n self.phase_leads = phase_leads\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DPHASE card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nodes = [integer(card, 2, 'node')]\n components = [integer(card, 3, 'components')]\n phase_leads = [double_or_blank(card, 4, 'phase_lead')]\n assert components[0] in [0, 1, 2, 3, 4, 5, 6], components\n if card.field(5):\n nodes.append(integer(card, 5, 'node'))\n components.append(integer(card, 6, 'components'))\n phase_leads.append(double_or_blank(card, 7, 'phase_lead'))\n assert components[1] in [0, 1, 2, 3, 4, 5, 6], components\n return DPHASE(sid, nodes, components, phase_leads, comment=comment)\n\n def add(self, dphase):\n assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)\n if dphase.comment:\n if hasattr('_comment'):\n self._comment += dphase.comment\n else:\n self._comment = dphase.comment\n self.nodes += dphase.nodes\n self.components += dphase.components\n self.phase_leads += dphase.phase_leads\n\n #def cross_reference(self, model: BDF) -> None:\n #\"\"\"\n #Cross links the card so referenced cards can be extracted directly\n\n #Parameters\n #----------\n #model : BDF()\n #the BDF object\n #\"\"\"\n #msg = ', which is required by DPHASE sid=%s' % self.sid\n #self.nodes_ref = model.Nodes(self.node_ids, msg=msg)\n\n #@property\n #def node_id1(self):\n #if isinstance(self.nodes[0], integer_types):\n #return self.nodes[0]\n #return self.nodes_ref[0].nid\n\n #@property\n #def node_id2(self):\n #if isinstance(self.nodes[1], integer_types):\n #return self.nodes[1]\n #return self.nodes_ref[1].nid\n\n @property\n def node_ids(self):\n node_ids = [self.node_id1]\n if len(self.components) == 2:\n node_ids.append(self.node_id2)\n return node_ids\n\n def raw_fields(self):\n list_fields = ['DPHASE', self.sid]\n for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):\n if isinstance(nid, integer_types):\n nidi = nid\n else:\n nidi = nid.nid\n list_fields += 
[nidi, comp, delay]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n msg = self.comment\n node_ids = self.node_ids\n if size == 8:\n for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):\n msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])\n else:\n for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):\n msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])\n return msg\n\n\nclass FREQ(BaseCard):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems.\n\n +------+-----+-----+-----+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=====+=====+=====+======+=====+=====+=====+=====+\n | FREQ | SID | F1 | F2 | etc. | | | | |\n +------+-----+-----+-----+------+-----+-----+-----+-----+\n \"\"\"\n type = 'FREQ'\n\n def __init__(self, sid, freqs, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.freqs = np.unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n freqs = fields(double, card, 'freq', i=2, j=len(card))\n return FREQ(sid, freqs, comment=comment)\n\n def get_freqs(self):\n return self.freqs\n\n def add_frequencies(self, freqs):\n \"\"\"\n Combines the frequencies from 1 FREQx object with another.\n All FREQi entries with the same frequency set identification numbers\n will be used. Duplicate frequencies will be ignored.\n\n Parameters\n ----------\n freqs : ???\n the frequencies for a FREQx object\n \"\"\"\n #print(\"self.freqs = \",self.freqs)\n #print(\"freqs = \",freqs)\n self.freqs = unique(hstack([self.freqs, freqs]))\n\n def add_frequency_object(self, freq):\n \"\"\"\n :param freq: a FREQx object\n\n .. seealso:: :func:`addFrequencies`\n \"\"\"\n self.add_frequencies(freq.freqs)\n\n def raw_fields(self):\n list_fields = ['FREQ', self.sid] + list(self.freqs)\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ1(FREQ):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems by specification of a starting frequency, frequency\n increment, and the number of increments desired.\n\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+=====+\n | FREQ1 | SID | F1 | DF | NDF | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n\n .. 
note:: this card rewrites as a FREQ card\n \"\"\"\n type = 'FREQ1'\n\n def __init__(self, sid, f1, df, ndf, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.df = df\n self.ndf = ndf\n\n freqs = []\n for i in range(ndf):\n freqs.append(f1 + i * df)\n self.freqs = unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double_or_blank(card, 2, 'f1', 0.0)\n df = double(card, 3, 'df')\n ndf = integer_or_blank(card, 4, 'ndf', 1)\n assert len(card) <= 5, 'len(FREQ card) = %i\\ncard=%s' % (len(card), card)\n return FREQ1(sid, f1, df, ndf, comment=comment)\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ2(FREQ):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems by specification of a starting frequency, final\n frequency, and the number of logarithmic increments desired.\n\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+=====+\n | FREQ2 | SID | F1 | F2 | NDF | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n\n .. note:: this card rewrites as a FREQ card\n \"\"\"\n type = 'FREQ2'\n\n def __init__(self, sid, f1, f2, ndf=1, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.ndf = ndf\n\n d = 1. / ndf * log(f2 / f1)\n freqs = []\n for i in range(ndf):\n freqs.append(f1 * exp(i * d)) # 0 based index\n self.freqs = np.unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ2 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double(card, 2, 'f1') # default=0.0 ?\n f2 = double(card, 3, 'f2')\n ndf = integer_or_blank(card, 4, 'nf', 1)\n assert len(card) <= 5, 'len(FREQ2 card) = %i\\ncard=%s' % (len(card), card)\n return FREQ2(sid, f1, f2, ndf, comment=comment)\n #return FREQ(sid, freqs, comment=comment)\n\n\nclass FREQ3(FREQ):\n \"\"\"\n +-------+-----+------+-------+--------+-----+---------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 |\n +=======+=====+======+=======+========+=====+=========+\n | FREQ3 | SID | F1 | F2 | TYPE | NEF | CLUSTER |\n +-------+-----+------+-------+--------+-----+---------+\n | FREQ3 | 6 | 20.0 | 200.0 | LINEAR | 10 | 2.0 |\n +-------+-----+------+-------+--------+-----+---------+\n \"\"\"\n type = 'FREQ3'\n\n def __init__(self, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):\n if comment:\n self.comment = comment\n if f2 is None:\n f2 = f1\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.Type = Type\n self.nef = nef\n self.cluster = cluster\n\n @classmethod\n def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n f1 = double(card, 1, 'f1')\n f2 = integer_or_blank(card, 1, 'f2', f1)\n Type = string_or_blank(card, 1, 'Type', 'LINEAR')\n nef = integer_or_blank(card, 1, 'nef', 10)\n cluster = double_or_blank(card, 1, 'cluster', 1.0)\n\n return FREQ3(sid, f1, f2, Type, nef, cluster, comment='')\n\n def 
raw_fields(self):\n return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ4(FREQ):\n \"\"\"\n Defines a set of frequencies used in the solution of modal frequency\n response problems by specifying the amount of 'spread' around each natural\n frequency and the number of equally spaced excitation frequencies within\n the spread.\n\n +-------+-----+-----+-----+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+======+=====+=====+=====+=====+\n | FREQ4 | SID | F1 | F2 | FSPD | NFM | | | |\n +-------+-----+-----+-----+------+-----+-----+-----+-----+\n\n .. note:: this card rewrites as a FREQ card\n .. todo:: not done...\n \"\"\"\n type = 'FREQ4'\n\n def __init__(self, sid, f1, f2, fspread, nfm, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.fspread = fspread\n self.nfm = nfm\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ4 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double_or_blank(card, 2, 'f1', 0.0)\n f2 = double_or_blank(card, 3, 'f2', 1.e20)\n fspread = double_or_blank(card, 4, 'fspd', 0.1)\n nfm = integer_or_blank(card, 5, 'nfm', 3)\n assert len(card) <= 6, 'len(FREQ card) = %i\\ncard=%s' % (len(card), card)\n return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)\n\n def raw_fields(self):\n list_fields = ['FREQ4', self.sid, self.f1, self.f2, self.fspread,\n self.nfm]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\n#class FREQ5(FREQ):\n #type = 'FREQ5'\n\n #def __init__(self, card=None, data=None, comment=''):\n #if comment:\n # self.comment = comment\n #raise NotImplementedError()\n\n #def write_card(self, size: int=8, is_double: bool=False) -> str:\n #card = self.repr_fields()\n #if size == 8:\n #return self.comment + print_card_8(card)\n #return self.comment + print_card_16(card)\n\n\nclass NLPARM(BaseCard):\n \"\"\"\n Defines a set of parameters for nonlinear static analysis iteration\n strategy.\n\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+========+======+======+=========+=======+=========+=========+========+\n | NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | | MAXBIS | | | | MAXR | | RTOLB | CONV |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n \"\"\"\n type = 'NLPARM'\n\n def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,\n max_iter=25, conv='PW', int_out='NO',\n eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,\n fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):\n if 
comment:\n self.comment = comment\n self.nlparm_id = nlparm_id\n self.ninc = ninc\n self.dt = dt\n self.kmethod = kmethod\n self.kstep = kstep\n self.max_iter = max_iter\n self.conv = conv\n self.int_out = int_out\n\n # line 2\n self.eps_p = eps_p\n self.eps_u = eps_u\n self.eps_w = eps_w\n self.max_div = max_div\n self.max_qn = max_qn\n self.max_ls = max_ls\n self.fstress = fstress\n self.ls_tol = ls_tol\n\n # line 3\n self.max_bisect = max_bisect\n self.max_r = max_r\n self.rtol_b = rtol_b\n\n if self.max_qn is None:\n if kmethod == 'PFNT':\n self.max_qn = 0\n else:\n self.max_qn = max_iter\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NLPARM card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n nlparm_id = integer(card, 1, 'nlparm_id')\n ninc = integer_or_blank(card, 2, 'ninc', 10)\n dt = double_or_blank(card, 3, 'dt', 0.0)\n kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')\n kstep = integer_or_blank(card, 5, 'kstep', 5)\n max_iter = integer_or_blank(card, 6, 'max_iter', 25)\n conv = string_or_blank(card, 7, 'conv', 'PW')\n int_out = string_or_blank(card, 8, 'intOut', 'NO')\n\n # line 2\n eps_u = double_or_blank(card, 9, 'eps_u', 0.01)\n eps_p = double_or_blank(card, 10, 'eps_p', 0.01)\n eps_w = double_or_blank(card, 11, 'eps_w', 0.01)\n max_div = integer_or_blank(card, 12, 'max_div', 3)\n\n if kmethod == 'PFNT':\n max_qn = integer_or_blank(card, 13, 'max_qn', 0)\n else:\n max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)\n\n max_ls = integer_or_blank(card, 14, 'max_ls', 4)\n fstress = double_or_blank(card, 15, 'fstress', 0.2)\n ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)\n\n # line 3\n max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)\n max_r = double_or_blank(card, 21, 'max_r', 20.)\n rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)\n assert len(card) <= 24, 'len(NLPARM card) = %i\\ncard=%s' % (len(card), card)\n return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,\n int_out, eps_u, eps_p, eps_w, max_div,\n max_qn, max_ls, fstress,\n ls_tol, max_bisect, max_r,\n rtol_b, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a NLPARM card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n (nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,\n eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,\n rtol_b) = data\n\n if kmethod == 1:\n kmethod = 'AUTO'\n elif kmethod == 2:\n kmethod = 'ITER'\n elif kmethod == 4:\n kmethod = 'SEMI'\n elif kmethod == 3:\n kmethod = 'ADAPT'\n else:\n msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)\n raise NotImplementedError(msg)\n\n if conv == 1:\n conv = 'W'\n elif conv == 2:\n conv = 'P'\n elif conv == 3:\n conv = 'PW'\n elif conv == 4:\n conv = 'U'\n elif conv == 5:\n conv = 'UW'\n elif conv == 6:\n conv = 'UP'\n elif conv == 7:\n conv = 'UPW'\n else:\n msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)\n raise NotImplementedError(msg)\n\n if int_out == 0:\n int_out = 'NO'\n elif int_out == 1:\n int_out = 'YES'\n elif int_out == 2:\n int_out = 'ALL'\n else:\n msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)\n raise NotImplementedError(msg)\n return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,\n int_out, eps_u, eps_p, eps_w, 
max_div,\n max_qn, max_ls, fstress,\n ls_tol, max_bisect, max_r,\n rtol_b, comment=comment)\n\n def raw_fields(self):\n list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,\n self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,\n self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,\n self.fstress, self.ls_tol, self.max_bisect, None, None, None,\n self.max_r, None, self.rtol_b]\n return list_fields\n\n def repr_fields(self):\n ninc = set_blank_if_default(self.ninc, 10)\n dt = set_blank_if_default(self.dt, 0.0)\n kmethod = set_blank_if_default(self.kmethod, 'AUTO')\n kstep = set_blank_if_default(self.kstep, 5)\n max_iter = set_blank_if_default(self.max_iter, 25)\n conv = set_blank_if_default(self.conv, 'PW')\n int_out = set_blank_if_default(self.int_out, 'NO')\n eps_u = set_blank_if_default(self.eps_u, 0.01)\n eps_p = set_blank_if_default(self.eps_p, 0.01)\n eps_w = set_blank_if_default(self.eps_w, 0.01)\n max_div = set_blank_if_default(self.max_div, 3)\n max_qn = set_blank_if_default(self.max_qn, self.max_iter)\n max_ls = set_blank_if_default(self.max_ls, 4)\n fstress = set_blank_if_default(self.fstress, 0.2)\n ls_tol = set_blank_if_default(self.ls_tol, 0.5)\n max_bisect = set_blank_if_default(self.max_bisect, 5)\n max_r = set_blank_if_default(self.max_r, 20.)\n rtol_b = set_blank_if_default(self.rtol_b, 20.)\n\n list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,\n conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,\n fstress, ls_tol, max_bisect, None, None, None, max_r, None,\n rtol_b]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card) # having trouble with double precision...\n return self.comment + print_card_16(card)\n\n\nclass NLPCI(BaseCard):\n type = 'NLPCI'\n\n def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,\n scale=0., desiter=12, mxinc=20, comment=''):\n if comment:\n self.comment = comment\n self.nlpci_id = nlpci_id\n self.Type = Type\n self.minalr = minalr\n self.maxalr = maxalr\n self.scale = scale\n self.desiter = desiter\n self.mxinc = mxinc\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NLPCI card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n nlpci_id = integer(card, 1, 'nlpci_id')\n Type = string_or_blank(card, 2, 'Type', 'CRIS')\n minalr = double_or_blank(card, 3, 'minalr', 0.25)\n maxalr = double_or_blank(card, 4, 'maxalr', 4.0)\n scale = double_or_blank(card, 5, 'scale', 0.0)\n blank(card, 6, 'blank')\n desiter = integer_or_blank(card, 7, 'desiter', 12)\n mxinc = integer_or_blank(card, 8, 'mxinc', 20)\n return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,\n scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)\n\n def raw_fields(self):\n list_fields = ['NLPCI', self.nlpci_id, self.Type, self.minalr,\n self.maxalr, self.scale, None, self.desiter, self.mxinc]\n return list_fields\n\n def repr_fields(self):\n #minalr = set_blank_if_default(self.minalr, 0.25)\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TF(BaseCard):\n \"\"\"\n Defines a dynamic transfer function of the form:\n (B0 + B1 p + B2 *p2)*ud sum(A0_i + 
A1_i*p + A2_i*p2)*ui = 0\n\n +----+-----+-----+------+------+------+--------+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +====+=====+=====+======+======+======+========+====+====+\n | TF | SID | GD | CD | B0 | B1 | B2 | | |\n +----+-----+-----+------+------+------+--------+----+----+\n | | G_1 | C_1 | A0_1 | A1_1 | A2_1 | etc. | | |\n +----+-----+-----+------+------+------+--------+----+----+\n\n \"\"\"\n type = 'TF'\n def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.nid0 = nid0\n self.c = c\n self.b0 = b0\n self.b1 = b1\n self.b2 = b2\n self.nids = nids\n self.components = components\n self.a = a\n\n def validate(self):\n pass\n #assert len(self.grids1) > 0, 'ngrids1=%s\\n%s' % (len(self.grids1), str(self))\n\n #def cross_reference(self, model: BDF) -> None:\n #pass\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TF card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nid0 = integer(card, 2, 'nid0')\n # component 0 means an SPOINT/EPOINT\n c = components_or_blank(card, 3, 'components_0', 0)\n b0 = double_or_blank(card, 4, 'b0', 0.)\n b1 = double_or_blank(card, 5, 'b1', 0.)\n b2 = double_or_blank(card, 6, 'b2', 0.)\n\n nfields = len(card) - 9\n nrows = nfields // 8\n if nfields % 8 > 0:\n nrows += 1\n\n nids = []\n components = []\n a = []\n for irow in range(nrows):\n j = irow * 8 + 9\n #ifield = irow + 1\n nid = integer(card, j, 'grid_%i' % (irow + 1))\n component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)\n a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)\n a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)\n a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)\n nids.append(nid)\n components.append(component)\n a.append([a0, a1, a2])\n return TF(sid, nid0, c, b0, b1, b2, nids, components, a,\n comment=comment)\n\n def raw_fields(self):\n list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]\n for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):\n list_fields += [grid, c, a0, a1, a2, None, None, None]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n # double precision?\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TSTEP(BaseCard):\n \"\"\"\n Transient Time Step\n Defines time step intervals at which a solution will be generated and\n output in transient analysis.\n\n +-------+------+------+------+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+======+======+======+=====+=====+=====+=====+\n | TSTEP | SID | N1 | DT1 | NO1 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | N2 | DT2 | NO2 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | etc. 
| | | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n\n +-------+------+------+------+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+======+======+======+=====+=====+=====+=====+\n | TSTEP | 101 | 9000 | .001 | 9000 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | 1000 | .001 | 1 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n \"\"\"\n type = 'TSTEP'\n\n def __init__(self, sid, N, DT, NO, comment=''):\n \"\"\"\n Creates a TSTEP card\n\n Parameters\n ----------\n sid : int\n the time step id\n N : List[int/None]\n ???\n DT : List[float/None]\n ???\n NO : List[int/None]\n ???\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n #: Number of time steps of value DTi. (Integer > 1)\n self.N = N\n #: Time increment (float)\n self.DT = DT\n #: Skip factor for output. Every NOi-th step will be saved for output (default=1)\n self.NO = NO\n\n def validate(self):\n assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)\n assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TSTEP card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n N = []\n DT = []\n NO = []\n\n nrows = int(ceil((len(card) - 1.) / 8.))\n for i in range(nrows):\n n = 8 * i + 1\n ni = integer_or_blank(card, n + 1, 'N' + str(i), 1)\n dt = double_or_blank(card, n + 2, 'dt' + str(i), 0.)\n no = integer_or_blank(card, n + 3, 'NO' + str(i), 1)\n N.append(ni)\n DT.append(dt)\n NO.append(no)\n return TSTEP(sid, N, DT, NO, comment=comment)\n\n def raw_fields(self):\n list_fields = ['TSTEP', self.sid]\n for (N, dt, no) in zip(self.N, self.DT, self.NO):\n list_fields += [N, dt, no, None, None, None, None, None]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TSTEPNL(BaseCard):\n \"\"\"\n Defines parametric controls and data for nonlinear transient structural or\n heat transfer analysis. 
TSTEPNL is intended for SOLs 129, 159, and 600.\n Parameters for Nonlinear Transient Analysis.\n\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=========+========+========+=======+========+========+=======+=========+======+\n | TSTEPNL | ID | NDT | DT | NO | METHOD | KSTEP | MAXITER | CONV |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | | MAXBIS | ADJUST | MSTEP | RB | MAXR | UTOL | RTOLB | |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n\n method = None for NX, but apparently TSTEP as well, which is not in the QRG\n \"\"\"\n type = 'TSTEPNL'\n allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT', # MSC\n 'TSTEP'] # NX\n\n def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,\n max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,\n eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,\n fstress=0.2, max_bisect=5, adjust=5, mstep=None,\n rb=0.6, max_r=32., utol=0.1, rtol_b=20.,\n min_iter=None, comment=''):\n \"\"\"\n Creates a TSTEPNL card\n\n Parameters\n ----------\n sid : int\n the time step id\n ndt : ???\n ???\n dt : ???\n ???\n no : ???\n ???\n eps_u : float; default=1.e-2\n ???\n eps_p : float; default=1.e-3\n ???\n eps_w : float; default=1.e-6\n ???\n max_div : int; default=2\n ???\n max_qn : int; default=10\n ???\n max_ls : int; default=2\n ???\n fstress : float; default=0.2\n ???\n max_bisect : int; default=5\n ???\n adjust : int; default=5\n ???\n mstep : int; default=None\n ???\n rb : float; default=0.6\n ???\n max_r = float; default=32.\n ???\n utol = float; default=0.1\n ???\n rtol_b = float; default=20.\n ???\n min_iter : int; default=None\n not listed in all QRGs\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n\n # line 1\n self.sid = sid\n self.ndt = ndt\n self.dt = dt\n self.no = no\n self.method = method\n self.kstep = kstep\n self.max_iter = max_iter\n self.conv = conv\n\n self.eps_u = eps_u\n self.eps_p = eps_p\n self.eps_w = eps_w\n self.max_div = max_div\n self.max_qn = max_qn\n self.max_ls = max_ls\n self.fstress = fstress\n\n # line 3\n self.max_bisect = max_bisect\n self.adjust = adjust\n self.mstep = mstep\n self.rb = rb\n self.max_r = max_r\n self.utol = utol\n self.rtol_b = rtol_b\n self.min_iter = min_iter\n assert self.ndt >= 3\n assert self.dt > 0.\n\n def validate(self):\n if self.method not in self.allowed_methods:\n msg = 'method=%r allowed_methods=[%s]' % (\n self.method, ', '.join(self.allowed_methods))\n raise ValueError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TSTEPNL card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n ndt = integer(card, 2, 'ndt')\n dt = double(card, 3, 'dt')\n no = integer_or_blank(card, 4, 'no', 1)\n\n #: .. note:: not listed in all QRGs\n method = string_or_blank(card, 5, 'method', 'ADAPT')\n if method == 'ADAPT':\n kstep = integer_or_blank(card, 6, 'kStep', 2)\n elif method == 'ITER':\n kstep = integer_or_blank(card, 6, 'kStep', 10)\n elif method in ['AUTO', 'TSTEP', 'SEMI']:\n kstep = None\n #kstep = blank(card, 6, 'kStep') #: .. todo:: not blank\n else:\n msg = 'invalid TSTEPNL Method. 
method=%r; allowed_methods=[%s]' % (\n method, ', '.join(cls.allowed_methods))\n raise RuntimeError(msg)\n max_iter = integer_or_blank(card, 7, 'maxIter', 10)\n conv = string_or_blank(card, 8, 'conv', 'PW')\n\n # line 2\n eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)\n eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)\n eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)\n max_div = integer_or_blank(card, 12, 'maxDiv', 2)\n max_qn = integer_or_blank(card, 13, 'maxQn', 10)\n max_ls = integer_or_blank(card, 14, 'MaxLs', 2)\n fstress = double_or_blank(card, 15, 'fStress', 0.2)\n\n # line 3\n max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)\n adjust = integer_or_blank(card, 18, 'adjust', 5)\n mstep = integer_or_blank(card, 19, 'mStep')\n rb = double_or_blank(card, 20, 'rb', 0.6)\n max_r = double_or_blank(card, 21, 'maxR', 32.)\n utol = double_or_blank(card, 22, 'uTol', 0.1)\n rtol_b = double_or_blank(card, 23, 'rTolB', 20.)\n\n # not listed in all QRGs\n min_iter = integer_or_blank(card, 24, 'minIter')\n assert len(card) <= 25, 'len(TSTEPNL card) = %i\\ncard=%s' % (len(card), card)\n return TSTEPNL(\n sid, ndt, dt, no, method, kstep, max_iter, conv,\n eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,\n max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,\n comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a TSTEPNL card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n (sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,\n max_div, max_qn, max_ls, fstress, max_bisect,\n adjust, mstep, rb, max_r, utol, rtol_b) = data\n\n if method == 1:\n method = 'AUTO'\n elif method == 3:\n method = 'ADAPT'\n else:\n raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))\n\n if conv == 3:\n conv = 'PW'\n elif conv == 4:\n conv = 'U'\n #elif conv == 3:\n #conv = 'ADAPT'\n else:\n raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))\n\n min_iter = None # not listed in DMAP 2005\n return TSTEPNL(\n sid, ndt, dt, no, method, kstep, max_iter, conv,\n eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,\n max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,\n comment=comment)\n #self.sid = sid\n #self.ndt = ndt\n #self.dt = dt\n #self.no = no\n #self.method = method\n #self.kStep = kStep\n #self.maxIter = maxIter\n #self.conv = conv\n\n ## line 2\n #self.epsU = epsU\n #self.epsP = epsP\n #self.epsW = epsW\n #self.maxDiv = maxDiv\n #self.maxQn = maxQn\n #self.MaxLs = maxLs\n #self.fStress = fStress\n\n ## line 3\n #self.maxBisect = maxBisect\n #self.adjust = adjust\n #self.mStep = mStep\n #self.rb = rb\n #self.maxR = maxR\n #self.uTol = uTol\n #self.rTolB = rTolB\n\n def raw_fields(self):\n list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,\n self.method, self.kstep, self.max_iter, self.conv, self.eps_u,\n self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,\n self.fstress, None, self.max_bisect, self.adjust, self.mstep,\n self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]\n return list_fields\n\n def repr_fields(self):\n #no = set_blank_if_default(self.no,1)\n no = self.no\n method = set_blank_if_default(self.method, 'ADAPT')\n\n kstep = self.kstep\n #if self.method == 'ADAPT':\n #kStep = set_blank_if_default(self.kStep, 2)\n #elif self.method == 'ITER':\n #kStep = set_blank_if_default(self.kStep, 10)\n #else:\n #msg = 'invalid 
TSTEPNL Method. method=|%s|' %(self.method)\n #raise RuntimeError(msg)\n\n #maxIter = set_blank_if_default(self.maxIter, 10)\n conv = set_blank_if_default(self.conv, 'PW')\n\n eps_u = set_blank_if_default(self.eps_u, 1e-2)\n eps_p = set_blank_if_default(self.eps_p, 1e-3)\n eps_w = set_blank_if_default(self.eps_w, 1e-6)\n max_div = set_blank_if_default(self.max_div, 2)\n max_qn = set_blank_if_default(self.max_qn, 10)\n max_ls = set_blank_if_default(self.max_ls, 2)\n fstress = set_blank_if_default(self.fstress, 0.2)\n\n max_bisect = set_blank_if_default(self.max_bisect, 5)\n adjust = set_blank_if_default(self.adjust, 5)\n rb = set_blank_if_default(self.rb, 0.6)\n max_r = set_blank_if_default(self.max_r, 32.)\n utol = set_blank_if_default(self.utol, 0.1)\n rtol_b = set_blank_if_default(self.rtol_b, 20.)\n\n list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,\n kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,\n max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,\n max_r, utol, rtol_b, self.min_iter]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n",
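The string above holds the full dynamic-card module (DELAY, DPHASE, FREQ/FREQ1..FREQ4, NLPARM, NLPCI, TF, TSTEP, TSTEPNL) as escaped source. As a quick orientation, here is a minimal usage sketch, assuming pyNastran is installed and that these classes are importable from pyNastran.bdf.cards.dynamic (the module path is an assumption, not part of this dump); it only uses constructor and write_card() signatures that appear verbatim in the source above.

# Minimal sketch: build frequency and time-step cards and print their BDF text.
# Assumption: the classes live in pyNastran.bdf.cards.dynamic (path not shown in this dump).
from pyNastran.bdf.cards.dynamic import FREQ, FREQ1, TSTEP

# FREQ1(sid, f1, df, ndf): start frequency f1, increment df, ndf points
freq1 = FREQ1(sid=100, f1=10.0, df=5.0, ndf=4)       # expands to 10, 15, 20, 25 Hz
freq = FREQ(sid=100, freqs=[10.0, 12.5, 25.0])

# FREQi sets with the same SID are combined; duplicate frequencies are dropped (np.unique)
freq.add_frequency_object(freq1)
print(freq.write_card(size=8))

# TSTEP(sid, N, DT, NO): N steps of size DT, every NO-th step kept for output
tstep = TSTEP(sid=201, N=[9000], DT=[0.001], NO=[9000])
print(tstep.write_card(size=8))

The sketch mirrors the docstring example on the TSTEP card (9000 steps of 0.001 s) and relies only on the merging behavior documented in FREQ.add_frequencies; it is illustrative, not a test of the module.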
"# pylint: disable=R0902,R0904,R0914\nfrom math import sin, cos, radians, atan2, sqrt, degrees\nfrom itertools import count\nfrom typing import Tuple # , TYPE_CHECKING\n\nimport numpy as np\nfrom numpy import array, zeros\nfrom scipy.sparse import coo_matrix # type: ignore\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.cards.base_card import BaseCard\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nfrom pyNastran.bdf.field_writer_double import print_card_double\n\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double, string, string_or_blank,\n parse_components, interpret_value, integer_double_string_or_blank)\n\n\nclass DTI(BaseCard):\n \"\"\"\n +-----+-------+-----+------+-------+--------+------+-------------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=====+=======+=====+======+=======+========+======+=============+\n | DTI | UNITS | \"1\" | MASS | FORCE | LENGTH | TIME | STRESS |\n +-----+-------+-----+------+-------+--------+------+-------------+\n\n MSC\n\n +-----+-------+-----+------+-------+--------+------+-------------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=====+=======+=====+======+=======+========+======+=============+\n | DTI | UNITS | \"1\" | MASS | FORCE | LENGTH | TIME | TEMPERATURE |\n +-----+-------+-----+------+-------+--------+------+-------------+\n\n NX\n \"\"\"\n type = 'DTI'\n #_properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type', 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n fields = []\n return DTI(name, fields, comment='')\n\n def _finalize_hdf5(self, encoding):\n \"\"\"hdf5 helper function\"\"\"\n keys, values = self.fields\n\n # nan != nan\n values = [value if value == value else None for value in values]\n values_str = [value.decode(encoding) if isinstance(value, bytes) else value\n for value in values]\n #values = [valuei.decode(encoding) if isinstance(valuei, bytes) else (\n # None if np.isnan(valuei) else valuei)\n # for valuei in values]\n self.fields = {key : value for key, value in zip(keys, values_str)}\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n \"\"\"exports the elements in a vectorized way\"\"\"\n from pyNastran.bdf.bdf_interface.hdf5_exporter import _export_list\n for name, dti in sorted(model.dti.items()):\n if name == 'UNITS':\n i = 0\n for key, value in sorted(dti.fields.items()):\n #print(key, value)\n h5_group = h5_file.create_group(str(key))\n if value is None:\n h5_group.create_dataset(str(i), data=np.nan)\n else:\n h5_group.create_dataset(str(i), data=value)\n i += 1\n #fields = {\n #'mass' : mass,\n #'force' : force,\n #'length' : length,\n #'time' : time,\n #'temp_stress' : temp_stress\n #}\n else:\n for irecord, fields in sorted(dti.fields.items()):\n #h5_group = h5_file.create_group(str(irecord))\n attr = 'irecord=%s' % irecord\n namei = str(irecord)\n values = fields\n _export_list(h5_file, attr, namei, values, encoding)\n #print(h5_group)\n #print(irecord, fields)\n\n def __init__(self, name, fields, comment=''):\n \"\"\"\n Creates a DTI card\n\n Parameters\n ----------\n name : str\n UNITS\n fields : List[varies]\n the fields\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.name = name\n self.fields = fields\n assert len(fields) > 0, fields\n\n @classmethod\n def add_card(cls, card, comment):\n \"\"\"\n Adds a DTI card from ``BDF.add_card(...)``\n\n 
Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n if name == 'UNITS':\n integer(card, 2, '1')\n mass = string(card, 3, 'mass')\n force = string(card, 4, 'force')\n length = string(card, 5, 'length')\n time = string(card, 6, 'time')\n temp_stress = string_or_blank(card, 7, 'stress/temperature')\n fields = {\n 'mass' : mass,\n 'force' : force,\n 'length' : length,\n 'time' : time,\n 'temp_stress' : temp_stress\n }\n else:\n fields = []\n #field2 = card[2]\n\n list_fields = []\n irecord = integer(card, 2, 'record')\n if irecord == 0:\n for i in range(3, len(card)):\n val = integer_double_string_or_blank(\n card, i, 'T%i' % (i-1), default=32767)\n list_fields.append(val)\n else:\n for i in range(3, len(card)):\n val = integer_double_string_or_blank(\n card, i, 'T%i' % (i-1), default=None)\n list_fields.append(val)\n fields = {irecord: list_fields,}\n return DTI(name, fields, comment=comment)\n\n def raw_fields(self):\n if self.name == 'UNITS':\n mass = self.fields['mass']\n force = self.fields['force']\n length = self.fields['length']\n time = self.fields['time']\n temp_stress = self.fields['temp_stress']\n list_fields = ['DTI', self.name, '1', mass, force, length, time, temp_stress]\n else:\n list_fields = []\n for irecord, fields in sorted(self.fields.items()):\n nfields = len(fields)\n list_fields += ['DTI', self.name] + fields\n nleftover = nfields % 8\n if nleftover:\n list_fields += [None] * nleftover\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.name == 'UNITS':\n card = self.repr_fields()\n return self.comment + print_card_8(card)\n\n msg = self.comment\n for irecord, fields in sorted(self.fields.items()):\n list_fields = ['DTI', self.name, irecord, ] + fields\n msg += print_card_8(list_fields)\n return msg\n\n\nclass NastranMatrix(BaseCard):\n \"\"\"\n Base class for the DMIG, DMIJ, DMIJI, DMIK matrices\n \"\"\"\n def _finalize_hdf5(self, encoding):\n \"\"\"hdf5 helper function\"\"\"\n self.finalize()\n\n def __init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a NastranMatrix\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the jnode, jDOFs\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n if comment:\n self.comment = comment\n if Complex is None:\n Complex = []\n if tout is None:\n tout = 0\n\n polar = _set_polar(polar)\n\n if matrix_form not in [1, 2, 4, 5, 6, 8, 9]:\n msg = (\n 'matrix_form=%r must be [1, 2, 4, 5, 6, 8, 9]\\n'\n ' 1: Square\\n'\n ' 2: Rectangular\\n'\n #' 4: Lower Triangular\\n'\n #' 5: Upper 
Triangular\\n'\n ' 6: Symmetric\\n'\n #' 8: Identity (m=nRows, n=m)\\n'\n ' 9: Rectangular\\n' % matrix_form)\n raise ValueError(msg)\n self.name = name\n\n #: 4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = matrix_form\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n # 3=Complex, Single; 4=Complex, Double\n self.tin = tin\n\n #: 0-Set by cell precision\n self.tout = tout\n\n #: Input format of Ai, Bi. (Integer=blank or 0 indicates real, imaginary format;\n #: Integer > 0 indicates amplitude, phase format.)\n self.polar = polar\n\n self.ncols = ncols\n self.GCj = GCj\n self.GCi = GCi\n\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n assert self.tin in [3, 4], 'tin=%r and must 3 or 4 to be complex' % self.tin\n assert self.tout in [0, 3, 4], 'tin=%r and must 0, 3 or 4 to be complex' % self.tout\n assert isinstance(matrix_form, integer_types), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n assert not isinstance(matrix_form, bool), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n if finalize:\n self.finalize()\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NastranMatrix (DMIG, DMIJ, DMIK, DMIJI) card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n matrix_form = integer(card, 3, 'ifo')\n tin = integer(card, 4, 'tin')\n tout = integer_or_blank(card, 5, 'tout', 0)\n polar = integer_or_blank(card, 6, 'polar', 0)\n if matrix_form == 1: # square\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form == 6: # symmetric\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form in [2, 9]: # rectangular\n ncols = integer(card, 8, 'matrix_form=%s; ncol' % (matrix_form))\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n\n msg = (\n '%s name=%r matrix_form=%r is not supported. 
Valid forms:\\n'\n ' 4=Lower Triangular\\n'\n ' 5=Upper Triangular\\n'\n ' 6=Symmetric\\n'\n ' 8=Identity (m=nRows, n=m)\\n' % (cls.type, name, matrix_form)\n )\n raise NotImplementedError(msg)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return cls(name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment, finalize=False)\n\n @property\n def matrix_type(self):\n \"\"\"gets the matrix type\"\"\"\n if not isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n elif self.matrix_form == 6:\n matrix_type = 'symmetric'\n elif self.matrix_form in [2, 9]:\n matrix_type = 'rectangular'\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError('%s matrix_form=%r is not supported' % (\n self.type, self.matrix_form))\n return matrix_type\n\n def finalize(self):\n \"\"\"converts the lists into numpy arrays\"\"\"\n self.GCi = np.asarray(self.GCi)\n self.GCj = np.asarray(self.GCj)\n self.Real = np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @property\n def shape(self):\n \"\"\"gets the matrix shape\"\"\"\n if self.matrix_form in [1, 6]: # square, symmetric\n if self.ncols is not None:\n shape = (self.ncols, self.ncols)\n else:\n nrows, ncols = get_row_col_map(\n self, self.GCi, self.GCj, self.matrix_form)[:2]\n shape = (nrows, ncols)\n elif self.matrix_form in [2, 9]:\n raise NotImplementedError('need to pull the nrows after reading in everything')\n #shape = (self.ncols, self.ncols)\n else:\n raise NotImplementedError('matrix_form=%s' % self.matrix_form)\n return shape\n\n def _add_column(self, card, comment=''):\n \"\"\"adds an additional column entry to the matrix\"\"\"\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n\n name = string(card, 1, 'name')\n if name == 'UACCEL':\n self._add_column_uaccel()\n return\n\n Gj = integer(card, 2, 'Gj')\n # Cj = integer(card, 3, 'Cj')\n Cj = integer_or_blank(card, 3, 'Cj', 0)\n #Cj = parse_components(card, 3, 'Cj')\n assert 0 <= Cj <= 6, 'C%i must be between [0, 6]; Cj=%s' % (0, Cj)\n\n nfields = len(card)\n #print(\"nfields = %i\" % nfields)\n #print(\"card[5:] =\", card[5:])\n #print(\"(nfields - 5) %% 4 = %i\" % ((nfields - 5) % 4))\n\n nloops = (nfields - 5) // 4\n if (nfields - 5) % 4 in [2, 3]: # real/complex\n nloops += 1\n #assert nfields <= 8,'nfields=%s' % nfields\n #print(\"nloops = %i\" % nloops)\n assert nloops > 0, 'nloops=%s' % nloops\n\n for i in range(nloops):\n self.GCj.append((Gj, Cj))\n\n if self.is_complex:\n if self.is_polar:\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n self.GCi.append((Gi, Ci))\n magi = double(card, n + 2, 'ai')\n phasei = double(card, n + 3, 'bi')\n reali = magi * cos(radians(phasei))\n complexi = magi * sin(radians(phasei))\n self.Real.append(reali)\n 
self.Complex.append(complexi)\n else:\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n self.GCi.append((Gi, Ci))\n reali = double(card, n + 2, 'real')\n complexi = double(card, n + 3, 'complex')\n self.Real.append(reali)\n self.Complex.append(complexi)\n else:\n # real\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n reali = double(card, n + 2, 'real')\n self.GCi.append((Gi, Ci))\n self.Real.append(reali)\n #print(\"GC=%s,%s real=%s\" % (Gi, Ci, reali))\n\n msg = '(len(GCj)=%s len(GCi)=%s' % (len(self.GCj), len(self.GCi))\n assert len(self.GCj) == len(self.GCi), msg\n #if self.is_complex:\n #self.Complex(double(card, v, 'complex')\n\n def get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool; default=False\n should the matrix be returned as a sparse matrix.\n Slower for dense matrices.\n apply_symmetry : bool; default=True\n If the matrix is symmetric (ifo=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n\n Returns\n -------\n M : numpy.ndarray or scipy.coomatrix\n the matrix\n rows : dict[int] = [int, int]\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols: dict[int] = [int, int]\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. warning:: is_sparse=True WILL fail\n\n \"\"\"\n return get_matrix(self, is_sparse=is_sparse, apply_symmetry=apply_symmetry)\n\n @property\n def is_real(self):\n \"\"\"real vs. complex attribute\"\"\"\n return not self.is_complex\n\n @property\n def is_complex(self):\n \"\"\"real vs. complex attribute\"\"\"\n if self.tin in [1, 2]: # real\n return False\n elif self.tin in [3, 4]: # complex\n return True\n msg = ('Matrix %r must have a value of TIN = [1, 2, 3, 4].\\n'\n 'TIN defines the type (real, complex) '\n 'of the matrix. TIN=%r.\\n'\n ' TIN=1,2 -> real\\n'\n ' TIN=3,4 -> complex' % (self.name, self.tin))\n raise ValueError(msg)\n\n @property\n def is_polar(self):\n \"\"\"\n Used by:\n - DMIG\n - DMIJ\n - DMIJI\n - DMIK\n\n Not used by:\n - DMI\n - DMIAX\n - DMIG, UACCEL\n - DMIGOUT\n - DMIGROT\n\n \"\"\"\n if self.polar == 0: # real, imag\n return False\n elif self.polar == 1: # mag, phase\n return True\n elif self.polar is None:\n return False\n msg = ('Matrix %r must have a value of POLAR = [0, 1].\\n'\n 'POLAR defines the type (real/imag or mag/phase) complex) '\n 'of the matrix. POLAR=%r.' 
% (self.name, self.polar))\n raise ValueError(msg)\n\n @property\n def tin_dtype(self):\n \"\"\"gets the input dtype\"\"\"\n return _get_dtype(self.is_complex, self.tin)\n\n @property\n def tout_dtype(self):\n \"\"\"gets the output dtype\"\"\"\n return _get_dtype(self.is_complex, self.tout)\n\n def __repr__(self):\n return self.write_card(size=8, is_double=False)\n\n def fill_in_default_components(self, model):\n for i, (Gi, Ci) in enumerate(self.GCi):\n if Ci is None:\n node = model.nodes[Gi]\n if node.type == 'GRID':\n msg = ('Ci on DMIG card must be 1, 2, 3, 4, 5, or 6; '\n 'Node=%i (GRID); Ci=%s' % (Gi, Ci))\n raise RuntimeError(msg)\n elif node.type in ['SPOINT', 'EPOINT']:\n Ci = 0\n else:\n raise NotImplementedError(node)\n self.GCi[i] = [Gi, Ci]\n\n for i, (Gj, Cj) in enumerate(self.GCj):\n if Cj is None:\n node = model.nodes[Gj]\n if node.type == 'GRID':\n msg = ('Cj on DMIG card must be 1, 2, 3, 4, 5, or 6; '\n 'Node=%i (GRID); Cj=%s' % (Gj, Cj))\n raise RuntimeError(msg)\n elif node.type in ['SPOINT', 'EPOINT']:\n Cj = 0\n else:\n raise NotImplementedError(node)\n self.GCj[i] = [Gj, Cj]\n return\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n\n assert isinstance(self.GCi, (list, np.ndarray)), 'type(GCi)=%s' % type(self.GCi)\n assert isinstance(self.GCj, (list, np.ndarray)), 'type(GCj)=%s' % type(self.GCj)\n assert isinstance(self.Real, (list, np.ndarray)), 'type(Real)=%s' % type(self.Real)\n #assert isinstance(self.GCi[0], (list, np.ndarray)), 'type(GCi[0])=%s' % type(self.GCi[0])\n #assert isinstance(self.GCj[0], (list, np.ndarray)), 'type(GCj[0])=%s' % type(self.GCj[0])\n\n msg = '\\n$' + '-' * 80\n msg += '\\n$ %s Matrix %s\\n' % (self.type, self.name)\n list_fields = [self.type, self.name, 0, self.matrix_form, self.tin,\n self.tout, self.polar, None, self.ncols]\n if size == 8:\n msg += print_card_8(list_fields)\n else:\n msg += print_card_16(list_fields)\n\n if self.is_complex:\n if self.is_polar:\n for (GCi, GCj, reali, complexi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n magi = sqrt(reali**2 + complexi**2)\n if reali == 0.0:\n phasei = 0.0\n else:\n phasei = degrees(atan2(complexi, reali))\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], magi, phasei]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n else:\n for (GCi, GCj, reali, complexi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], reali, complexi]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n else:\n for (GCi, GCj, reali) in zip(self.GCi, self.GCj, self.Real):\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], reali, None]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n\n #msg += '\\n\\nGCi[0]=%s\\n' % self.GCi[0]\n #msg += 'GCj[0]=%s\\n' % self.GCj[0]\n #msg += 'Real[0]=%s\\n' % self.Real[0]\n #assert isinstance(self.GCi[0], (list, np.ndarray)), msg\n #assert isinstance(self.GCj[0], (list, np.ndarray)), msg\n #assert isinstance(self.Real[0], (list, 
np.ndarray)), msg\n\n return msg\n\n\nclass DMIG_UACCEL(BaseCard):\n \"\"\"\n Direct Matrix Input of Enforced Static Acceleration\n Defines rigid body accelerations in the basic coordinate system.\n\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | |\n +======+========+=====+=====+=====+=====+=====+=======+=======+\n | DMIG | UACCEL | \"0\" | \"9\" | TIN | | | | NCOL |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | L | | | G1 | C1 | X1 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | | G2 | C2 | X2 | | G3 | C3 | X3 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 0 | 9 | 1 | | | | 4 |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 2 | | | 2 | 3 | 386.4 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 3 | | | 2 | 4 | 3.0 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 4 | | | 2 | 6 | 1.0 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n \"\"\"\n type = 'DMIG'\n name = 'UACCEL'\n def __init__(self, tin, ncol, load_sequences, comment=''):\n if comment:\n self.comment = comment\n self.tin = tin\n self.ncol = ncol\n self.load_sequences = load_sequences\n #print(str(self))\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmig, encoding)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DMIG,UACCEL card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n tin = integer(card, 4, 'tin')\n ncol = integer_or_blank(card, 8, 'ncol')\n return DMIG_UACCEL(tin, ncol, load_sequences={}, comment=comment)\n\n def _add_column(self, card, comment=''):\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n load_seq = integer(card, 2, 'load_seq')\n\n i = 0\n ifield = 5\n self.load_sequences[load_seq] = []\n assert len(card) >= 8, 'len=%s card=%s' % (len(card), card)\n while ifield < len(card):\n g1 = integer(card, ifield, 'nid%d' % i)\n c1 = parse_components(card, ifield+1, 'c%d' % i)\n x1 = double(card, ifield+2, 'x%d' % i)\n #assert len(card) <= 8, 'len=%s card=%s' % (len(card), card)\n gcx = [g1, c1, x1]\n self.load_sequences[load_seq].append(gcx)\n ifield += 4\n i += 1\n\n\n @staticmethod\n def finalize():\n \"\"\"a passer method\"\"\"\n pass\n\n def raw_fields(self):\n list_fields = [\n 'DMIG', 'UACCEL', 0, 9, self.tin, None, None, None, self.ncol\n ]\n for lseq, ncx in sorted(self.load_sequences.items()):\n list_fields += [lseq, None, None]\n for ncxi in ncx:\n list_fields += ncxi\n #for (nid, comp, xi) in ncx:\n #print('list_fields= %s' % list_fields)\n self.write_card()\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n msg = self.write_card_8()\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n msg = self.write_card_16()\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n return msg\n\n def write_card_8(self):\n \"\"\"writes the card in small field format\"\"\"\n return self._write_card(print_card_8)\n\n def write_card_16(self):\n \"\"\"writes the card in small large format\"\"\"\n 
return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card\"\"\"\n msg = '\\n$' + '-' * 80\n msg += '\\n$ DMIG Matrix UACCEL\\n'\n list_fields = [\n 'DMIG', 'UACCEL', 0, 9, self.tin, None, None, None, self.ncol,\n ]\n msg += func(list_fields)\n\n for lseq, ncx in sorted(self.load_sequences.items()):\n list_fields = ['DMIG', 'UACCEL']\n list_fields += [lseq, None, None]\n for ncxi in ncx:\n list_fields += ncxi + [None]\n list_fields.pop()\n msg += func(list_fields)\n #print(msg)\n #if self.is_complex:\n #msg += self._get_complex_fields(func)\n #else:\n #msg += self._get_real_fields(func)\n return msg\n\n def __repr__(self):\n return self.write_card(size=8)\n\nclass DMIG(NastranMatrix):\n \"\"\"\n Defines direct input matrices related to grid, extra, and/or scalar points.\n The matrix is defined by a single header entry and one or more column\n entries. A column entry is required for each column with nonzero elements.\n\n +------+------+----+-----+-----+------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+======+====+=====+=====+======+=======+====+======+\n | DMIG | NAME | 0 | IFO | TIN | TOUT | POLAR | | NCOL |\n +------+------+----+-----+-----+------+-------+----+------+\n | DMIG | NAME | GJ | CJ | | G1 | C1 | A1 | B1 |\n +------+------+----+-----+-----+------+-------+----+------+\n | | G2 | C2 | A2 | B2 | | | | |\n +------+------+----+-----+-----+------+-------+----+------+\n \"\"\"\n type = 'DMIG'\n _properties = ['is_real', 'is_complex', 'is_polar', 'matrix_type', 'shape',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIG(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmig, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIG card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the [jnode, jDOFs]\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIAX(BaseCard):\n \"\"\"\n Direct Matrix Input for Axisymmetric Analysis\n\n Defines axisymmetric (fluid or structure) related direct input matrix\n terms. The matrix is defined by a single header entry and one or\n more column entries. Only one header entry is required. 
A column\n entry is required for each column with nonzero elements.\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | NAME | 0 | IFO | TIN | TOUT | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | NAME | GJ | CJ | NJ | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | G1 | C1 | N1 | A1 | B1 | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | G2 | C2 | etc. | | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | B2PP | 0 | 1 | 3 | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | DMIAX | B2PP | 32 | | | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | 1027 | 3 | 4.25+6 | | 2.27+3 | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n \"\"\"\n type = 'DMIAX'\n\n def __init__(self, name, matrix_form, tin, tout, ncols,\n GCNj, GCNi, Real, Complex=None, comment=''):\n \"\"\"\n Creates a DMIAX card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 1=Square\n 2=General Rectangular\n 6=Symmetric\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 3=Complex, Single Precision\n tout : int\n matrix output precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n GCNj : List[(node, dof, harmonic_number)]???\n the jnode, jDOFs\n GCNi : List[(node, dof, harmonic_number)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n ncols = None\n\n if comment:\n self.comment = comment\n\n if Complex is None:\n Complex = []\n\n if tout is None:\n tout = 0\n\n self.name = name\n\n #: ifo/4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = matrix_form\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n # 3=Complex, Single; 4=Complex, Double\n self.tin = tin\n\n #: 0-Set by cell precision\n self.tout = tout\n\n self.ncols = ncols\n self.GCNj = GCNj\n self.GCNi = GCNi\n\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n if matrix_form not in [1]: #4, 5, 6, 8\n msg = (\n f'{self.type} name={name!r} matrix_form={matrix_form!r} '\n 'must be [1, 2, 6]\\n'\n ' 1: Square\\n'\n ' 2: General Rectangular\\n'\n ' 4: Lower Triangular\\n'\n ' 5: Upper Triangular\\n'\n ' 6: Symmetric\\n'\n ' 8: Identity (m=nRows, n=m)\\n')\n raise ValueError(msg)\n\n assert isinstance(matrix_form, integer_types), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n assert not isinstance(matrix_form, bool), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n\n def finalize(self):\n \"\"\"converts the lists into numpy arrays\"\"\"\n return\n #self.GCi = np.asarray(self.GCi)\n #self.GCj = np.asarray(self.GCj)\n self.Real = 
np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmiax_to_hdf5(h5_file, model, model.dmiax, encoding)\n\n @property\n def is_real(self):\n \"\"\"is the matrix real?\"\"\"\n if self.tin in [1, 2]:\n return True\n return False\n\n @property\n def is_complex(self):\n \"\"\"is the matrix complex\"\"\"\n return not self.is_real\n\n @property\n def is_polar(self):\n \"\"\"is the matrix polar (vs real/imag)?\"\"\"\n return False\n\n @property\n def tin_dtype(self):\n \"\"\"gets the input dtype\"\"\"\n return _get_dtype(self.is_complex, self.tin)\n\n @property\n def tout_dtype(self):\n \"\"\"gets the output dtype\"\"\"\n return _get_dtype(self.is_complex, self.tout)\n\n @property\n def matrix_type(self):\n \"\"\"gets the matrix type\"\"\"\n if not isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n #elif self.matrix_form == 6:\n #matrix_type = 'symmetric'\n #elif self.matrix_form in [2, 9]:\n #matrix_type = 'rectangular'\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError(f'{self.type} matrix_form={self.matrix_form} '\n 'is not supported')\n return matrix_type\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NastranMatrix (DMIAX) card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n matrix_form = integer(card, 3, 'ifo')\n tin = integer(card, 4, 'tin')\n tout = integer_or_blank(card, 5, 'tout', 0)\n if matrix_form == 1: # square\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form == 6: # symmetric\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form in [2, 9]: # rectangular\n ncols = integer(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError('matrix_form=%s is not supported' % matrix_form)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return DMIAX(name, matrix_form, tin, tout, ncols,\n GCj, GCi, Real, Complex, comment=comment)\n\n def _add_column(self, card, comment=''):\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n\n unused_name = string(card, 1, 'name')\n\n Gj = integer(card, 2, 'Gj')\n # Cj = integer(card, 3, 'Cj')\n Cj = integer_or_blank(card, 3, 'Cj', 0)\n #Cj = parse_components(card, 3, 'Cj')\n Nj = integer_or_blank(card, 4, 'Nj')\n\n assert 0 <= Cj <= 6, 'C%i must be between [0, 6]; Cj=%s' % (0, Cj)\n\n nfields = len(card)\n #print(\"nfields = %i\" % nfields)\n #print(\"card[5:] =\", card[5:])\n #print(\"(nfields - 5) %% 4 = %i\" % ((nfields - 5) % 4))\n\n nloops = (nfields - 8) // 8\n if nfields - 8 % 8:\n nloops += 1\n #assert nfields <= 8,'nfields=%s' % nfields\n #print(\"nloops = %i\" % 
nloops)\n assert nloops > 0, 'nloops=%s' % nloops\n\n self.GCNj.append((Gj, Cj, Nj))\n GCNi = []\n self.GCNi.append(GCNi)\n if self.is_complex:\n for i in range(nloops):\n #print(dir(card))\n n = 9 + 8 * i\n Gi = integer(card, n, f'Gi{i}')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, f'Ci{i}', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n Ni = integer_or_blank(card, n + 2, f'Ni{i}')\n\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n GCNi.append((Gi, Ci, Ni))\n reali = double(card, n + 3, 'real')\n complexi = double(card, n + 4, 'complex')\n self.Real.append(reali)\n self.Complex.append(complexi)\n else:\n # real\n for i in range(nloops):\n n = 9 + 9 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n Ni = integer(card, n + 2, 'Ni')\n\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n reali = double(card, n + 3, 'real')\n GCNi.append((Gi, Ci, Ni))\n self.Real.append(reali)\n #print(\"GC=%s,%s real=%s\" % (Gi, Ci, reali))\n\n msg = '(len(GCNj)=%s len(GCNi)=%s' % (len(self.GCNj), len(self.GCNi))\n assert len(self.GCNj) == len(self.GCNi), msg\n #if self.is_complex:\n #self.Complex(double(card, v, 'complex')\n\n def raw_fields(self):\n list_fields = [\n 'DMIAX', self.name, 0, self.matrix_form, self.tin, None, None, None, self.ncols,\n ]\n k = 0\n if self.is_real:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields += ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n list_fields += [gi, ci, ni, reali, None, None, None, None]\n k += 1\n else:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields += ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n imagi = self.Complex[k]\n list_fields += [gi, ci, ni, reali, imagi, None, None, None, None]\n k += 1\n\n self.write_card()\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n msg = self.write_card_8()\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n msg = self.write_card_16()\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n return msg\n\n def write_card_8(self):\n \"\"\"writes the card in small field format\"\"\"\n return self._write_card(print_card_8)\n\n def write_card_16(self):\n \"\"\"writes the card in small large format\"\"\"\n return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card\"\"\"\n msg = '\\n$' + '-' * 80\n msg += f'\\n$ DMIAX Matrix {self.name}\\n'\n list_fields = [\n 'DMIAX', self.name, 0, self.matrix_form, self.tin, None, None, None, self.ncols,\n ]\n msg += func(list_fields)\n k = 0\n assert len(self.GCNj) > 0, self.get_stats()\n assert len(self.GCNi) > 0, self.get_stats()\n if self.is_real:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields = ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n list_fields += [gi, ci, ni, reali, None, None, None, None]\n k += 1\n msg += func(list_fields)\n else:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields = ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for 
unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n imagi = self.Complex[k]\n list_fields += [gi, ci, ni, reali, imagi, None, None, None]\n k += 1\n msg += func(list_fields)\n return msg\n\n def __repr__(self):\n return self.write_card(size=8)\n\nclass DMIJ(NastranMatrix):\n \"\"\"\n Direct Matrix Input at js-Set of the Aerodynamic Mesh\n Defines direct input matrices related to collation degrees-of-freedom\n (js-set) of aerodynamic mesh points for CAERO1, CAERO3, CAERO4 and CAERO5\n and for the slender body elements of CAERO2. These include W2GJ, FA2J and\n input pressures and downwashes associated with AEPRESS and AEDW entries.\n The matrix is described by a single header entry and one or more column\n entries. A column entry is required for each column with nonzero elements.\n For entering data for the interference elements of a CAERO2, use DMIJI\n or DMI.\n\n \"\"\"\n type = 'DMIJ'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n ifo = 1\n tin = 1\n tout = 1\n polar = 0\n ncols = 1\n GCj = []\n GCi = []\n Real = []\n return DMIJ(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmij, encoding)\n\n def __init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='',\n finalize=True):\n \"\"\"\n Creates a DMIJ card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]???\n the jnode, jDOFs\n GCi : List[(node, dof)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIJI(NastranMatrix):\n \"\"\"\n Direct Matrix Input at js-Set of the Interference Body\n Defines direct input matrices related to collation degrees-of-freedom\n (js-set) of aerodynamic mesh points for the interference elements of CAERO2.\n These include W2GJ, FA2J and input pressures and downwashes associated with\n AEPRESS and AEDW entries. The matrix is described by a single header entry\n and one or more column entries. A column entry is required for each column\n with nonzero elements. 
For entering data for the slender elements of a\n CAERO2, or a CAERO1, 3, 4 or 5 use DMIJ or DMI.\n\n \"\"\"\n type = 'DMIJI'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIJI(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmiji, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIJI card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]???\n the jnode, jDOFs\n GCi : List[(node, dof)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIK(NastranMatrix):\n \"\"\"\n Direct Matrix Input at ks-Set of the Aerodynamic Mesh\n Defines direct input matrices related to physical (displacement)\n degrees-of-freedom (ks-set) of aerodynamic grid points. These include WKK,\n WTFACT and input forces associated with AEFORCE entries. The matrix is\n described by a single header entry and one or more column entries. 
A column\n entry is required for each column with nonzero elements.\n\n +------+-------+----+-----+-----+------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=======+====+=====+=====+======+=======+====+======+\n | DMIK | NAME | 0 | IFO | TIN | TOUT | POLAR | | NCOL |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | NAME | GJ | CJ | | G1 | C1 | A1 | B1 |\n +------+-------+----+-----+-----+------+-------+----+------+\n | | G2 | C2 | A2 | B2 | | | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | ALPH1 | 0 | 9 | 2 | 0 | 1 | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | ALPH1 | 1 | 1 | 1 | 1 | 1.0 | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | | 2 | 1 | 1.0 | | | | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n \"\"\"\n type = 'DMIK'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIK(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmik, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIK card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the jnode, jDOFs\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMI(NastranMatrix):\n \"\"\"\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=======+======+======+=========+==========+===========+===========+======+\n | DMI | NAME | 0 | FORM | TIN | TOUT | | M | N |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | DMI | NAME | J | I1 | A(I1,J) | A(I1,J) | A(I1+1,J) | A(I1+2,J) | etc. |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | | I2 | etc. 
| | | | | | |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n \"\"\"\n type = 'DMI'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n matrix_form = 8\n tin = 1\n tout = 1\n nrows = 5\n ncols = 5\n GCj = []\n GCi = []\n Real = []\n return DMI(name, matrix_form, tin, tout, nrows, ncols, GCj, GCi, Real,\n Complex=None, comment='', finalize=False)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmi, encoding)\n\n def __init__(self, name, matrix_form, tin, tout, nrows, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n #NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n #GCj, GCi, Real, Complex, comment='')\n if comment:\n self.comment = comment\n\n if Complex is None:\n Complex = []\n\n if tout is None:\n tout = 0\n\n if matrix_form not in [1, 2, 3, 4, 5, 6, 8]:\n msg = (\n '%s name=%r matrix_form=%r must be [1, 2, 3, 4, 5, 6, 8]\\n'\n ' 1: Square\\n'\n ' 2: Rectangular\\n'\n ' 3: Diagonal matrix (M=number of rows, N=1)\\n'\n ' 4: Lower Triangular\\n'\n ' 5: Upper Triangular\\n'\n ' 6: Symmetric\\n'\n ' 8: Identity (m=nRows, n=m)\\n'\n #' 9: Rectangular\\n'\n % (self.type, name, matrix_form))\n raise ValueError(msg)\n\n self.name = name\n self.matrix_form = matrix_form\n self.tin = tin\n self.tout = tout\n self.nrows = nrows\n self.ncols = ncols\n self.GCi = GCi\n self.GCj = GCj\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n if finalize:\n self.finalize()\n\n #@property\n #def form(self):\n #\"\"\"gets the matrix_form\"\"\"\n #self.deprecated('form', 'matrix_form', '1.1')\n #return self.matrix_form\n\n #@form.setter\n #def form(self, matrix_form):\n #\"\"\"sets the matrix_form\"\"\"\n #self.deprecated('form', 'matrix_form', '1.1')\n #self.matrix_form = matrix_form\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DMI card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n #: Form of the matrix: 1=Square (not symmetric); 2=Rectangular;\n #: 3=Diagonal (m=nRows,n=1); 4=Lower Triangular; 5=Upper Triangular;\n #: 6=Symmetric; 8=Identity (m=nRows, n=m)\n matrix_form = integer(card, 3, 'form')\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n #: 3=Complex, Single; 4=Complex, Double\n tin = integer(card, 4, 'tin')\n\n #: 0-Set by cell precision\n tout = integer_or_blank(card, 5, 'tout', 0)\n\n nrows = integer(card, 7, 'nrows')\n ncols = integer(card, 8, 'ncols')\n\n assert len(card) == 9, 'len(DMI card) = %i\\ncard=%s' % (len(card), card)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return DMI(name, matrix_form, tin, tout, nrows, ncols,\n GCj, GCi, Real, Complex, comment=comment, finalize=False)\n\n def finalize(self):\n self.GCi = np.asarray(self.GCi)\n self.GCj = np.asarray(self.GCj)\n self.Real = np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @property\n def matrix_type(self):\n \"\"\"\n gets the matrix type\n\n 1 Square matrix (not symmetric)\n 2 General rectangular matrix\n 3 Diagonal matrix (M=number of rows, N = 1)\n #4 Lower triangular factor\n #5 Upper triangular factor\n 6 Symmetric matrix\n 8 Identity matrix (M=number of rows, N = M)\n \"\"\"\n if not 
isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n elif self.matrix_form == 2: # 9 ???\n matrix_type = 'rectangular'\n elif self.matrix_form == 3:\n matrix_type = 'diagonal'\n elif self.matrix_form == 6:\n matrix_type = 'symmetric'\n elif self.matrix_form == 9:\n matrix_type = 'identity'\n else:\n raise NotImplementedError('%s matrix_form=%r is not supported' % (\n self.type, self.matrix_form))\n return matrix_type\n\n @property\n def is_polar(self):\n if self.tin in [1, 2]:\n is_polar = False\n elif self.tin in [3, 4]:\n is_polar = False # TODO: could be wrong...\n else:\n raise NotImplementedError('nrows=%s ncols=%s' % (self.nrows, self.ncols))\n return is_polar\n\n @property\n def shape(self):\n return (self.nrows, self.ncols)\n\n @property\n def ifo(self):\n \"\"\"\n ifo\n #: 4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n\n #: Form of the matrix: 1=Square (not symmetric); 2=Rectangular;\n #: 3=Diagonal (m=nRows,n=1); 4=Lower Triangular; 5=Upper Triangular;\n #: 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = integer(card, 3, 'matrix_form')\n\n \"\"\"\n return self.matrix_form\n #if self.nrows == self.ncols:\n ## symmetric\n #ifo = 6\n ##elif self.nrows > 1 and self.ncols > 1:\n ##ifo = 2\n #else:\n #raise NotImplementedError('matrix_form=%r nrows=%s ncols=%s' % (\n #self.matrix_form, self.nrows, self.ncols))\n #return ifo\n\n def _add_column(self, card, comment=''):\n \"\"\"\n .. 
todo:: support comment\n \"\"\"\n if self.is_complex:\n self._read_complex(card)\n else:\n self._read_real(card)\n\n def _read_real(self, card):\n \"\"\"reads a real DMI column\"\"\"\n # column number\n j = integer(card, 2, 'icol')\n\n # counter\n i = 0\n fields = [interpret_value(field, card) for field in card[3:]]\n\n # Real, starts at A(i1,j), goes to A(i2,j) in a column\n while i < len(fields):\n i1 = fields[i]\n if isinstance(i1, integer_types):\n i += 1\n is_done_reading_floats = False\n while not is_done_reading_floats and i < len(fields):\n real_value = fields[i]\n if isinstance(real_value, integer_types):\n is_done_reading_floats = True\n elif isinstance(real_value, float):\n #print('adding j=%s i1=%s val=%s' % (j, i1, real_value))\n self.GCj.append(j)\n self.GCi.append(i1)\n self.Real.append(real_value)\n i += 1\n i1 += 1\n else:\n real_value = self.Real[-1]\n end_i = fields[i + 1]\n for ii in range(i1, end_i + 1):\n #print('adding j=%s i1=%s val=%s' % (j, ii, real_value))\n self.GCj.append(j)\n self.GCi.append(ii)\n self.Real.append(real_value)\n i += 1\n is_done_reading_floats = True\n\n def _read_complex(self, card):\n \"\"\"reads a complex DMI column\"\"\"\n #msg = 'complex matrices not supported in the DMI reader...'\n #raise NotImplementedError(msg)\n # column number\n j = integer(card, 2, 'icol')\n # counter\n i = 0\n fields = [interpret_value(field, card) for field in card[3:]]\n # Complex, starts at A(i1,j)+imag*A(i1,j), goes to A(i2,j) in a column\n if 0: # pragma: no cover\n is_real = True\n gci = None\n for field in fields:\n if isinstance(field, integer_types):\n gci = field\n elif isinstance(field, float):\n if is_real:\n real = field\n else:\n self.GCj.append(j)\n self.GCi.append(gci)\n self.Real.append(real)\n self.Complex.append(field)\n is_real = not is_real\n\n while i < len(fields):\n i1 = fields[i]\n assert isinstance(i1, int), card\n i += 1\n is_done_reading_floats = False\n while not is_done_reading_floats and i < len(fields):\n value = fields[i]\n #print(\"i=%s len(fields)=%s value=%s\" % (\n #i, len(fields), value))\n if isinstance(value, integer_types):\n is_done_reading_floats = True\n elif isinstance(value, float):\n complex_value = fields[i + 1]\n assert isinstance(complex_value, float), card\n self.GCj.append(j)\n self.GCi.append(i1)\n self.Real.append(value)\n self.Complex.append(complex_value)\n i += 2\n else:\n raise NotImplementedError()\n\n @property\n def is_real(self):\n \"\"\"real vs. complex attribute\"\"\"\n return not self.is_complex\n\n @property\n def is_complex(self):\n \"\"\"real vs. complex attribute\"\"\"\n if self.tin in [3, 4]:\n return True\n return False\n\n def raw_fields(self):\n \"\"\"\n .. warning:: All the writers are bad because Nastran insists on\n making columns a single DMI card. 
This makes\n writing a card much harder, so there are a lot of\n NotImplementedErrors floating about.\n\n This is an invalid method, but is not disabled\n because it's currently needed for checking results\n\n \"\"\"\n list_fields = ['DMI', self.name, 0, self.matrix_form, self.tin,\n self.tout, None, self.nrows, self.ncols]\n\n if self.is_complex:\n for (gci, gcj, reali, imagi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n list_fields += ['DMI', self.name, gcj, gci, reali, imagi]\n else:\n for (gci, gcj, reali) in zip(self.GCi, self.GCj, self.Real):\n list_fields += ['DMI', self.name, gcj, gci, reali]\n return list_fields\n\n def write_card_8(self):\n \"\"\"writes the card in single precision\"\"\"\n return self._write_card(print_card_8)\n\n def _get_real_fields(self, func):\n msg = ''\n uGCj = np.unique(self.GCj)\n for gcj in uGCj:\n i = np.where(gcj == self.GCj)[0]\n gcis = self.GCi[i]\n reals = self.Real[i]\n isort = np.argsort(gcis)\n list_fields = ['DMI', self.name, gcj]\n\n # will always write the first one\n gci_last = -1\n for gci, real in zip(gcis[isort], reals[isort]):\n if gci == gci_last + 1:\n pass\n else:\n list_fields.append(gci)\n list_fields.append(real)\n gci_last = gci\n msg += func(list_fields)\n return msg\n\n def _get_complex_fields(self, func):\n msg = ''\n uGCj = np.unique(self.GCj)\n for gcj in uGCj:\n i = np.where(gcj == self.GCj)[0]\n gcis = self.GCi[i]\n reals = self.Real[i]\n complexs = self.Complex[i]\n isort = np.argsort(gcis)\n list_fields = ['DMI', self.name, gcj]\n\n # will always write the first one\n gci_last = -10\n #print('gcis=%s \\nreals=%s \\ncomplexs=%s' % (\n #gcis[isort], reals[isort], complexs[isort]))\n if max(gcis) == min(gcis):\n list_fields += [gcis[0]]\n for reali, complexi in zip(reals, complexs):\n list_fields.extend([reali, complexi])\n msg += func(list_fields)\n else:\n #print(f'list_fields0 = {list_fields}')\n for i, gci, reali, complexi in zip(count(), gcis[isort], reals[isort], complexs[isort]):\n #print('B', gci, reali, complexi, gci_last)\n if gci != gci_last + 1 and i != 0:\n pass\n else:\n list_fields.append(gci)\n list_fields.append(reali)\n list_fields.append(complexi)\n gci_last = gci\n #print(f'list_fields = {list_fields}')\n msg += func(list_fields)\n return msg\n\n def get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool; default=False\n should the matrix be returned as a sparse matrix.\n Slower for dense matrices.\n apply_symmetry : bool; default=True\n If the matrix is symmetric (ifo=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n\n Returns\n -------\n M : numpy.ndarray or scipy.coomatrix\n the matrix\n rows : dict[int] = [int, int]\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols: dict[int] = [int, int]\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. 
warning:: is_sparse=True WILL fail\n\n \"\"\"\n return get_dmi_matrix(self, is_sparse=is_sparse, apply_symmetry=apply_symmetry)\n\n def write_card_16(self):\n \"\"\"writes the card in single precision\"\"\"\n return self._write_card(print_card_16)\n\n def write_card_double(self):\n \"\"\"writes the card in double precision\"\"\"\n return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card in single/double precision\"\"\"\n msg = '\\n$' + '-' * 80\n msg += '\\n$ %s Matrix %s\\n' % ('DMI', self.name)\n list_fields = ['DMI', self.name, 0, self.matrix_form, self.tin,\n self.tout, None, self.nrows, self.ncols]\n msg += print_card_8(list_fields)\n\n if self.is_complex:\n msg += self._get_complex_fields(func)\n else:\n msg += self._get_real_fields(func)\n return msg\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if size == 8:\n return self.write_card_8()\n if is_double:\n return self.write_card_double()\n return self.write_card_16()\n\n def __repr__(self):\n \"\"\"\n .. todo:: support shortened output format. There's a very low 1000\n DMI cap, I assume this is entries and not matrices.\n\n \"\"\"\n return self.write_card(size=8, is_double=False)\n\n\ndef get_row_col_map(matrix, GCi, GCj, ifo):\n ndim = len(GCi.shape)\n #print('ndim=%s' % ndim)\n #print('GCj=%s' % GCj)\n #print('GCi=%s' % GCi)\n if ndim == 1:\n rows, cols, rows_reversed, cols_reversed = _get_row_col_map_1d(matrix, GCi, GCj, ifo)\n else:\n rows, cols, rows_reversed, cols_reversed = _get_row_col_map_2d(matrix, GCi, GCj, ifo)\n\n nrows = len(rows)\n ncols = len(cols)\n assert nrows > 0, 'nrows=%s' % nrows\n assert ncols > 0, 'ncols=%s' % ncols\n return nrows, ncols, ndim, rows, cols, rows_reversed, cols_reversed\n\ndef _get_row_col_map_1d(matrix, GCi, GCj, ifo):\n \"\"\"helper for ``get_row_col_map``\"\"\"\n rows = {}\n rows_reversed = {}\n\n cols = {}\n cols_reversed = {}\n i = 0\n #nrows = np.unique(GCi)\n #ncols = np.unique(GCj)\n for gci in GCi:\n if gci not in rows:\n rows[gci] = i\n rows_reversed[i] = gci\n i += 1\n\n if ifo == 6:\n # symmetric\n #print(GCj)\n for gcj in GCj:\n if gcj not in rows:\n #print('row.gcj = %s' % str(gcj))\n rows[gcj] = i\n rows_reversed[i] = gcj\n i += 1\n cols = rows\n cols_reversed = rows_reversed\n else:\n j = 0\n for gcj in GCj:\n if gcj not in cols:\n cols[gcj] = j\n cols_reversed[j] = gcj\n j += 1\n return rows, cols, rows_reversed, cols_reversed\n\ndef _get_row_col_map_2d(matrix, GCi, GCj, ifo):\n \"\"\"helper for ``get_row_col_map``\"\"\"\n rows = {}\n rows_reversed = {}\n\n cols = {}\n cols_reversed = {}\n #print('i0=%s j0=%s' % (i, j))\n #nrows = len(GCi)\n #ncols = len(GCj)\n #rows_array = np.zeros((nrows, 2), dtype='int32')\n #cols_array = np.zeros((ncols, 2), dtype='int32')\n #for i, (nid, comp) in enumerate(GCi):\n ##print('i=%s nid=%s comp=%s nrows=%s rows_array.shape=%s' % (\n ##i, nid, comp, nrows, str(rows_array.shape)))\n #rows_array[i, :] = [nid, comp]\n #print('rows_array = \\n%s' % rows_array)\n\n #for j, (nid, comp) in enumerate(GCj):\n #cols_array[j, :] = [nid, comp]\n #print('cols_array = \\n%s' % cols_array)\n\n #print(GCi)\n #print(GCj)\n i = 0\n for (nid, comp) in GCi:\n gci = (nid, comp)\n if gci not in rows:\n #print('row.gci = %s' % str(gci))\n rows[gci] = i\n rows_reversed[i] = gci\n i += 1\n if ifo == 6:\n # symmetric\n for (nid, comp) in GCj:\n gcj = (nid, comp)\n if gcj not in rows:\n #print('row.gcj = %s' % str(gcj))\n rows[gcj] = i\n rows_reversed[i] = gcj\n i += 1\n cols = rows\n cols_reversed = 
rows_reversed\n else:\n j = 0\n for (nid, comp) in GCj:\n gcj = (nid, comp)\n if gcj not in cols:\n #print('col.gcj = %s' % str(gcj))\n cols[gcj] = j\n cols_reversed[j] = gcj\n j += 1\n return rows, cols, rows_reversed, cols_reversed\n\ndef _fill_sparse_matrix(matrix, nrows, ncols):\n \"\"\"helper method for get_matrix\"\"\"\n GCj = array(matrix.GCj, dtype='int32') - 1\n GCi = array(matrix.GCi, dtype='int32') - 1\n reals = array(matrix.Real, dtype='float32')\n\n # TODO: matrix size: is this correct?\n nrows = max(GCi) + 1\n ncols = max(GCj) + 1\n\n dtype = _get_dtype(matrix.is_complex, matrix.tin)\n # TODO: no check for symmetry\n # TODO: no check for dtype\n if matrix.is_complex:\n complexs = array(matrix.Complex, dtype='float32')\n data = array([reals, complexs]).astype(complex)\n else:\n data = reals\n\n if matrix.matrix_form in [1, 6]:\n nrows = max(nrows, ncols)\n ncols = nrows\n\n #A = coo_matrix( (entries,(rows,cols)),shape=(nrows,ncols),dtype=dtype) # test\n sparse_matrix = coo_matrix((data, (matrix.GCi, matrix.GCj)),\n shape=(nrows, ncols), dtype=dtype)\n #sparse_matrix = coo_matrix( (data,(matrix.GCi,matrix.GCj)),shape=(i,j)) # old\n #sparse_matrix = coo_matrix( (data,(matrix.GCi,matrix.GCj)),shape=(nrows,ncols))\n #print(sparse_matrix.toarray())\n #print(sparse_matrix)\n return sparse_matrix\n\n\ndef _fill_dense_rectangular_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry):\n \"\"\"helper method for get_matrix\"\"\"\n is_sparse = False\n if self.is_complex:\n dense_mat = zeros((nrows, ncols), dtype='complex128')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = complex(reali, complexi)\n dense_mat[j, i] = complex(reali, complexi)\n else:\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = complex(reali, complexi)\n else:\n dense_mat = zeros((nrows, ncols), dtype='float64')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = reali\n dense_mat[j, i] = reali\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += 'i=%s row=%s\\n' % (i, row)\n raise RuntimeError(msg)\n else:\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = reali\n except KeyError:\n msg = ('name=%s ndim=%s gci=%s gcj=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n\\n' % (\n self.name, ndim, str(gci), str(gcj), self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n\n gci2 = (gci[0], gci[1])\n gcj2 = (gcj[0], gcj[1])\n if gci2 in rows:\n msg += 'gci/row_key=%s found\\n' % str(gci2)\n else:\n msg += 'gci/row_key=%s not found\\n' % str(gci2)\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n\n if gcj2 in cols:\n msg += '\\ngcj/col_key=%s found\\n' % str(gcj2)\n else:\n msg += '\\ngcj/col_key=%s not found\\n' % str(gcj2)\n msg += 'Cols:\\n'\n for j, col in enumerate(cols):\n msg 
+= ' j=%s row=%s\\n' % (j, col)\n\n msg += '\\n'\n print(msg)\n raise KeyError(msg)\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n\n msg += '\\nCols:\\n'\n for j, row in enumerate(cols):\n msg += ' j=%s row=%s\\n' % (j, col)\n raise RuntimeError(msg)\n return dense_mat\n\n\ndef _fill_dense_column_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry):\n \"\"\"helper method for get_matrix\"\"\"\n is_sparse = False\n if self.is_complex:\n dense_mat = zeros((nrows, ncols), dtype='complex128')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = complex(reali, complexi)\n dense_mat[j, i] = complex(reali, complexi)\n elif self.matrix_form == 2: # rectangular\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = complex(reali, complexi)\n else:\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n else:\n #print('nrows=%s ncols=%s' % (nrows, ncols))\n dense_mat = zeros((nrows, ncols), dtype='float64')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = reali\n dense_mat[j, i] = reali\n else:\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = reali\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n raise RuntimeError(msg)\n return dense_mat\n\ndef get_dmi_matrix(matrix: DMI, is_sparse: bool=False,\n apply_symmetry: bool=True) -> Tuple[np.array, None, None]:\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool\n should the matrix be returned as a sparse matrix (default=True).\n Slower for dense matrices.\n apply_symmetry: bool\n If the matrix is symmetric (matrix_form=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n TODO: unused...\n\n Returns\n -------\n M : ndarray\n the matrix\n rows : None\n unused\n cols : None\n unused\n\n .. 
warning:: is_sparse=True WILL fail\n\n \"\"\"\n ifo = matrix.ifo\n GCj = array(matrix.GCj, dtype='int32') - 1\n GCi = array(matrix.GCi, dtype='int32') - 1\n\n dtype = matrix.tin_dtype\n\n if matrix.is_complex:\n data = matrix.Real + matrix.Complex * 1j\n else:\n data = matrix.Real\n\n if ifo == 2:\n # rectangular\n nrows = matrix.nrows\n ncols = matrix.ncols\n\n M = coo_matrix((data, (GCi, GCj)),\n shape=(nrows, ncols), dtype=dtype)\n if not is_sparse:\n M = M.toarray()\n\n else:\n nrows = matrix.nrows\n ncols = matrix.ncols\n if ifo == 6:\n nrows = max(nrows, ncols)\n ncols = nrows\n M = coo_matrix((data, (GCi, GCj)),\n shape=(nrows, ncols), dtype=dtype)\n if not is_sparse:\n M = M.toarray()\n #else:\n #ifo : int\n # matrix shape\n # 4=Lower Triangular\n # 5=Upper Triangular\n # 6=Symmetric\n # 8=Identity (m=nRows, n=m)\n #raise RuntimeError(matrix.get_stats())\n return M, None, None\n\ndef get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool\n should the matrix be returned as a sparse matrix (default=True).\n Slower for dense matrices.\n apply_symmetry: bool\n If the matrix is symmetric (matrix_form=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n TODO: unused...\n\n Returns\n -------\n M : ndarray\n the matrix\n rows : Dict[(nid, nid)] = float\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols : Dict[](int, int)] = float\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. warning:: is_sparse=True WILL fail\n\n \"\"\"\n nrows, ncols, ndim, rows, cols, rows_reversed, cols_reversed = get_row_col_map(\n self, self.GCi, self.GCj, self.matrix_form)\n #print('rows = ', rows)\n #print('cols = ', cols)\n #print('i=%s j=%s' % (i, j))\n #nrows = len(rows2)\n #ncols = len(cols2)\n\n #A = ss.lil_matrix((3,3), dtype='d') # double precision\n #rows = []\n #cols = []\n #data = []\n #for i in range(3):\n #for j in range(3):\n #k = float((i+1)*(j+1))\n #rows.append(i)\n #cols.append(j)\n #data.append(k)\n #A[i,j] = k\n\n #is_sparse = False\n if is_sparse:\n M = _fill_sparse_matrix(self, nrows, ncols)\n return M\n else:\n if ndim == 1:\n M = _fill_dense_column_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry)\n else:\n M = _fill_dense_rectangular_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry)\n\n #print(M)\n return (M, rows_reversed, cols_reversed)\n\n\ndef _export_dmig_to_hdf5(h5_file, model, dict_obj, encoding):\n \"\"\"export dmigs, dmij, dmiji, dmik, dmi\"\"\"\n for name, dmig in dict_obj.items():\n dmig_group = h5_file.create_group(name)\n dmig_group.create_dataset('tin', data=dmig.tin)\n\n if hasattr(dmig, 'tout'):\n dmig_group.create_dataset('tout', data=dmig.tout)\n\n if dmig.type == 'DMIG' and name == 'UACCEL':\n if dmig.ncol is not None:\n dmig_group.create_dataset('ncol', data=dmig.ncol)\n #load_seq_group = dmig_group.create_group('load_sequences')\n\n nids = []\n dofs = []\n values = []\n for lseq, ncx in sorted(dmig.load_sequences.items()):\n lseq_group = dmig_group.create_group(str(lseq))\n #list_fields += [lseq, None, None]\n for (nid, dof, value) in ncx:\n nids.append(nid)\n dofs.append(int(dof))\n values.append(value)\n\n #print('nids =', nids)\n #print('dofs =', dofs)\n #print('values =', values)\n lseq_group.create_dataset('nids', data=nids)\n lseq_group.create_dataset('dofs', data=dofs)\n lseq_group.create_dataset('values', data=values)\n else:\n if hasattr(dmig, 'nrows') and dmig.nrows is not 
None:\n dmig_group.create_dataset('nrows', data=dmig.nrows)\n if dmig.ncols is not None:\n dmig_group.create_dataset('ncols', data=dmig.ncols)\n if hasattr(dmig, 'polar'):\n dmig_group.create_dataset('polar', data=dmig.polar)\n\n dmig_group.create_dataset('matrix_form', data=dmig.matrix_form)\n dmig_group.create_dataset('tin_dtype', data=dmig.tin_dtype)\n dmig_group.create_dataset('tout_dtype', data=dmig.tout_dtype)\n\n dmig_group.create_dataset('matrix_type', data=dmig.matrix_type)\n dmig_group.create_dataset('is_complex', data=dmig.is_complex)\n dmig_group.create_dataset('is_real', data=dmig.is_real)\n dmig_group.create_dataset('is_polar', data=dmig.is_polar)\n\n dmig_group.create_dataset('GCi', data=dmig.GCi)\n dmig_group.create_dataset('GCj', data=dmig.GCj)\n dmig_group.create_dataset('Real', data=dmig.Real)\n if hasattr(dmig, 'Complex') and dmig.Complex is not None:\n dmig_group.create_dataset('Complex', data=dmig.Complex)\n\n\ndef _export_dmiax_to_hdf5(h5_file, model, dict_obj, encoding):\n \"\"\"export dmiax\"\"\"\n for name, dmiax in dict_obj.items():\n #print(f'exporting {dmiax.type} name={name!r}')\n dmiax_group = h5_file.create_group(name)\n dmiax_group.create_dataset('tin', data=dmiax.tin)\n\n if hasattr(dmiax, 'tout'):\n dmiax_group.create_dataset('tout', data=dmiax.tout)\n\n if hasattr(dmiax, 'nrows') and dmiax.nrows is not None:\n dmiax_group.create_dataset('nrows', data=dmiax.nrows)\n if dmiax.ncols is not None:\n dmiax_group.create_dataset('ncols', data=dmiax.ncols)\n if hasattr(dmiax, 'polar'):\n dmiax_group.create_dataset('polar', data=dmiax.polar)\n\n dmiax_group.create_dataset('matrix_form', data=dmiax.matrix_form)\n dmiax_group.create_dataset('tin_dtype', data=dmiax.tin_dtype)\n dmiax_group.create_dataset('tout_dtype', data=dmiax.tout_dtype)\n\n dmiax_group.create_dataset('matrix_type', data=dmiax.matrix_type)\n dmiax_group.create_dataset('is_complex', data=dmiax.is_complex)\n dmiax_group.create_dataset('is_real', data=dmiax.is_real)\n dmiax_group.create_dataset('is_polar', data=dmiax.is_polar)\n\n gcnj = []\n j_none_flags = []\n\n gcni = []\n i_none_flags = []\n for j, GCNj in enumerate(dmiax.GCNj):\n gj, cj, nj = GCNj\n is_none_flag_j = False\n if nj is None:\n nj = 0\n is_none_flag_j = True\n j_none_flags.append(is_none_flag_j)\n gcnj.append((gj, cj, nj))\n for unused_i, GCNi in enumerate(dmiax.GCNi[j]):\n gi, ci, ni = GCNi\n is_none_flag_i = False\n if ni is None:\n ni = 0\n is_none_flag_i = True\n i_none_flags.append(is_none_flag_i)\n gcni.append((gi, ci, ni, j))\n\n dmiax_group.create_dataset('GCNi_j', data=gcni)\n dmiax_group.create_dataset('GCNj', data=gcnj)\n dmiax_group.create_dataset('i_none_flags', data=i_none_flags)\n dmiax_group.create_dataset('j_none_flags', data=j_none_flags)\n\n dmiax_group.create_dataset('Real', data=dmiax.Real)\n if hasattr(dmiax, 'Complex') and dmiax.Complex is not None:\n dmiax_group.create_dataset('Complex', data=dmiax.Complex)\n\ndef _set_polar(polar):\n if polar in [None, 0, False]:\n polar = 0\n elif polar in [1, True]:\n polar = 1\n else:\n raise ValueError('polar=%r and must be 0 or 1' % polar)\n return polar\n\ndef _get_dtype(is_complex, type_flag):\n if type_flag == 1:\n dtype = 'float32'\n elif type_flag == 2:\n dtype = 'float64'\n elif type_flag == 3:\n dtype = 'complex64'\n elif type_flag == 4:\n dtype = 'complex128'\n elif type_flag == 0:\n if is_complex:\n dtype = 'complex128'\n else:\n dtype = 'float64'\n else:\n raise RuntimeError(\"invalid option for matrix format\")\n return dtype\n\n",
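A minimal usage sketch for the DMI card defined in the file above (not part of either stored source file). It assumes the file is importable as pyNastran.bdf.cards.dmig and that the inherited NastranMatrix class supplies the tin_dtype property used by get_dmi_matrix; the matrix name 'W2GJ' and the numeric values are illustrative only.

# Hedged sketch: build a small rectangular, real DMI, print the card, and
# recover the dense matrix through get_matrix() / get_dmi_matrix().
from pyNastran.bdf.cards.dmig import DMI   # assumed import path

# matrix_form=2 (general rectangular), tin=1 (real, single precision),
# tout=0 (same precision as tin); GCi/GCj are 1-based row/column indices.
dmi = DMI('W2GJ', 2, 1, 0, nrows=2, ncols=2,
          GCj=[1, 1, 2], GCi=[1, 2, 2], Real=[1.0, 2.0, 3.0])

print(dmi.write_card(size=8))               # header entry + one column entry per GCj
M, _, _ = dmi.get_matrix(is_sparse=False)   # dense 2x2 ndarray; rows/cols are returned as None for DMI
# Expected contents (get_dmi_matrix shifts the 1-based GCi/GCj to 0-based):
# M == [[1.0, 0.0],
#       [2.0, 3.0]]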
"from __future__ import annotations\nfrom typing import TYPE_CHECKING\nfrom numpy import array\n\nfrom pyNastran.utils.numpy_utils import integer_types\n#from pyNastran.bdf.bdf_interface.assign_type import (integer, integer_or_blank,\n #double_or_blank, integer_double_or_blank, blank, string_or_blank)\nfrom pyNastran.bdf.bdf_interface.assign_type import integer, double_or_blank, string_or_blank, integer_or_blank\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default, print_card_8\nfrom pyNastran.bdf.field_writer import print_card\nfrom pyNastran.bdf.cards.base_card import _format_comment\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.bdf.bdf import BDF\n\n\nclass BaseCard:\n def __init__(self):\n pass\n\n @property\n def comment(self):\n if hasattr(self, '_comment'):\n return '%s' % self._comment\n return ''\n\n @comment.setter\n def comment(self, new_comment):\n \"\"\"sets a comment\"\"\"\n #comment = new_comment.rstrip()\n #self._comment = comment + '\\n' if comment else ''\n self._comment = _format_comment(new_comment)\n\n def print_card(self, size=8):\n list_fields = self.repr_fields()\n return self.comment + print_card(list_fields, size=size)\n\n\n def __repr__(self):\n \"\"\"\n Prints a card in the simplest way possible\n (default values are left blank).\n \"\"\"\n try:\n return self.print_card()\n except:\n print('problem printing %s card' % self.type)\n fields = self.repr_fields()\n print(\"fields = \", fields)\n raise\n\n\nclass Property_i(BaseCard):\n def __init__(self):\n pass\n\n def Pid(self):\n \"\"\"\n returns the property ID of an property\n\n Returns\n -------\n pid : int\n the Property ID\n \"\"\"\n return self.pid\n\n def Mid(self):\n \"\"\"\n returns the material ID of an element\n\n Returns\n -------\n mid : int\n the Material ID\n \"\"\"\n if isinstance(self.mid, integer_types):\n return self.mid\n else:\n return self.mid_ref.mid\n\n def cross_reference(self, model: BDF) -> None:\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ' which is required by %s pid=%s' % (self.type, self.pid)\n self.mid = model.Material(self.mid, msg)\n self.mid_ref = self.mid\n\n def uncross_reference(self) -> None:\n self.mid = self.Mid()\n del self.mid_ref\n\n\nclass ShellProperty(Property_i):\n def __init__(self):\n Property_i.__init__(self)\n\nclass DeprecatedCompositeShellProperty:\n def MassPerArea(self, iply='all', method='nplies'):\n return self.get_mass_per_area(iply, method)\n\n def Thickness(self, iply='all'):\n return self.get_thickness(iply)\n\n def nPlies(self):\n return self.nplies\n\n def Nsm(self):\n return self.get_nonstructural_mass()\n\n def Rho(self, iply):\n return self.get_density(iply)\n\n def Theta(self, iply):\n return self.get_theta(iply)\n\n def sout(self, iply):\n return self.get_sout(iply)\n\n\nclass CompositeShellProperty(ShellProperty, DeprecatedCompositeShellProperty):\n def __init__(self):\n ShellProperty.__init__(self)\n self.nsm = 0.0\n\n def is_symmetrical(self):\n \"\"\"\n Is the laminate symmetrical?\n\n :returns; True or False\n \"\"\"\n if self.lam == 'SYM':\n return True\n return False\n\n def _adjust_ply_id(self, iply):\n \"\"\"\n Gets the ply ID that's stored in **self.plies**.\n\n When a ply is not symmetric, this function returns the input iply.\n When a ply is symmetrical and the iply value is greater than the\n number of plies, we return the mirrored ply. 
For the case of a\n symmetrical ply, the element will always have an even number of\n layers.\n\n Parameters\n ----------\n iply : int\n the ply ID\n\n Raises\n ------\n - IndexError if iply is invalid\n\n ::\n\n Case 1 (nplies=6, len(plies)=3, lam='SYM'):\n ply 2\n ply 1\n ply 0\n ------- sym\n ply 0 / 3\n ply 1 / 4\n ply 2 / 5\n Ask for ply 3, return ply 0\n Ask for ply 4, return ply 1\n Ask for ply 5, return ply 2\n\n Case 2 (nplies=5, len(plies)=5, lam='NO'):\n ply 5\n ply 4\n ply 3\n ply 1\n ply 0\n Ask for ply 3, return ply 1\n Ask for ply 4, return ply 2\n \"\"\"\n if iply == 'all':\n return iply\n\n nplies = len(self.plies)\n if iply >= nplies:\n if iply < self.nplies:\n iply = iply - nplies\n else:\n raise IndexError('invalid value for iply=%r' % iply)\n elif iply < 0:\n raise IndexError('invalid value for iply=%r' % iply)\n return iply\n\n def get_thickness(self, iply='all'):\n \"\"\"\n Gets the thickness of the :math:`i^{th}` ply.\n\n Parameters\n ----------\n iply : int/str; default='all'\n the string **'all'** (default) or the mass per area of\n the :math:`i^{th}` ply\n\n Returns\n -------\n thickness : float\n the thickness of the ply or plies\n \"\"\"\n nplies = len(self.plies)\n if iply == 'all': # get all layers\n t = 0.\n for iply in range(nplies):\n t += self.get_thickness(iply)\n\n if self.is_symmetrical:\n return t * 2.\n return t\n else:\n iply = self._adjust_ply_id(iply)\n t = self.plies[iply][1]\n return t\n\n @property\n def nplies(self):\n r\"\"\"\n Gets the number of plies including the core.\n\n ::\n\n if Lam=SYM:\n returns nplies*2 (even)\n else:\n returns nplies\n \"\"\"\n nplies = len(self.plies)\n if self.is_symmetrical:\n return nplies * 2\n return nplies\n\n def get_nonstructural_mass(self):\n \"\"\"\n Gets the non-structural mass :math:`i^{th}` ply\n \"\"\"\n return self.nsm\n\n def Mid(self, iply):\n \"\"\"\n Gets the Material ID of the :math:`i^{th}` ply.\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n\n Returns\n -------\n material_id : int\n the material id of the ith ply\n \"\"\"\n iply = self._adjust_ply_id(iply)\n Mid = self.Material(iply)\n if isinstance(Mid, integer_types):\n return Mid\n return Mid.mid\n\n def get_material_ids(self):\n return self.Mids()\n\n def Mids(self):\n \"\"\"\n Gets the material IDs of all the plies\n\n Returns\n -------\n mids : MATx\n the material IDs\n \"\"\"\n mids = []\n for iply in range(self.nplies):\n mids.append(self.Mid(iply))\n #theta = self.get_theta(iply)\n #sout = self.get_sout(iply)\n return mids\n\n def get_density(self, iply):\n \"\"\"\n Gets the density of the :math:`i^{th}` ply\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n \"\"\"\n iply = self._adjust_ply_id(iply)\n mid = self.Material(iply)\n #print(\"rho =\", mid.rho)\n return mid.rho\n\n def Material(self, iply):\n \"\"\"\n Gets the material of the :math:`i^{th}` ply (not the ID unless\n it is not cross-referenced).\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n \"\"\"\n iply = self._adjust_ply_id(iply)\n Mid = self.plies[iply][0]\n return Mid\n\n def get_theta(self, iply):\n \"\"\"\n Gets the ply angle of the :math:`i^{th}` ply (not the ID)\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n \"\"\"\n iply = self._adjust_ply_id(iply)\n Theta = self.plies[iply][2]\n return Theta\n\n def get_sout(self, iply):\n \"\"\"\n Gets the the flag identifying stress/strain outpur of the\n :math:`i^{th}` ply (not the ID). 
default='NO'.\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n \"\"\"\n iply = self._adjust_ply_id(iply)\n sout = self.plies[iply][3]\n return sout\n\n def get_z_locations(self):\n \"\"\"\n Gets the z locations for the various plies.\n\n Parameters\n ----------\n iply : int\n the ply ID (starts from 0)\n\n Assume there are 2 plies, each of 1.0 thick, starting from :math:`z=0`.\n\n >>> pcomp.get_z_locations()\n [0., 1., 2.]\n \"\"\"\n zi = self.z0\n z = [zi]\n for i in range(self.nplies):\n t = self.get_thickness(i)\n zi += t\n z.append(zi)\n return array(z)\n\n def get_mass_per_area(self, iply='all', method='nplies'):\n r\"\"\"\n Gets the Mass/Area for the property.\n\n .. math:: \\frac{m}{A} = \\sum(\\rho t) + nsm\n\n or\n\n .. math:: \\frac{m}{A} - nsm = \\sum(\\rho t)\n\n and\n\n .. math:: \\frac{m_i}{A} = rho_i t_i + nsm_i\n\n where :math:`nsm_i` is the non-structural mass of the\n :math:`i^{th}` ply\n\n :param iply: the string 'all' (default) or the mass per area of\n the :math:`i^{th}` ply\n :param method: the method to compute MassPerArea\n\n * **Case 1 (iply = all)**\n\n method has no effect because the total nsm is defined\n\n * **Case 2 (iply != all)**\n\n method **'nplies'** smear the nsm based on :math:`n_{plies}` (default)\n\n :math:`nsm_i = nsm / n_{plies}` # smear based on nplies\n\n * **Case 3 (iply != all)**\n\n method **'rho*t'** smear the nsm based on the mass distribution\n\n .. math:: nsm_i = \\rho_i t_i \\frac{nsm}{\\sum(\\rho_i t_i)}\n\n .. math:: nsm_i = \\rho_i t_i \\frac{nsm}{\\frac{m}{A} - nsm}\n\n * **Case 4 (iply != all)**\n\n method **'t'** smear the nsm based on the thickness distribution\n\n .. math:: nsm_i = t_i \\frac{nsm}{\\sum(t_i)}\n\n .. note:: final mass calculation will be done later\n \"\"\"\n rhos = [ply[0].get_density() for ply in self.plies]\n return self.get_mass_per_area_rho(rhos, iply, method)\n\n def get_mass_per_area_rho(self, rhos, iply='all', method='nplies'):\n r\"\"\"\n Gets the Mass/Area for the property.\n\n .. math:: \\frac{m}{A} = \\sum(\\rho t) + nsm\n\n or\n\n .. math:: \\frac{m}{A} - nsm = \\sum(\\rho t)\n\n and\n\n .. math:: \\frac{m_i}{A} = rho_i t_i + nsm_i\n\n where :math:`nsm_i` is the non-structural mass of the\n :math:`i^{th}` ply\n\n :param iply: the string 'all' (default) or the mass per area of\n the :math:`i^{th}` ply\n :param method: the method to compute MassPerArea\n\n * **Case 1 (iply = all)**\n\n method has no effect because the total nsm is defined\n\n * **Case 2 (iply != all)**\n\n method **'nplies'** smear the nsm based on :math:`n_{plies}` (default)\n\n :math:`nsm_i = nsm / n_{plies}` # smear based on nplies\n\n * **Case 3 (iply != all)**\n\n method **'rho*t'** smear the nsm based on the mass distribution\n\n .. math:: nsm_i = \\rho_i t_i \\frac{nsm}{\\sum(\\rho_i t_i)}\n\n .. math:: nsm_i = \\rho_i t_i \\frac{nsm}{\\frac{m}{A} - nsm}\n\n * **Case 4 (iply != all)**\n\n method **'t'** smear the nsm based on the thickness distribution\n\n .. math:: nsm_i = t_i \\frac{nsm}{\\sum(t_i)}\n\n .. 
note:: final mass calculation will be done later\n \"\"\"\n assert method in ['nplies', 'rho*t', 't'], 'method=%r is invalid' % method\n nplies = len(self.plies)\n iply = self._adjust_ply_id(iply)\n if iply == 'all': # get all layers\n #mass_per_area_total = m/A = sum(rho*t) + nsm\n #mass_per_area_total = mpa-nsm = sum(rho*t)\n #(m/A)i = rho*t + nsmi\n # where nsmi has two methods\n mass_per_area = 0.\n nplies = len(self.plies)\n for iply in range(nplies):\n #rho = self.get_density(iply)\n rho = rhos[iply]\n t = self.plies[iply][1]\n mass_per_area += rho * t\n\n if self.is_symmetrical:\n return 2. * mass_per_area + self.nsm\n return mass_per_area + self.nsm\n else:\n assert isinstance(iply, int), 'iply must be an integer; iply=%r' % iply\n #rho = self.get_density(iply)\n rho = rhos[iply]\n t = self.plies[iply][1]\n\n if method == 'nplies':\n # we divide by nplies b/c it's nsm per area and\n # we're working on a per ply basis\n # nsmi = nsmi/n # smear based on nplies\n mass_per_area = rho * t + self.nsm / self.nplies\n elif method == 'rho*t':\n # assume you smear the nsm mass based on rho*t distribution\n #nsmi = rho*t / sum(rho*t) * nsm\n #rho*t + nsmi = rho*t + rho*t/(sum(rho*t) + nsm - nsm) * nsm\n #rho*t + nsmi = rho*t + rho*t/(mass_per_area_total - nsm) * nsm\n # = rho*t * (1 + nsm/(mass_per_area_total-nsm))\n mass_per_area_total = self.get_mass_per_area_rho(rhos, iply='all', method='nplies')\n mass_per_area = rho * t * (1.0 + self.nsm / (mass_per_area_total - self.nsm))\n elif method == 't':\n # assume you smear the nsm mass based on t distribution\n #nsmi = t / sum(t) * nsm\n #rho*t + nsmi = rho*t + t/sum(t) * nsm\n #rho*t + nsmi = rho*t + t/thickness_total * nsm\n # = t * (rho + nsm/thickness_total)\n thickness_total = self.get_thickness()\n mass_per_area = t * (rho + self.nsm / thickness_total)\n else:\n raise NotImplementedError('method=%r is not supported' % method)\n return mass_per_area\n\n\nclass PCOMPi(CompositeShellProperty):\n \"\"\"\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+========+========+=========+======+========+========+=======+======+\n | PCOMP | PID | Z0 | NSM | SB | FT | TREF | GE | LAM |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | MID1 | T1 | THETA1 | SOUT1 | MID2 | T2 | THETA2 | SOUT2 | |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | MID3 | T3 | THETA3 | SOUT3 | etc. | | | | |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | PCOMP | 701512 | 0.0+0 | 1.549-2 | | | 0.0+0 | 0.0+0 | SYM |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | | 300704 | 3.7-2 | 0.0+0 | YES | 300704 | 3.7-2 | 45. | YES |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | | 300704 | 3.7-2 | -45. | YES | 300704 | 3.7-2 | 90. 
| YES |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n | | 300705 | .5 | 0.0+0 | YES | | | | |\n +-------+--------+--------+---------+------+--------+--------+-------+------+\n \"\"\"\n type = 'PCOMP'\n\n def __init__(self, card=None, data=None, comment=''): # not done, cleanup\n ShellProperty.__init__(self, card, data)\n\n @property\n def plies(self):\n plies = []\n for mid, t, theta, sout in zip(self.mids, self.thicknesses, self.thetas, self.souts):\n plies.append([mid, t, theta, sout])\n return plies\n\n def __init__(self, pid,\n mids, thicknesses, thetas, souts,\n nsm, sb, ft, tref, ge, lam, z0, comment=''):\n CompositeShellProperty.__init__(self)\n if comment:\n self.comment = comment\n\n #: Property ID\n self.pid = pid\n\n #: Non-Structural Mass per unit Area\n self.nsm = nsm\n self.sb = sb\n\n #: Failure Theory\n #:\n #: ['HILL', 'HOFF', 'TSAI', 'STRN', None]\n self.ft = ft\n\n #: Reference Temperature (default=0.0)\n self.tref = tref\n self.ge = ge\n\n #: symmetric flag - default = No Symmetry (NO)\n #if lam is None: # TODO: is NO an option?\n #lam = 'NO'\n self.lam = lam\n self.mids = mids\n self.thicknesses = thicknesses\n self.thetas = thetas\n self.souts = souts\n if z0 is None:\n z0 = -0.5 * self.Thickness()\n self.z0 = z0\n\n assert self.ft in ['HILL', 'HOFF', 'TSAI', 'STRN', 0.0, None], 'ft=%r' % self.ft\n # TODO: is NO an option?\n assert self.lam in [None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE'], 'lam=%r is invalid' % self.lam\n\n @classmethod\n def add_card(cls, card, comment=''):\n pid = integer(card, 1, 'pid')\n\n # z0 is field 2 and is calculated at the end because we need the\n # thickness first\n #self.z0 = double_or_blank(card, 1, 'pid')\n\n nsm = double_or_blank(card, 3, 'nsm', 0.0)\n sb = double_or_blank(card, 4, 'sb', 0.0)\n ft = string_or_blank(card, 5, 'ft')\n tref = double_or_blank(card, 6, 'tref', 0.0)\n ge = double_or_blank(card, 7, 'ge', 0.0)\n lam = string_or_blank(card, 8, 'lam') # default=blank -> nothing\n\n # -8 for the first 8 fields (1st line)\n nply_fields = card.nfields - 9\n\n # counting plies\n nmajor = nply_fields // 4\n nleftover = nply_fields % 4\n if nleftover:\n nmajor += 1\n nplies = nmajor\n\n mid_last = None\n thick_last = None\n ply = None\n iply = 1\n\n # supports single ply per line\n mids = []\n thicknesses = []\n thetas = []\n souts = []\n for i in range(9, 9 + nplies * 4, 4):\n actual = card.fields(i, i + 4)\n mid = integer_or_blank(card, i, 'mid', mid_last)\n t = double_or_blank(card, i + 1, 't', thick_last)\n theta = double_or_blank(card, i + 2, 'theta', 0.0)\n sout = string_or_blank(card, i + 3, 'sout', 'NO')\n\n if t <= 0.:\n msg = ('thickness of PCOMP layer is invalid pid=%s'\n ' iLayer=%s t=%s ply=[mid,t,theta,'\n 'sout]=%s' % (pid, iply, t, ply))\n raise RuntimeError(msg)\n\n # if this card has 2 plies on the line\n if actual != [None, None, None, None]:\n mids.append(mid)\n thicknesses.append(t)\n thetas.append(theta)\n souts.append(sout)\n iply += 1\n mid_last = mid\n thick_last = t\n #print(\"nplies = \",nplies)\n\n #self.plies = []\n #if self.lam == 'SYM':\n # if nplies%2 == 1: # 0th layer is the core layer\n # # cut the thickness in half to make the ply have an\n # # even number of plies, is there a better way???\n # plies[0][1] = plies[0][1]/2.\n #\n # pliesLower = plies.reverse()\n # self.plies = pliesLower+plies\n # #print str(self)\n z0 = double_or_blank(card, 2, 'z0')\n return PCOMPi(pid, mids, thicknesses, thetas, souts, nsm, sb, ft, tref, ge,\n lam, z0, comment=comment)\n\n def 
update(self, pid_map, mid_map):\n \"\"\"\n maps = {\n 'node' : nid_map,\n 'property' : pid_map,\n }\n \"\"\"\n pid2 = pid_map[self.pid]\n mids2 = [mid_map[mid] if mid != 0 else 0 for mid in self.mids]\n self.pid = pid2\n self.mids = mids2\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n #data_in = [\n #pid, z0, nsm, sb, ft, Tref, ge,\n #is_symmetrical, Mid, T, Theta, Sout]\n pid = data[0]\n z0 = data[1]\n nsm = data[2]\n sb = data[3]\n ft = data[4]\n tref = data[5]\n ge = data[6]\n lam = data[7]\n Mid = data[8]\n T = data[9]\n Theta = data[10]\n Sout = data[11]\n\n if lam == 'NO':\n lam = None\n\n #ply = [mid,t,theta,sout]\n mids = []\n thicknesses = []\n thetas = []\n souts = []\n for (mid, t, theta, sout) in zip(Mid, T, Theta, Sout):\n if sout == 0:\n sout = 'NO'\n elif sout == 1:\n sout = 'YES'\n #elif sout == 2: #: .. todo:: what?!!\n #sout = 'YES'\n #elif sout == 3: #: .. todo:: what?!!\n #sout = 'YES'\n else:\n raise RuntimeError('unsupported sout. sout=%r and must be 0 or 1.'\n '\\nPCOMP = %s' % (sout, data))\n mids.append(mid)\n thicknesses.append(t)\n thetas.append(theta)\n souts.append(sout)\n if ft == 0:\n ft = None\n elif ft == 1:\n ft = 'HILL'\n elif ft == 2:\n ft = 'HOFF'\n elif ft == 3:\n ft = 'TSAI'\n elif ft == 4:\n ft = 'STRN'\n else:\n raise RuntimeError('unsupported ft. pid=%s ft=%r.'\n '\\nPCOMP = %s' % (pid, ft, data))\n return PCOMPi(pid, mids, thicknesses, thetas, souts,\n nsm, sb, ft, tref, ge, lam, z0, comment=comment)\n\n def raw_fields(self):\n list_fields = ['PCOMP', self.pid, self.z0, self.nsm, self.sb, self.ft,\n self.tref, self.ge, self.lam, ]\n for (iply, ply) in enumerate(self.plies):\n (_mid, t, theta, sout) = ply\n mid = self.Mid(iply)\n list_fields += [mid, t, theta, sout]\n return list_fields\n\n def repr_fields(self):\n nsm = set_blank_if_default(self.nsm, 0.0)\n sb = set_blank_if_default(self.sb, 0.0)\n tref = set_blank_if_default(self.tref, 0.0)\n ge = set_blank_if_default(self.ge, 0.0)\n z0 = set_blank_if_default(self.z0, -0.5 * self.get_thickness())\n\n list_fields = ['PCOMP', self.pid, z0, nsm, sb, self.ft, tref, ge, self.lam]\n for (iply, ply) in enumerate(self.plies):\n (_mid, t, theta, sout) = ply\n mid = self.Mid(iply)\n #theta = set_blank_if_default(theta,0.0)\n sout = set_blank_if_default(sout, 'NO')\n list_fields += [mid, t, theta, sout]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n return self.comment + print_card_8(card)\n\n\n",
"from itertools import count\nfrom typing import List\n\nimport numpy as np\nfrom numpy import zeros, searchsorted, ravel\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import OES_Object\nfrom pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header\n\n\nclass RealBush1DStressArray(OES_Object):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.ielement = 0\n self.nelements = 0 # result specific\n\n @property\n def is_stress(self) -> bool:\n return True\n\n @property\n def is_real(self) -> bool:\n return True\n\n @property\n def is_complex(self) -> bool:\n return False\n\n @property\n def nnodes_per_elements(self) -> int:\n if self.element_type == 40:\n nnodes_per_element = 1\n else:\n raise NotImplementedError(self.element_type)\n return nnodes_per_element\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def _get_msgs(self):\n words = [\n #' ELEMENT-ID = 104'\n ' S T R E S S E S ( F O R C E S ) I N B U S H 1 D E L E M E N T S ( C B U S H 1 D )\\n',\n ' \\n',\n ' AXIAL AXIAL AXIAL AXIAL AXIAL PLASTIC\\n',\n ' TIME FORCE DISPLACEMENT VELOCITY STRESS STRAIN STRAIN STATUS\\n',\n #' 2.000000E-02 1.960396E+01 1.960396E-04 1.940792E-02 1.960396E+01 1.960396E-04 0.000000E+00 \\n',\n ]\n return words\n # raise NotImplementedError('%s needs to implement _get_msgs' % self.__class__.__name__)\n\n def get_headers(self) -> List[str]:\n headers = ['element_force', 'axial_displacement', 'axial_velocity',\n 'axial_stress', 'axial_strain', 'plastic_strain']\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealBush1DStressArray\"\"\"\n #print(\"self.ielement =\", self.ielement)\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s\" % (\n #self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements,\n #self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = zeros(self.ntimes, dtype=dtype)\n self.element = zeros(self.ntotal, dtype='int32')\n self.is_failed = zeros((self.ntimes, self.ntotal, 1), dtype='int32')\n\n # [element_force, axial_displacement, axial_velocity, axial_stress, axial_strain, plastic_strain, is_failed]\n self.data = zeros((self.ntimes, self.ntotal, 6), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n if self.nonlinear_factor not in (None, np.nan):\n # Time 0.02 0.04 0.06\n # ElementID Item\n #104 element_force 38.633198 113.462921 220.903046\n # axial_displacement 0.000194 0.000761 0.001673\n # axial_velocity 0.019220 0.037323 0.053638\n # axial_stress NaN NaN NaN\n # axial_strain NaN NaN NaN\n # plastic_strain 0.000000 0.000000 0.000000\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = 
self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n #Static element_force axial_displacement axial_velocity axial_stress axial_strain plastic_strain\n #ElementID\n #17801 1.0 0.1 0.0 0.0 0.0 0.0\n #17807 1.0 0.1 0.0 0.0 0.0 0.0\n data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)\n data_frame.index.name = 'ElementID'\n data_frame.columns.names = ['Static']\n self.data_frame = data_frame\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n\n i = 0\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid, in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n #i_not_nan = np.isnp.where(t1 != np.nan)[0]\n i_not_nan = np.isfinite(t1)\n (axial_stress1, equiv_stress1, total_strain1, eff_plastic_creep_strain1, eff_creep_strain1, linear_torsional_stress1) = t1\n (axial_stress2, equiv_stress2, total_strain2, eff_plastic_creep_strain2, eff_creep_strain2, linear_torsional_stress2) = t2\n if not np.allclose(t1[i_not_nan], t2[i_not_nan]):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s)\\n' % (\n eid,\n axial_stress1, equiv_stress1, total_strain1, eff_plastic_creep_strain1, eff_creep_strain1, linear_torsional_stress1,\n axial_stress2, equiv_stress2, total_strain2, eff_plastic_creep_strain2, eff_creep_strain2, linear_torsional_stress2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, element_force, axial_displacement, axial_velocity,\n axial_stress, axial_strain, plastic_strain, is_failed):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n # pyNastran_examples\\move_tpl\\ar29scb1.op2\n #print('dt=%s eid=%s force=%s' % (dt, eid, element_force))\n #print('element.shape=%s' % self.element.shape)\n #print('data.shape=%s' % str(self.data.shape))\n #print('times.shape=%s' % self._times.shape)\n #print('itime=%s ielement=%s itotal=%s' % (self.itime, self.itotal, self.ielement))\n self._times[self.itime] = dt\n self.element[self.itotal] = eid\n self.is_failed[self.itime, self.itotal, 0] = is_failed\n self.data[self.itime, self.itotal, :] = [\n element_force, axial_displacement, axial_velocity,\n axial_stress, axial_strain, plastic_strain]\n self.itotal += 1\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return ['<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.ntotal\n ntimes = self.ntimes\n #ntotal = self.ntotal\n nelements = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n\n n = len(headers)\n assert n == self.data.shape[2], 'nheaders=%s shape=%s' % (n, 
str(self.data.shape))\n msg.append(' data: [%s, ntotal, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' element.shape = %s\\n' % str(self.element.shape).replace('L', ''))\n msg.append(' is_failed.shape = %s\\n' % str(self.is_failed.shape).replace('L', ''))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def get_element_index(self, eids):\n # elements are always sorted; nodes are not\n itot = searchsorted(eids, self.element) #[0]\n return itot\n\n def eid_to_element_node_index(self, eids):\n ind = ravel([searchsorted(self.element == eid) for eid in eids])\n return ind\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n msg = self._get_msgs()\n ntimes = self.data.shape[0]\n eids = self.element\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg))\n\n #[element_force, axial_displacement, axial_velocity, axial_stress, axial_strain, plastic_strain, is_failed]\n element_force = self.data[itime, :, 0]\n axial_displacement = self.data[itime, :, 1]\n axial_velocity = self.data[itime, :, 2]\n axial_stress = self.data[itime, :, 3]\n axial_strain = self.data[itime, :, 4]\n plastic_strain = self.data[itime, :, 5]\n is_failed = self.is_failed[itime, :, 0]\n\n for (i, eid, element_forcei, axial_displacementi, axial_velocityi, axial_stressi,\n axial_straini, plastic_straini, is_failedi) in zip(\n count(), eids, element_force, axial_displacement, axial_velocity,\n axial_stress, axial_strain, plastic_strain, is_failed):\n\n vals = [element_forcei, axial_displacementi, axial_velocityi, axial_stressi,\n axial_straini, plastic_straini, is_failedi]\n vals2 = write_floats_13e(vals)\n [element_forcei, axial_displacementi, axial_velocityi, axial_stressi,\n axial_straini, plastic_straini, is_failedi] = vals2\n f06_file.write(\n '0%8i %-13s %-13s %-13s %-13s %-13s %-13s %s\\n'\n % (eid, element_forcei, axial_displacementi, axial_velocityi, axial_stressi,\n axial_straini, plastic_straini, is_failedi))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n if self.nonlinear_factor in (None, np.nan):\n page_num -= 1\n return page_num\n",
"from typing import List\n\nimport numpy as np\nfrom numpy import zeros, searchsorted, ravel\n\nfrom pyNastran.op2.tables.oes_stressStrain.real.oes_objects import (\n StressObject, StrainObject, OES_Object)\nfrom pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header\nfrom pyNastran.utils.numpy_utils import integer_types\n\n#oxx = 0. # max from bending and axial\n#txz = 1. # from transverse shear; txz=Vz/(Kz*A)\n#txy = 1. # from transverse shear; txy=Vz/(Ky*A)\n#t = 2. # from torsional stress; t=T*C/J\n#ovm = (oxx**2 + 3 * (txy**2 + txz**2 + t**2))**0.5\n\nclass RandomBarArray(OES_Object):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.ielement = 0\n self.nelements = 0 # result specific\n\n #if not is_sort1:\n #raise NotImplementedError('SORT2')\n #assert dt is not None\n #self.add = self.add_sort2\n #self.add_new_eid = self.add_new_eid_sort2\n #self.addNewNode = self.addNewNodeSort2\n\n @property\n def is_real(self):\n return True\n\n @property\n def is_complex(self):\n return False\n\n def _reset_indices(self):\n self.itotal = 0\n if self.table_name not in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:\n self.ielement = 0\n\n def _get_msgs(self):\n raise NotImplementedError('%s needs to implement _get_msgs' % self.__class__.__name__)\n\n def get_headers(self):\n raise NotImplementedError('%s needs to implement get_headers' % self.__class__.__name__)\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealBarArray\"\"\"\n #print(\"self.ielement =\", self.ielement)\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n\n # buggy MSC 2005 (was this ever fixed?)\n # NX doesn't have this bug\n if self.table_name in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:\n self.ntotal = self.nelements\n\n #if self.element_type == 34:\n #nnodes_per_element = 1\n #else:\n #raise NotImplementedError(self.element_type)\n\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s\" % (\n #self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = zeros(self.ntimes, dtype=dtype)\n self.element = zeros(self.ntotal, dtype='int32')\n\n #[s1a, s2a, s3a, s4a, axial,\n # s1b, s2b, s3b, s4b]\n self.data = zeros((self.ntimes, self.ntotal, 9), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n if self.nonlinear_factor not in (None, np.nan):\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = pd.Panel(self.data, items=column_values, major_axis=self.element, minor_axis=headers).to_frame()\n self.data_frame.columns.names = column_names\n self.data_frame.index.names = ['ElementID', 'Item']\n else:\n self.data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()\n self.data_frame.columns.names = ['Static']\n 
self.data_frame.index.names = ['ElementID', 'Item']\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid, in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1) = t1\n (axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s)\\n' % (\n eid,\n axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1,\n axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_new_eid_sort1(self, dt, eid,\n s1a, s2a, s3a, s4a, axial,\n s1b, s2b, s3b, s4b):\n\n assert isinstance(eid, integer_types)\n assert eid > 0, eid\n self._times[self.itime] = dt\n self.element[self.itotal] = eid\n self.data[self.itime, self.itotal, :] = [s1a, s2a, s3a, s4a, axial,\n s1b, s2b, s3b, s4b]\n self.itotal += 1\n self.ielement += 1\n\n #def add_sort1(self, dt, eid, nodeID, fd, oxx, oyy, txy, angle, majorP, minorP, ovm):\n #assert eid is not None\n #msg = \"i=%s dt=%s eid=%s nodeID=%s fd=%g oxx=%g oyy=%g \\ntxy=%g angle=%g major=%g minor=%g ovmShear=%g\" % (\n #self.itotal, dt, eid, nodeID, fd, oxx, oyy, txy, angle, majorP, minorP, ovm)\n ##print(msg)\n #if isinstance(nodeID, str):\n #nodeID = 0\n ##assert isinstance(nodeID, integer_types), nodeID\n #self.element_node[self.itotal, :] = [eid, nodeID]\n #self.data[self.itime, self.itotal, :] = [fd, oxx, oyy, txy, angle, majorP, minorP, ovm]\n #self.itotal += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.ntotal\n ntimes = self.ntimes\n unused_ntotal = self.ntotal\n nelements = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n\n n = len(headers)\n assert n == self.data.shape[2], 'nheaders=%s shape=%s' % (n, str(self.data.shape))\n msg.append(' data: [%s, ntotal, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element.shape = %s\\n' % str(self.element.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def get_element_index(self, eids):\n # elements are always sorted; nodes are not\n itot = 
searchsorted(eids, self.element) #[0]\n return itot\n\n def eid_to_element_node_index(self, eids):\n ind = ravel([searchsorted(self.element == eid) for eid in eids])\n #ind = searchsorted(eids, self.element)\n #ind = ind.reshape(ind.size)\n #ind.sort()\n return ind\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n msg = self._get_msgs()\n ntimes = self.data.shape[0]\n eids = self.element\n #print('CBAR ntimes=%s ntotal=%s' % (ntimes, ntotal))\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg))\n\n s1a = self.data[itime, :, 0]\n s2a = self.data[itime, :, 1]\n s3a = self.data[itime, :, 2]\n s4a = self.data[itime, :, 3]\n\n axial = self.data[itime, :, 4]\n s1b = self.data[itime, :, 5]\n s2b = self.data[itime, :, 6]\n s3b = self.data[itime, :, 7]\n s4b = self.data[itime, :, 8]\n\n for (eid, s1ai, s2ai, s3ai, s4ai, axiali, s1bi, s2bi, s3bi, s4bi) in zip(\n eids, s1a, s2a, s3a, s4a, axial, s1b, s2b, s3b, s4b):\n\n vals = [s1ai, s2ai, s3ai, s4ai, axiali,\n s1bi, s2bi, s3bi, s4bi,]\n vals2 = write_floats_13e(vals)\n [s1ai, s2ai, s3ai, s4ai, axiali,\n s1bi, s2bi, s3bi, s4bi,] = vals2\n\n f06_file.write('\\n %-13s ENDA %-13s %-13s %-13s %-13s %s\\n'\n '0 %-13s ENDB %-13s %-13s %-13s %s\\n'\n % (eid, s1ai, s2ai, s3ai, s4ai, axiali,\n '', s1bi, s2bi, s3bi, s4bi))\n #f06_file.write('0%8i %-13s %-13s %-13s %-13s %s\\n'\n #' %8s %-13s %-13s %-13s %s\\n'\n #% (eid, s1ai, s2ai, s3ai, s4ai, axiali,\n #'', s1bi, s2bi, s3bi, s4bi))\n\n f06_file.write(page_stamp % page_num)\n page_num += 1\n\n if self.nonlinear_factor in (None, np.nan):\n page_num -= 1\n return page_num\n\n\nclass RandomBarStressArray(RandomBarArray, StressObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RandomBarArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StressObject.__init__(self, data_code, isubcase)\n\n def get_headers(self) -> List[str]:\n headers = ['s1a', 's2a', 's3a', 's4a', 'axial',\n 's1b', 's2b', 's3b', 's4b']\n return headers\n\n def _get_msgs(self):\n if self.element_type == 34:\n pass\n else:\n raise NotImplementedError(self.element_type)\n\n msg = [\n ' S T R E S S E S I N B A R E L E M E N T S ( C B A R )\\n',\n ]\n if self.table_name in ['OESATO1', 'OESATO2']:\n msg += [' ( AUTO-CORRELATION FUNCTION )\\n']\n elif self.table_name in ['OESPSD1', 'OESPSD2']:\n msg += [' ( POWER SPECTRAL DENSITY FUNCTION )\\n']\n elif self.table_name in ['OESRMS1', 'OESRMS2', 'OESXRMS1']:\n msg += [' ( ROOT MEAN SQUARE )\\n']\n elif self.table_name in ['OESCRM1', 'OESCRM2']:\n msg += [' ( CUMULATIVE ROOT MEAN SQUARE )\\n']\n elif self.table_name in ['OESNO1', 'OESNO2']:\n msg += [' ( NUMBER OF ZERO CROSSINGS )\\n']\n else:\n raise NotImplementedError(self.table_name)\n msg += [\n '\\n ELEMENT SA1 SA2 SA3 SA4 AXIAL\\n',\n ' ID. 
SB1 SB2 SB3 SB4 STRESS\\n',\n ]\n return msg\n\n\nclass RandomBarStrainArray(RandomBarArray, StrainObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RandomBarArray.__init__(self, data_code, is_sort1, isubcase, dt)\n StrainObject.__init__(self, data_code, isubcase)\n\n def get_headers(self) -> List[str]:\n headers = ['e1a', 'e2a', 'e3a', 'e4a', 'axial',\n 'e1b', 'e2b', 'e3b', 'e4b',]\n return headers\n\n def _get_msgs(self):\n if self.element_type == 34:\n pass\n else:\n raise NotImplementedError(self.element_type)\n\n msg = [\n ' S T R A I N S I N B A R E L E M E N T S ( C B A R )\\n',\n ]\n if self.table_name in ['OSTRATO1', 'OSTRATO2']:\n msg += [' ( AUTO-CORRELATION FUNCTION )\\n']\n elif self.table_name in ['OSTRPSD1', 'OSTRPSD2']:\n msg += [' ( POWER SPECTRAL DENSITY FUNCTION )\\n']\n elif self.table_name in ['OSTRRMS1', 'OSTRRMS2']:\n msg += [' ( ROOT MEAN SQUARE )\\n']\n elif self.table_name in ['OSTRCRM1', 'OSTRCRM2']:\n msg += [' ( CUMULATIVE ROOT MEAN SQUARE )\\n']\n elif self.table_name in ['OSTRNO1', 'OSTRNO2']:\n msg += [' ( NUMBER OF ZERO CROSSINGS )\\n']\n else:\n raise NotImplementedError(self.table_name)\n msg += [\n '\\n ELEMENT SA1 SA2 SA3 SA4 AXIAL\\n',\n ' ID. SB1 SB2 SB3 SB4 STRAIN\\n',\n ]\n\n return msg\n\n",
"#pylint disable=C0103,C0301\nfrom typing import List\n\nimport numpy as np\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.op2.result_objects.op2_objects import BaseElement\nfrom pyNastran.f06.f06_formatting import (\n write_float_13e, write_floats_13e, _eigenvalue_header)\nfrom pyNastran.op2.result_objects.element_table_object import RealElementTableArray\n\n\nclass Real1DHeatFluxArray(BaseElement):\n \"\"\"1-ROD, 2-BEAM, 3-TUBE, 10-CONROD, 34-BAR, 69-BEND\"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.element_type = None\n self.element_name = None\n BaseElement.__init__(self, data_code, isubcase)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.nelements = 0 # result specific\n self.itotal = 0\n self.ielement = 0\n\n if not is_sort1:\n raise NotImplementedError('SORT2')\n\n @property\n def is_real(self) -> bool:\n \"\"\"is the result real?\"\"\"\n return True\n\n @property\n def is_complex(self) -> bool:\n \"\"\"is the result complex?\"\"\"\n return False\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def get_headers(self) -> List[str]:\n headers = [\n 'xgrad', 'ygrad', 'zgrad', 'xflux', 'yflux', 'zflux'\n ]\n return headers\n\n #def get_headers(self):\n #headers = ['axial', 'torque']\n #return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the Real1DHeatFluxArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n if self.is_built:\n return\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = np.zeros(self.ntimes, dtype=dtype)\n self.element = np.zeros(self.nelements, dtype='int32')\n self.element_data_type = np.empty(self.nelements, dtype='|U8')\n\n #[xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.data = np.zeros((self.ntimes, self.ntotal, 6), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n assert 0 not in self.element\n if self.nonlinear_factor not in (None, np.nan):\n #LoadStep 1.0\n #ElementID Item\n #14 xgrad 0.000000e+00\n # ygrad 1.401298e-45\n # zgrad 1.401298e-45\n # xflux -0.000000e+00\n # yflux 1.401298e-45\n # zflux 1.401298e-45\n #15 xgrad -2.842171e-14\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n data_frame = pd.Panel(self.data,\n major_axis=self.element,\n minor_axis=headers).to_frame()\n data_frame.columns.names = ['Static']\n data_frame.index.names = ['ElementID', 'Item']\n self.data_frame = data_frame\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if not np.array_equal(self.element, table.element):\n assert self.element.shape == table.element.shape, 'element shape=%s table.shape=%s' % (self.element.shape, table.element.shape)\n msg = 'table_name=%r 
class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n msg += 'Eid, EType\\n'\n for (eid, etype, eid2, etype2) in zip(self.element, self.element_data_type,\n table.element, table.element_data_type):\n msg += '(%s, %s), (%s, %s)\\n' % (eid, etype, eid2, etype2)\n print(msg)\n raise ValueError(msg)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n i = 0\n for itime in range(self.ntimes):\n for ie, e in enumerate(self.element):\n eid = e\n t1 = self.data[itime, ie, :]\n t2 = table.data[itime, ie, :]\n (xgrad1, ygrad1, zgrad1, xflux1, yflux1, zflux1) = t1\n (xgrad2, ygrad2, zgrad2, xflux2, yflux2, zflux2) = t2\n\n if not np.array_equal(t1, t2):\n msg += (\n '%s (%s, %s, %s, %s, %s, %s)\\n'\n ' (%s, %s, %s, %s, %s, %s)\\n' % (\n eid,\n xgrad1, ygrad1, zgrad1, xflux1, yflux1, zflux1,\n xgrad2, ygrad2, zgrad2, xflux2, yflux2, zflux2,\n ))\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n #print(msg)\n if i > 0:\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, etype, xgrad, ygrad, zgrad, xflux, yflux, zflux):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element[self.ielement] = eid\n self.element_data_type[self.ielement] = etype\n self.data[self.itime, self.ielement, :] = [xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n #ntotal = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (\n ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n #msg.append(' element type: %s\\n' % self.element_type)\n #msg.append(' element name: %s\\n ' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n msg_temp = [\n ' F I N I T E E L E M E N T T E M P E R A T U R E G R A D I E N T S A N D F L U X E S \\n'\n ' \\n'\n ' ELEMENT-ID EL-TYPE X-GRADIENT Y-GRADIENT Z-GRADIENT X-FLUX Y-FLUX Z-FLUX\\n'\n #' 10 ROD -1.889713E+02 3.779427E+04'\n ]\n ntimes = self.data.shape[0]\n\n eids = self.element\n etype = self.element_data_type\n for itime in range(ntimes):\n dt = self._times[itime] # TODO: rename this...\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n xgrad = self.data[itime, :, 0]\n #ygrad = self.data[itime, :, 1]\n #zgrad = self.data[itime, :, 2]\n xflux = self.data[itime, :, 1]\n #yflux = self.data[itime, :, 4]\n #zflux = self.data[itime, :, 5]\n\n for (eid, etypei, xgradi, xfluxi) in zip(eids, etype, xgrad, xflux):\n (sxgradi, 
sxfluxi) = write_floats_13e([xgradi, xfluxi])\n\n # TODO: hopa is probably the wrong type\n f06_file.write(' %8i %8s %-13s %-13s %-13s %s\\n' % (\n eid, etypei, sxgradi, '', '', sxfluxi))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n\nclass RealHeatFluxVU3DArray(BaseElement):\n \"\"\"189-VUQUAD 190-VUTRIA,191-VUBEAM\"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.element_type = None\n self.element_name = None\n BaseElement.__init__(self, data_code, isubcase)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.nelements = 0 # result specific\n self.ielement = 0\n self.itotal = 0\n self.itime = 0\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def get_headers(self) -> List[str]:\n headers = [\n 'xgrad', 'ygrad', 'zgrad', 'xflux', 'yflux', 'zflux',\n ]\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealHeatFluxVU3DArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = np.zeros(self.ntimes, dtype=dtype)\n self.element_parent = np.zeros((self.nelements, 2), dtype='int32')\n\n self.vugrid = np.zeros((self.ntimes, self.ntotal), dtype='int32')\n #[xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.data = np.zeros((self.ntimes, self.ntotal, 6), dtype='float32')\n\n def _build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n # TODO: fix me\n headers = self.get_headers()\n #assert 0 not in self.element\n element_parent = [\n self.element_parent[:, 0],\n self.element_parent[:, 1],\n ]\n if self.nonlinear_factor not in (None, np.nan):\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = pd.Panel(self.data, items=column_values,\n major_axis=element_parent,\n minor_axis=headers).to_frame()\n self.data_frame.columns.names = column_names\n else:\n self.data_frame = pd.Panel(self.data,\n major_axis=element_parent,\n minor_axis=headers).to_frame()\n self.data_frame.columns.names = ['Static']\n self.data_frame.index.names = ['ElementID', 'Parent', 'Item']\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if not np.array_equal(self.element_parent, table.element_parent):\n assert self.element_parent.shape == table.element_parent.shape, 'element_parent shape=%s table.shape=%s' % (\n self.element_parent.shape, table.element_parent.shape)\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n msg += 'Eid, Parent, Coord, iCoord\\n'\n for (eid1, parent1, coord1, icord1), (eid2, parent2, coord2, icord2) in zip(\n self.element_parent, table.element_parent_coord_icord):\n msg += '(%s, %s, %s, %s) (%s, %s, %s, %s)\\n' % (\n eid1, parent1, coord1, icord1,\n eid2, parent2, coord2, icord2)\n print(msg)\n raise ValueError(msg)\n if not 
np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n i = 0\n #eids = self.element_node[:, 0]\n #ntotal = self.data.shape[2]\n for itime in range(self.ntimes):\n vugrids = self.int_data[itime, :, :]\n for j, vugrid in enumerate(vugrids):\n t1 = self.data[itime, j, :]\n t2 = table.data[itime, j, :]\n if not np.array_equal(t1, t2):\n (xgrad1, ygrad1, zgrad1, xflux1, yflux1, zflux1) = t1\n (xgrad2, ygrad2, zgrad2, xflux2, yflux2, zflux2) = t2\n msg += (\n '(%s, %s) (%s, %s, %s, %s, %s, %s) (%s, %s, %s, %s, %s, %s)\\n' % (\n j, vugrid,\n xgrad1, ygrad1, zgrad1, xflux1, yflux1, zflux1,\n xgrad2, ygrad2, zgrad2, xflux2, yflux2, zflux2,\n ))\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n #print(msg)\n if i > 0:\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, parent, grad_fluxes):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n #icord,\n self.element_parent[self.ielement, :] = [eid, parent]\n #try:\n #self.element_parent[self.ielement, :] = [eid, parent]\n #print([self.ielement, eid, parent])\n #except:\n #print(['*', self.ielement, eid, parent])\n\n for grad_flux in grad_fluxes:\n #print(self.itime, self.itotal, grad_flux)\n self.vugrid[self.itime, self.itotal] = grad_flux[0]\n self.data[self.itime, self.itotal, :] = grad_flux[1:]\n self.itotal += 1\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n #ntotal = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (\n ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_type)\n msg.append(' element name: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n ## TODO: add the f06 header\n msg_temp = [\n ' T E M P E R A T U R E G R A D I E N T S A N D F L U X E S I N T R I A N G U L A R P - E L E M E N T S\\n'\n ' VU-ELEMENT ID= 100005001, P-ELEMENT ID = 5, OUTPUT COORD. ID= (LOCAL), P OF EDGES = 2 2 2\\n' # TODO: wrong\n ' LOCAL X DIR. 
= PROJECTED +X DIR., LOCAL NORMAL = COUNTER-CLOCKWISE, ANGLE = 0.0000\\n' # TODO: wrong\n '\\n'\n ' VUGRID X-GRADIENT Y-GRADIENT Z-GRADIENT X-FLUX Y-FLUX Z-FLUX \\n'\n #' 111005001 2.000000E+01 -4.799646E-14 0.000000E+00 -4.080000E+03 9.791279E-12 0.000000E+00\\n'\n ]\n #vu3d\n ntimes = self.data.shape[0]\n\n #eids = self.element\n for itime in range(ntimes):\n dt = self._times[itime] # TODO: rename this...\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n # [xgrad, ygrad, zgrad, xflux, yflux, zflux]\n #nids = self.int_data[itime, :, 0]\n #self.element_parent = np.zeros((self.nelements, 2), dtype='int32')\n #self.vugrid = np.zeros((self.ntimes, self.ntotal), dtype='int32')\n vugrids = self.vugrid[itime, :]\n #print(vugrids)\n xgrad = self.data[itime, :, 0]\n ygrad = self.data[itime, :, 1]\n zgrad = self.data[itime, :, 2]\n xflux = self.data[itime, :, 3]\n yflux = self.data[itime, :, 4]\n zflux = self.data[itime, :, 5]\n\n for (vugrid, xgradi, ygradi, zgradi, xfluxi, yfluxi, zfluxi) in zip(\n vugrids, xgrad, ygrad, zgrad, xflux, yflux, zflux):\n f06_file.write(\n ' %10i %-13E %-13E %-13E %-13E %-13E %-13E\\n' % (\n vugrid, xgradi, ygradi, zgradi, xfluxi, yfluxi, zfluxi))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n\nclass RealHeatFluxVUBeamArray(BaseElement): # 191-VUBEAM\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.element_type = None\n self.element_name = None\n BaseElement.__init__(self, data_code, isubcase)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.nelements = 0 # result specific\n self.itotal = 0\n self.ielement = 0\n self.itime = 0\n\n if not is_sort1:\n raise NotImplementedError('SORT2')\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def get_headers(self) -> List[str]:\n headers = [\n 'xgrad', 'ygrad', 'zgrad', 'xflux', 'yflux', 'zflux',\n ]\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealHeatFluxVUBeamArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = np.zeros(self.ntimes, dtype=dtype)\n self.element_parent_coord = np.zeros((self.nelements, 3), dtype='int32')\n\n #[xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.vugrid = np.zeros((self.ntimes, self.ntotal, 1), dtype='int32')\n self.data = np.zeros((self.ntimes, self.ntotal, 6), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n #assert 0 not in self.element\n element_node = [\n self.element_parent_coord[:, 0],\n #self.element_parent_coord[:, 1],\n #self.element_parent_coord[:, 2],\n ]\n #print(pd.DataFrame(self.element_parent_coord))\n element_parent_coord = [\n np.vstack([self.element_parent_coord[:, 0], self.element_parent_coord[:, 0]]).T.ravel(),\n np.vstack([self.element_parent_coord[:, 1], 
self.element_parent_coord[:, 1]]).T.ravel(),\n np.vstack([self.element_parent_coord[:, 2], self.element_parent_coord[:, 2]]).T.ravel(),\n ]\n #print(element_parent_coord)\n if self.nonlinear_factor not in (None, np.nan):\n # TODO: rework\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = pd.Panel(self.data, items=column_values,\n major_axis=element_node,\n minor_axis=headers).to_frame()\n self.data_frame.columns.names = column_names\n self.data_frame.index.names = ['ElementID', 'Node', 'Item']\n else:\n df1 = pd.DataFrame(element_parent_coord).T\n df1.columns = ['Element', 'Parent', 'Coord']\n df2 = pd.DataFrame(self.vugrid[0])\n df2.columns = ['VU_Grid']\n df3 = pd.DataFrame(self.data[0])\n df3.columns = headers\n self.data_frame = df1.join([df2, df3])\n #print(self.data_frame)\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if not np.array_equal(self.element_parent_coord, table.element_parent_coord):\n assert self.element_parent_coord.shape == table.element_parent_coord.shape, 'element_parent_coord shape=%s table.shape=%s' % (\n self.element_parent_coord.shape, table.element_parent_coord.shape)\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n msg += 'Eid, Parent, Coord\\n'\n for (eid1, parent1, coord1), (eid2, parent2, coord2) in zip(self.element_parent_coord, table.element_parent_coord):\n msg += '(%s, %s, %s) (%s, %s, %s)\\n' % (\n eid1, parent1, coord1, eid2, parent2, coord2)\n print(msg)\n raise ValueError(msg)\n\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n i = 0\n eids = self.element_node[:, 0]\n for itime in range(self.ntimes):\n for ie, e in enumerate(eids):\n eid = e\n t1 = self.data[itime, ie, :]\n t2 = table.data[itime, ie, :]\n (free_conv1, free_conv_k1) = t1\n (free_conv2, free_conv_k2) = t2\n\n if not np.array_equal(t1, t2):\n msg += (\n '%s (%s, %s) (%s, %s)\\n' % (\n eid,\n free_conv1, free_conv_k1,\n free_conv2, free_conv_k2,\n ))\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n #print(msg)\n if i > 0:\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, parent, coord, unused_icord, grad_fluxes):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element_parent_coord[self.ielement, :] = [eid, parent, coord]\n for grad_flux in grad_fluxes:\n self.vugrid[self.itime, self.itotal, :] = grad_flux[0]\n self.data[self.itime, self.itotal, :] = grad_flux[1:]\n self.itotal += 1\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n #ntotal = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (\n 
ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_type)\n msg.append(' element name: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n #vubeam\n msg_temp = [\n ' T E M P E R A T U R E G R A D I E N T S A N D F L U X E S I N B E A M P - E L E M E N T S\\n'\n ' VU-ELEMENT ID= 100005001, P-ELEMENT ID = 5, OUTPUT COORD. ID= (LOCAL), P OF EDGES = 2\\n'\n '\\n'\n ' VUGRID X-GRADIENT Y-GRADIENT Z-GRADIENT X-FLUX Y-FLUX Z-FLUX \\n'\n #' 111005001 -2.000000E+01 0.000000E+00 0.000000E+00 4.080000E+03 0.000000E+00 0.000000E+00\\n'\n ]\n\n #(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)\n #(ntimes, ntotal, two) = self.data.shape\n ntimes = self.data.shape[0]\n\n #eids = self.element\n for itime in range(ntimes):\n dt = self._times[itime] # TODO: rename this...\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n vugrids = self.vugrid[itime, :, 0]\n # [xgrad, ygrad, zgrad, xflux, yflux, zflux]\n xgrad = self.data[itime, :, 0]\n ygrad = self.data[itime, :, 1]\n zgrad = self.data[itime, :, 2]\n xflux = self.data[itime, :, 3]\n yflux = self.data[itime, :, 4]\n zflux = self.data[itime, :, 5]\n\n for (nid, xgradi, ygradi, zgradi, xfluxi, yfluxi, zfluxi) in zip(\n vugrids, xgrad, ygrad, zgrad, xflux, yflux, zflux):\n vals2 = write_floats_13e([xgradi, ygradi, zgradi, xfluxi, yfluxi, zfluxi])\n [sxgradi, sygradi, szgradi, sxfluxi, syfluxi, szfluxi] = vals2\n f06_file.write(' %10i %13s %13s %13s %13s %13s %s\\n' % (\n nid, sxgradi, sygradi, szgradi, sxfluxi, syfluxi, szfluxi))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n\nclass RealHeatFlux_2D_3DArray(RealElementTableArray):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n RealElementTableArray.__init__(self, data_code, is_sort1, isubcase, dt)\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n\n #nelements = self.element.shape[0]# // 2\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.0 10.0\n #ElementID Item\n #1 grad1 0.0 -1.734723e-18\n # grad2 0.0 -1.301043e-18\n # grad3 0.0 1.951564e-18\n # flux1 -0.0 3.538836e-16\n # flux2 -0.0 2.654127e-16\n # flux3 -0.0 -3.981190e-16\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n df1 = pd.DataFrame(self.element)\n df1.columns = ['ElementID']\n df2 = pd.DataFrame(self.data[0])\n df2.columns = headers\n data_frame = df1.join(df2)\n #print(self.data_frame)\n self.data_frame = data_frame\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n words = [\n ' F I N I T E E L E M E N T T E M P E R A T U R E G R A D I E N T S A N D F L U X E S \\n \\n',\n ' ELEMENT-ID EL-TYPE X-GRADIENT Y-GRADIENT Z-GRADIENT X-FLUX Y-FLUX Z-FLUX\\n']\n #' \\n',\n #' POINT ID. 
TYPE T1 T2 T3 R1 R2 R3\\n']\n #words += self.get_table_marker()\n if self.nonlinear_factor not in (None, np.nan):\n return self._write_f06_transient_block(words, header, page_stamp, page_num, f06_file,\n is_mag_phase=is_mag_phase, is_sort1=is_sort1)\n return self._write_f06_block(words, header, page_stamp, page_num, f06_file,\n is_mag_phase=is_mag_phase, is_sort1=is_sort1)\n\n def get_headers(self) -> List[str]:\n return ['grad1', 'grad2', 'grad3', 'flux1', 'flux2', 'flux3']\n\n\nclass RealConvHeatFluxArray(BaseElement): # 107-CHBDYE 108-CHBDYG 109-CHBDYP\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.element_type = None\n self.element_name = None\n BaseElement.__init__(self, data_code, isubcase)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.nelements = 0 # result specific\n self.ielement = 0\n self.itotal = 0\n\n if not is_sort1:\n raise NotImplementedError('SORT2')\n\n @property\n def is_real(self) -> bool:\n \"\"\"is the result real?\"\"\"\n return True\n\n @property\n def is_complex(self) -> bool:\n \"\"\"is the result complex?\"\"\"\n return False\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def get_headers(self) -> List[str]:\n headers = [\n 'free_conv', 'free_conv_k',\n ]\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealConvHeatFluxArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = np.zeros(self.ntimes, dtype=dtype)\n self.element_node = np.zeros((self.nelements, 2), dtype='int32')\n\n #[free_conv, free_conv_k]\n self.data = np.zeros((self.ntimes, self.ntotal, 2), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n # TODO: fix me\n headers = self.get_headers()\n #assert 0 not in self.element\n element_node = [\n self.element_node[:, 0],\n self.element_node[:, 1],\n ]\n if self.nonlinear_factor not in (None, np.nan):\n column_names, column_values = self._build_dataframe_transient_header()\n #data_frame = self._build_pandas_transient_elements(\n #column_values, column_names,\n #headers, self.element, self.data)\n #print(data_frame)\n #asdf\n data_frame = pd.Panel(self.data, items=column_values,\n major_axis=element_node,\n minor_axis=headers).to_frame()\n data_frame.columns.names = column_names\n else:\n # >=25.0\n #Static free_conv free_conv_k\n #ElementID NodeID\n #1 0 -0.166667 10.0\n #2 0 -0.166667 10.0\n #3 0 -0.166667 10.0\n #4 0 -0.166667 10.0\n #5 0 -0.166667 10.0\n #6 0 -0.166667 10.0\n # <v24.2\n #Static 0\n #ElementID Node Item\n #1 0 free_conv -0.166667\n # free_conv_k 10.000000\n #2 0 free_conv -0.166667\n # free_conv_k 10.000000\n index = pd.MultiIndex.from_arrays(self.element_node.T, names=['ElementID', 'NodeID'])\n data_frame = pd.DataFrame(self.data[0], columns=headers, index=index)\n data_frame.columns.names = ['Static']\n #data_frame = pd.Panel(self.data,\n 
#major_axis=element_node,\n #minor_axis=headers).to_frame()\n #data_frame.columns.names = ['Static']\n #data_frame.index.names = ['ElementID', 'Node', 'Item']\n self.data_frame = data_frame\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n i = 0\n eids = self.element_node[:, 0]\n for itime in range(self.ntimes):\n for ie, e in enumerate(eids):\n eid = e\n t1 = self.data[itime, ie, :]\n t2 = table.data[itime, ie, :]\n (free_conv1, free_conv_k1) = t1\n (free_conv2, free_conv_k2) = t2\n\n if not np.array_equal(t1, t2):\n msg += (\n '%s (%s, %s) (%s, %s)\\n' % (\n eid,\n free_conv1, free_conv_k1,\n free_conv2, free_conv_k2,\n ))\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n #print(msg)\n if i > 0:\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, cntl_node, free_conv, free_conv_k):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element_node[self.ielement, :] = [eid, cntl_node]\n self.data[self.itime, self.ielement, :] = [free_conv, free_conv_k]\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n #ntotal = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (\n ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n msg.append(' element type: %s\\n' % self.element_type)\n msg.append(' element name: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n msg_temp = [\n #' F I N I T E E L E M E N T T E M P E R A T U R E G R A D I E N T S A N D F L U X E S '\n #' '\n #' ELEMENT-ID EL-TYPE X-GRADIENT Y-GRADIENT Z-GRADIENT X-FLUX Y-FLUX Z-FLUX'\n #' 1 QUAD4 -8.372393E-01 1.776357E-15 8.372393E-01 -1.776357E-15'\n ' RealConvHeatFluxArray\\n'\n ' ELEMENT-ID FREE-CONVECTION CONTROL-NODE FREE-CONVECTION-K\\n'\n ]\n ntimes = self.data.shape[0]\n\n eids = self.element_node[:, 0]\n nids = self.element_node[:, 1]\n for itime in range(ntimes):\n dt = self._times[itime] # TODO: rename this...\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n # [free_conv, free_conv_k]\n free_conv = self.data[itime, :, 0]\n free_conv_k = self.data[itime, :, 1]\n\n for (eid, nid, free_convi, free_conv_ki) in zip(eids, nids, free_conv, free_conv_k):\n f06_file.write(' %8i %-13s %-13s %s\\n' % (\n eid,\n write_float_13e(free_convi),\n nid,\n write_float_13e(free_conv_ki)\n ))\n f06_file.write(page_stamp % page_num)\n page_num += 
1\n return page_num - 1\n\n\nclass RealChbdyHeatFluxArray(BaseElement): # 107-CHBDYE 108-CHBDYG 109-CHBDYP\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.element_type = None\n self.element_name = None\n BaseElement.__init__(self, data_code, isubcase)\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n #self.ntotal = 0\n self.nelements = 0 # result specific\n self.ielement = 0\n self.itotal = 0\n\n if not is_sort1:\n raise NotImplementedError('SORT2')\n\n @property\n def is_real(self) -> bool:\n \"\"\"is the result real?\"\"\"\n return True\n\n @property\n def is_complex(self) -> bool:\n \"\"\"is the result complex?\"\"\"\n return False\n\n @property\n def nnodes_per_element(self) -> int:\n return 1\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def get_headers(self) -> List[str]:\n headers = [\n 'fapplied', 'free_conv', 'force_conv', 'frad', 'ftotal',\n ]\n return headers\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the RealChbdyHeatFluxArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n if self.is_built:\n return\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #self.ntimes = 0\n #self.nelements = 0\n self.is_built = True\n\n #print(\"ntimes=%s nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n dtype = 'float32'\n if isinstance(self.nonlinear_factor, integer_types):\n dtype = 'int32'\n self._times = np.zeros(self.ntimes, dtype=dtype)\n self.element = np.zeros(self.nelements, dtype='int32')\n self.element_type = np.empty(self.nelements, dtype='|U8')\n\n #[fapplied, free_conv, force_conv, frad, ftotal]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')\n\n def build_dataframe(self):\n \"\"\"creates a pandas dataframe\"\"\"\n import pandas as pd\n headers = self.get_headers()\n assert 0 not in self.element\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.0 10.0\n #ElementID Item\n #10 fapplied 0.0 0.000000\n # free_conv 0.0 499.376068\n # force_conv 0.0 0.000000\n # frad 0.0 0.000000\n # ftotal 0.0 499.376068\n #20 fapplied 0.0 0.000000\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n # >=25.0\n #Static fapplied free_conv force_conv frad ftotal\n #ElementID\n #1 0.166667 -0.166667 0.0 0.0 0.0\n #2 0.166667 -0.166667 0.0 0.0 0.0\n #3 0.166667 -0.166667 0.0 0.0 0.0\n #4 0.166667 -0.166667 0.0 0.0 0.0\n #5 0.166667 -0.166667 0.0 0.0 0.0\n #6 0.166667 -0.166667 0.0 0.0 0.0\n #\n # <=24.2\n #Static 0\n #ElementID Item\n #1 fapplied 0.166667\n # free_conv -0.166667\n # force_conv 0.000000\n # frad 0.000000\n # ftotal 0.000000\n data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)\n data_frame.index.name = 'ElementID'\n data_frame.columns.names = ['Static']\n #data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()\n #data_frame.columns.names = ['Static']\n #data_frame.index.names = ['ElementID', 'Item']\n self.data_frame = data_frame\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n assert self.is_sort1 == table.is_sort1\n if 
not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n i = 0\n for itime in range(self.ntimes):\n for ie, e in enumerate(self.element):\n eid = e\n t1 = self.data[itime, ie, :]\n t2 = table.data[itime, ie, :]\n (fapplied1, free_conv1, force_conv1, frad1, ftotal1) = t1\n (fapplied2, free_conv2, force_conv2, frad2, ftotal2) = t2\n\n if not np.array_equal(t1, t2):\n msg += (\n '%s (%s, %s, %s, %s, %s)\\n'\n ' (%s, %s, %s, %s, %s)\\n' % (\n eid,\n fapplied1, free_conv1, force_conv1, frad1, ftotal1,\n fapplied2, free_conv2, force_conv2, frad2, ftotal2,\n ))\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n #print(msg)\n if i > 0:\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, etype, fapplied, free_conv, force_conv, frad, ftotal):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.element[self.ielement] = eid\n self.element_type[self.ielement] = etype\n self.data[self.itime, self.ielement, :] = [fapplied, free_conv, force_conv, frad, ftotal]\n self.ielement += 1\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n\n nelements = self.nelements\n ntimes = self.ntimes\n #ntotal = self.ntotal\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(' data.shape = %s\\n' % str(self.data.shape).replace('L', ''))\n #msg.append(' element type: %s\\n' % self.element_type)\n #msg.append(' element name: %s\\n' % self.element_name)\n msg += self.get_data_code()\n return msg\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):\n if header is None:\n header = []\n\n assert self.is_sort1 == True, self.is_sort1\n\n\n msg_temp = [\n ' H E A T F L O W I N T O H B D Y E L E M E N T S (CHBDY)\\n'\n ' \\n'\n ' ELEMENT-ID APPLIED-LOAD FREE-CONVECTION FORCED-CONVECTION RADIATION TOTAL\\n'\n #' 60 0.000000E+00 1.641941E+02 0.000000E+00 0.000000E+00 1.641941E+02'\n ]\n\n #(elem_name, msg_temp) = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)\n #(ntimes, ntotal, two) = self.data.shape\n ntimes = self.data.shape[0]\n\n eids = self.element\n for itime in range(ntimes):\n dt = self._times[itime] # TODO: rename this...\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg_temp))\n\n # [fapplied, free_conv, force_conv, frad, ftotal]\n fapplied = self.data[itime, :, 0]\n free_conv = self.data[itime, :, 1]\n force_conv = self.data[itime, :, 2]\n frad = self.data[itime, :, 3]\n ftotal = self.data[itime, :, 4]\n\n for (eid, fappliedi, free_convi, force_convi, fradi, ftotali) in zip(\n eids, fapplied, free_conv, force_conv, frad, ftotal):\n #vals2 = write_floats_13e(\n #[fappliedi, free_convi, force_convi, fradi, ftotali])\n #[sfapplied, sfree_conv, 
sforce_conv, sfrad, sftotal] = vals2\n\n f06_file.write(' %8i %13E %13E %13E %13E %13E\\n' % (\n eid, fappliedi, free_convi, force_convi, fradi, ftotali))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\nclass RealHeatFluxVUShellArray(BaseElement):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n self.nonlinear_factor = np.nan\n self.table_name = None\n self.approach_code = None\n self.analysis_code = None\n BaseElement.__init__(self, data_code, isubcase, apply_data_code=True) # no double inheritance\n unused_sort1 = self.is_sort1\n #self.dt = dt\n #self.code = [self.format_code, self.sort_code, self.s_code]\n\n #self.ntimes = 0 # or frequency/mode\n self.ntotal = 0\n self.nelements = 0 # result specific\n\n @property\n def is_real(self) -> bool:\n \"\"\"is the result real?\"\"\"\n return True\n\n @property\n def is_complex(self) -> bool:\n \"\"\"is the result complex?\"\"\"\n return False\n\n def data_type(self):\n return 'float32'\n\n def get_stats(self, short=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n ' ntimes: %i\\n' % self.ntimes,\n ' ntotal: %i\\n' % self.ntotal,\n ]\n #ngrids = len(self.gridTypes)\n msg = []\n\n unused_ntimesi, ntotal = self.data.shape[:2]\n ntimes = len(self._times)\n nelements = self.element.shape[0]\n\n nmajor = self.ntimes\n nminor = self.ntotal\n if self.is_sort1:\n assert nmajor == ntimes, 'ntimes=%s expected=%s' % (nmajor, ntimes)\n assert nminor == ntotal, 'ntotal=%s expected=%s' % (nminor, nelements)\n else:\n assert nmajor == nelements, 'nelements=%s expected=%s' % (nmajor, nelements)\n assert nminor == ntotal, 'ntotal=%s expected=%s' % (nminor, ntimes)\n\n msg.append(' isubcase = %s\\n' % self.isubcase)\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%s nelements=%s\\n'\n % (self.__class__.__name__, ntimes, nelements))\n else:\n msg.append(' type=%s nelements=%s\\n'\n % (self.__class__.__name__, nelements))\n headers = ', '.join(self._get_headers())\n #msg.append(' data: [%s] shape=%s dtype=%s\\n'\n #% (headers, [int(i) for i in self.data.shape], self.data.dtype))\n msg.append(' data: [%s] shape=%s dtype=%s\\n'\n % (headers,\n [int(i) for i in self.data.shape], self.data.dtype))\n msg += self.get_data_code()\n return msg\n\n @property\n def headers(self):\n return ['xgrad', 'ygrad', 'zgrad', 'xflux', 'yflux', 'zflux']\n\n def _get_headers(self):\n return self.headers\n\n def _reset_indices(self):\n self.itotal = 0\n self.ielement = 0\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the ElementTableArray\"\"\"\n #print('nelements=%s ntimes=%s sort1?=%s ntotal=%s -> _nelements=%s' % (\n #self.nelements, self.ntimes, self.is_sort1,\n #self.ntotal, self.nelements))\n\n self.nelements //= self.ntimes\n self.itime = 0\n self.itotal = 0\n self.is_built = True\n\n if self.is_sort1:\n ntimes = self.ntimes\n nelements = self.ntotal\n nx = ntimes\n ny = self.ntotal\n #print(\"ntimes=%s nelements=%s\" % (ntimes, nelements))\n if self.is_sort2:\n #unused_ntotal = self.ntotal\n nelements = self.ntimes\n ntimes = self.ntotal\n nx = nelements\n ny = ntimes\n #print(\"ntotal=%s nelements=%s ntimes=%s\" % (ntotal, nelements, ntimes))\n\n self._times = np.zeros(ntimes, dtype=self._times_dtype)\n #self.types = array(self.nelements, dtype='|S1')\n\n self.element = np.zeros(nelements, dtype='int32')\n self.element_parent_coord_icord = np.zeros((nelements, 4), dtype='int32')\n #self.element_data_type = empty(nelements, 
dtype='|U8')\n\n #[xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.data = np.zeros((nx, ny, 6), self.data_type())\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n is_nan = (self.nonlinear_factor is not None and\n np.isnan(self.nonlinear_factor) and\n np.isnan(table.nonlinear_factor))\n if not is_nan:\n assert self.nonlinear_factor == table.nonlinear_factor\n assert self.ntotal == table.ntotal\n assert self.table_name == table.table_name, 'table_name=%r table.table_name=%r' % (self.table_name, table.table_name)\n assert self.approach_code == table.approach_code\n if not is_nan:\n assert np.array_equal(self._times, table._times), 'ename=%s-%s times=%s table.times=%s' % (\n self.element_name, self.element_type, self._times, table._times)\n\n if not np.array_equal(self.element, table.element):\n assert self.element.shape == table.element.shape, 'shape=%s table.shape=%s' % (self.element.shape, table.element.shape)\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n msg += 'eid:'\n for (eid, eid2) in zip(self.element, table.element):\n msg += '%s, %s\\n' % (eid, eid2)\n print(msg)\n raise ValueError(msg)\n\n if not np.array_equal(self.element_parent_coord_icord, table.element_parent_coord_icord):\n assert self.element_parent_coord_icord.shape == table.element_parent_coord_icord.shape, 'shape=%s table.shape=%s' % (self.element.shape, table.element.shape)\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n msg += 'element_parent_coord_icord:'\n for (epci1, epci2) in zip(self.element_parent_coord_icord, table.element_parent_coord_icord):\n msg += '%s, %s\\n' % (epci1, epci2)\n print(msg)\n raise ValueError(msg)\n\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for ieid, eid in enumerate(self.element):\n t1 = self.data[itime, ieid, :]\n t2 = table.data[itime, ieid, :]\n (tx1, ty1, tz1, rx1, ry1, rz1) = t1\n (tx2, ty2, tz2, rx2, ry2, rz2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s)\\n' % (\n eid,\n tx1, ty1, tz1, rx1, ry1, rz1,\n tx2, ty2, tz2, rx2, ry2, rz2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def add_sort1(self, dt, eid, parent, coord, unused_icord, unused_theta,\n xgrad, ygrad, zgrad, xflux, yflux, zflux):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n # itotal - the node number\n # itime - the time/frequency step\n\n # the times/freqs\n self._times[self.itime] = dt\n self.element[self.itotal] = eid\n #print(eid, parent, coord, icord)\n # icord is a string?\n self.element_parent_coord_icord[self.itotal] = [eid, parent, coord, 0]\n #self.element_data_type[self.itotal] = etype\n self.data[self.itime, self.itotal, :] = [xgrad, ygrad, zgrad, xflux, yflux, zflux]\n self.itotal += 1\n\n #def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n #page_num=1, is_mag_phase=False, is_sort1=True):\n #pass\n"
] | [
[
"numpy.hstack",
"numpy.unique"
],
[
"scipy.sparse.coo_matrix",
"numpy.unique",
"numpy.asarray",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.array"
],
[
"numpy.allclose",
"numpy.array_equal",
"numpy.isfinite",
"pandas.DataFrame",
"numpy.searchsorted",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.array_equal",
"pandas.Panel",
"numpy.searchsorted",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.array_equal",
"numpy.isnan",
"pandas.Panel",
"numpy.vstack",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
}
] |
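The heat-flux result classes in the code column above build their static (non-transient) pandas frames with an (ElementID, NodeID) MultiIndex over the per-element data block. A minimal standalone sketch of that layout, using made-up element/node IDs and values in place of the arrays that `build()` and `add_sort1()` would normally fill:

```python
import numpy as np
import pandas as pd

# Fabricated stand-ins for the attributes the class fills in build()/add_sort1():
# element_node holds (eid, cntl_node) pairs; data holds [free_conv, free_conv_k]
# per time step.
element_node = np.array([[1, 0], [2, 0], [3, 0]], dtype='int32')
data = np.zeros((1, 3, 2), dtype='float32')   # (ntimes=1, nelements, 2)
data[0, :, 0] = -1.0 / 6.0                    # free_conv
data[0, :, 1] = 10.0                          # free_conv_k

headers = ['free_conv', 'free_conv_k']
index = pd.MultiIndex.from_arrays(element_node.T, names=['ElementID', 'NodeID'])
df = pd.DataFrame(data[0], columns=headers, index=index)
df.columns.names = ['Static']
print(df)
```

The printed frame matches the "Static / ElementID NodeID" layout shown in the comments of `build_dataframe()`.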
hengwei-chan/molecular_attention_transformer | [
"29193d4155df528e3a6a0c1e0da39111d0b8db93"
] | [
"soltrannet/__init__.py"
] | [
"from .predict import predict \nimport argparse\nimport sys, multiprocessing\nimport torch\n\ndef _parse_args():\n parser=argparse.ArgumentParser(description=\"Run SolTranNet aqueous solubility predictor\")\n parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is 1 SMILE per line.')\n parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')\n parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')\n parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')\n parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')\n\n args=parser.parse_args()\n\n return args\n\ndef _run(args):\n\n smiles=[x.rstrip() for x in args.input]\n if args.cpu_predict:\n predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))\n else:\n predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)\n for pred, smi, warn in predictions:\n args.output.write(f'{smi},{pred:.3f},{warn}\\n')\n\n"
] | [
[
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
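The soltrannet entry point above simply strips the SMILES lines from the input file, hands them to `predict()`, and writes `smiles,prediction,warning` rows. A small sketch of driving the same call from Python rather than the CLI; it assumes the soltrannet package and its model weights are installed, and the SMILES strings are arbitrary examples:

```python
# Mirrors what _run() does, minus the argparse/file plumbing.
import torch
import soltrannet

smiles = ["CCO", "c1ccccc1O"]          # ethanol, phenol (example inputs)
predictions = soltrannet.predict(
    smiles,
    batch_size=32,
    num_workers=0,                      # 0 == load data in the main process
    device=torch.device("cpu"),         # equivalent to passing --cpu_predict
)
for pred, smi, warn in predictions:
    print(f"{smi},{pred:.3f},{warn}")
```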
adibellathur/garage | [
"482a26a07d46091f878c41b582f1478588e397ff"
] | [
"src/garage/torch/algos/_utils.py"
] | [
"\"\"\"Utility functions used by PyTorch algorithms.\"\"\"\nimport torch\nimport torch.nn.functional as F\n\n\nclass _Default: # pylint: disable=too-few-public-methods\n \"\"\"A wrapper class to represent default arguments.\n\n Args:\n val (object): Argument value.\n\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n\ndef make_optimizer(optimizer_type, module, **kwargs):\n \"\"\"Create an optimizer for PyTorch algos.\n\n Args:\n optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.\n This can be an optimizer type such as 'torch.optim.Adam' or a\n tuple of type and dictionary, where dictionary contains arguments\n to initialize the optimizer e.g. (torch.optim.Adam, {'lr' = 1e-3})\n module (torch.nn.Module): The module whose parameters needs to be\n optimized.\n kwargs (dict): Other keyword arguments to initialize optimizer. This\n is not used when `optimizer_type` is tuple.\n\n Returns:\n torch.optim.Optimizer: Constructed optimizer.\n\n Raises:\n ValueError: Raises value error when `optimizer_type` is tuple, and\n non-default argument is passed in `kwargs`.\n\n \"\"\"\n if isinstance(optimizer_type, tuple):\n opt_type, opt_args = optimizer_type\n for name, arg in kwargs.items():\n if not isinstance(arg, _Default):\n raise ValueError('Should not specify {} and explicit \\\n optimizer args at the same time'.format(name))\n return opt_type(module.parameters(), **opt_args)\n\n opt_args = {}\n for name, arg in kwargs.items():\n if isinstance(arg, _Default):\n opt_args[name] = arg.val\n else:\n opt_args[name] = arg\n return optimizer_type(module.parameters(), **opt_args)\n\n\ndef compute_advantages(discount, gae_lambda, max_path_length, baselines,\n rewards):\n \"\"\"Calculate advantages.\n\n Advantages are a discounted cumulative sum.\n\n Calculate advantages using a baseline (value function) according to\n Generalized Advantage Estimation (GAE)\n\n The discounted cumulative sum can be computed using conv2d with filter.\n filter:\n [1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]\n where the length is same with max_path_length.\n\n baselines and rewards are also has same shape.\n baselines:\n [ [b_11, b_12, b_13, ... b_1n],\n [b_21, b_22, b_23, ... b_2n],\n ...\n [b_m1, b_m2, b_m3, ... b_mn] ]\n rewards:\n [ [r_11, r_12, r_13, ... r_1n],\n [r_21, r_22, r_23, ... r_2n],\n ...\n [r_m1, r_m2, r_m3, ... r_mn] ]\n\n Args:\n discount (float): RL discount factor (i.e. gamma).\n gae_lambda (float): Lambda, as used for Generalized Advantage\n Estimation (GAE).\n max_path_length (int): Maximum length of a single rollout.\n baselines (torch.Tensor): A 2D vector of value function estimates with\n shape (N, T), where N is the batch dimension (number of episodes)\n and T is the maximum path length experienced by the agent. If an\n episode terminates in fewer than T time steps, the remaining\n elements in that episode should be set to 0.\n rewards (torch.Tensor): A 2D vector of per-step rewards with shape\n (N, T), where N is the batch dimension (number of episodes) and T\n is the maximum path length experienced by the agent. If an episode\n terminates in fewer than T time steps, the remaining elements in\n that episode should be set to 0.\n\n Returns:\n torch.Tensor: A 2D vector of calculated advantage values with shape\n (N, T), where N is the batch dimension (number of episodes) and T\n is the maximum path length experienced by the agent. 
If an episode\n terminates in fewer than T time steps, the remaining values in that\n episode should be set to 0.\n\n \"\"\"\n adv_filter = torch.full((1, 1, 1, max_path_length - 1),\n discount * gae_lambda)\n adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)\n\n deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)\n deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)\n\n advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()\n return advantages\n\n\ndef pad_to_last(nums, total_length, axis=-1, val=0):\n \"\"\"Pad val to last in nums in given axis.\n\n length of the result in given axis should be total_length.\n\n Raises:\n IndexError: If the input axis value is out of range of the nums array\n\n Args:\n nums (numpy.ndarray): The array to pad.\n total_length (int): The final width of the Array.\n axis (int): Axis along which a sum is performed.\n val (int): The value to set the padded value.\n\n Returns:\n torch.Tensor: Padded array\n\n \"\"\"\n tensor = torch.Tensor(nums)\n axis = (axis + len(tensor.shape)) if axis < 0 else axis\n\n if len(tensor.shape) <= axis:\n raise IndexError('axis {} is out of range {}'.format(\n axis, tensor.shape))\n\n padding_config = [0, 0] * len(tensor.shape)\n padding_idx = abs(axis - len(tensor.shape)) * 2 - 1\n padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)\n return F.pad(tensor, padding_config)\n\n\ndef filter_valids(tensor, valids):\n \"\"\"Filter out tensor using valids (last index of valid tensors).\n\n valids contains last indices of each rows.\n\n Args:\n tensor (torch.Tensor): The tensor to filter\n valids (list[int]): Array of length of the valid values\n\n Returns:\n torch.Tensor: Filtered Tensor\n\n \"\"\"\n return [tensor[i][:valids[i]] for i in range(len(valids))]\n"
] | [
[
"torch.nn.functional.pad",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
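`compute_advantages()` in the garage utilities above documents the trick of computing the GAE discounted cumulative sum as a `conv2d` with the filter `[1, (gamma*lambda), (gamma*lambda)^2, ...]`. A self-contained sketch that reproduces that filter construction on toy numbers and checks it against a naive per-timestep sum; the reward and baseline values are made up:

```python
import torch
import torch.nn.functional as F

gamma, lam, T = 0.99, 0.95, 4
rewards   = torch.tensor([[1.0, 0.5, 0.25, 0.0]])   # shape (N=1, T)
baselines = torch.tensor([[0.9, 0.6, 0.30, 0.1]])

# TD residuals: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), with V(s_T) = 0.
deltas = rewards + gamma * F.pad(baselines, (0, 1))[:, 1:] - baselines

# Filter [1, (gamma*lam), (gamma*lam)^2, ..., (gamma*lam)^(T-1)].
adv_filter = torch.full((1, 1, 1, T - 1), gamma * lam)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)

padded = F.pad(deltas, (0, T - 1)).unsqueeze(0).unsqueeze(0)   # (1, 1, N, 2T-1)
adv_conv = F.conv2d(padded, adv_filter, stride=1).squeeze()    # (T,) for N=1

# Naive reference: A_t = sum_k (gamma*lam)^k * delta_{t+k}.
adv_ref = torch.tensor([sum((gamma * lam) ** k * deltas[0, t + k].item()
                            for k in range(T - t)) for t in range(T)])
assert torch.allclose(adv_conv, adv_ref, atol=1e-6)
print(adv_conv)
```

The cross-correlation performed by `conv2d` at step t is exactly the sum over k of `(gamma*lam)**k * delta[t+k]`, which is why the single convolution replaces a per-path Python loop.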
amuamushu/wavedata | [
"1745c646ff3a76b38a81c439a0edd900c986c9f7"
] | [
"wavedata/tools/core/voxel_grid_2d.py"
] | [
"import numpy as np\n\nfrom wavedata.wavedata.tools.core import geometry_utils\n\n\nclass VoxelGrid2D(object):\n \"\"\"\n Voxel grids represent occupancy info. The voxelize_2d method projects a point cloud\n onto a plane, while saving height and point density information for each voxel.\n \"\"\"\n\n # Class Constants\n VOXEL_EMPTY = -1\n VOXEL_FILLED = 0\n\n def __init__(self):\n\n # Quantization size of the voxel grid\n self.voxel_size = 0.0\n\n # Voxels at the most negative/positive xyz\n self.min_voxel_coord = np.array([])\n self.max_voxel_coord = np.array([])\n\n # Size of the voxel grid along each axis\n self.num_divisions = np.array([0, 0, 0])\n\n # Points in sorted order, to match the order of the voxels\n self.points = []\n\n # Indices of filled voxels\n self.voxel_indices = []\n\n # Max point height in projected voxel\n self.heights = []\n\n # Number of points corresponding to projected voxel\n self.num_pts_in_voxel = []\n\n # Full occupancy grid, VOXEL_EMPTY or VOXEL_FILLED\n self.leaf_layout_2d = []\n\n def voxelize_2d(self, pts, voxel_size, extents=None,\n ground_plane=None, create_leaf_layout=True):\n \"\"\"Voxelizes the point cloud into a 2D voxel grid by\n projecting it down into a flat plane, and stores the maximum\n point height, and number of points corresponding to the voxel\n\n :param pts: Point cloud as N x [x, y, z]\n :param voxel_size: Quantization size for the grid\n :param extents: Optional, specifies the full extents of the point cloud.\n Used for creating same sized voxel grids.\n :param ground_plane: Plane coefficients (a, b, c, d), xz plane used if\n not specified\n :param create_leaf_layout: Set this to False to create an empty\n leaf_layout, which will save computation\n time.\n \"\"\"\n # Check if points are 3D, otherwise early exit\n if pts.shape[1] != 3:\n raise ValueError(\"Points have the wrong shape: {}\".format(\n pts.shape))\n\n self.voxel_size = voxel_size\n\n # Discretize voxel coordinates to given quantization size\n discrete_pts = np.floor(pts / voxel_size).astype(np.int32)\n\n # Use Lex Sort, sort by x, then z, then y (\n x_col = discrete_pts[:, 0]\n y_col = discrete_pts[:, 1]\n z_col = discrete_pts[:, 2]\n sorted_order = np.lexsort((y_col, z_col, x_col))\n\n # Save original points in sorted order\n self.points = pts[sorted_order]\n\n # Save discrete points in sorted order\n discrete_pts = discrete_pts[sorted_order]\n\n # Project all points to a 2D plane\n discrete_pts_2d = discrete_pts.copy()\n discrete_pts_2d[:, 1] = 0\n\n # Format the array to c-contiguous array for unique function\n contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(\n np.dtype((np.void, discrete_pts_2d.dtype.itemsize *\n discrete_pts_2d.shape[1])))\n\n # The new coordinates are the discretized array with its unique indexes\n _, unique_indices = np.unique(contiguous_array, return_index=True)\n\n # Sort unique indices to preserve order\n unique_indices.sort()\n\n voxel_coords = discrete_pts_2d[unique_indices]\n\n # Number of points per voxel, last voxel calculated separately\n num_points_in_voxel = np.diff(unique_indices)\n num_points_in_voxel = np.append(num_points_in_voxel,\n discrete_pts_2d.shape[0] -\n unique_indices[-1])\n\n if ground_plane is None:\n # Use first point in voxel as highest point\n height_in_voxel = self.points[unique_indices, 1]\n else:\n # Ground plane provided\n height_in_voxel = geometry_utils.dist_to_plane(\n ground_plane, self.points[unique_indices])\n\n # Set the height and number of points for each voxel\n self.heights = height_in_voxel\n 
self.num_pts_in_voxel = num_points_in_voxel\n\n # Find the minimum and maximum voxel coordinates\n if extents is not None:\n # Check provided extents\n extents_transpose = np.array(extents).transpose()\n if extents_transpose.shape != (2, 3):\n raise ValueError(\"Extents are the wrong shape {}\".format(\n extents.shape))\n\n # Set voxel grid extents\n self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)\n self.max_voxel_coord = \\\n np.ceil((extents_transpose[1] / voxel_size) - 1)\n\n self.min_voxel_coord[1] = 0\n self.max_voxel_coord[1] = 0\n\n # Check that points are bounded by new extents\n if not (self.min_voxel_coord <= np.amin(voxel_coords,\n axis=0)).all():\n raise ValueError(\"Extents are smaller than min_voxel_coord\")\n if not (self.max_voxel_coord >= np.amax(voxel_coords,\n axis=0)).all():\n raise ValueError(\"Extents are smaller than max_voxel_coord\")\n\n else:\n # Automatically calculate extents\n self.min_voxel_coord = np.amin(voxel_coords, axis=0)\n self.max_voxel_coord = np.amax(voxel_coords, axis=0)\n\n # Get the voxel grid dimensions\n self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)\n + 1).astype(np.int32)\n\n # Bring the min voxel to the origin\n self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)\n\n if create_leaf_layout:\n # Create Voxel Object with -1 as empty/occluded, 0 as occupied\n self.leaf_layout_2d = self.VOXEL_EMPTY * \\\n np.ones(self.num_divisions.astype(int))\n\n # Fill out the leaf layout\n self.leaf_layout_2d[self.voxel_indices[:, 0], 0,\n self.voxel_indices[:, 2]] = \\\n self.VOXEL_FILLED\n\n def map_to_index(self, map_index):\n \"\"\"Converts map coordinate values to 1-based discretized grid index\n coordinate. Note: Any values outside the extent of the grid will be\n forced to be the maximum grid coordinate.\n\n :param map_index: N x 2 points\n\n :return: N x length(dim) (grid coordinate)\n [] if min_voxel_coord or voxel_size or grid_index or dim is not set\n \"\"\"\n if self.voxel_size == 0 \\\n or len(self.min_voxel_coord) == 0 \\\n or len(map_index) == 0:\n return []\n\n num_divisions_2d = self.num_divisions[[0, 2]]\n min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]\n\n # Truncate index (same as np.floor for positive values) and clip\n # to valid voxel index range\n indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d\n indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])\n indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])\n\n return indices\n"
] | [
[
"numpy.amax",
"numpy.unique",
"numpy.clip",
"numpy.amin",
"numpy.ascontiguousarray",
"numpy.int32",
"numpy.lexsort",
"numpy.dtype",
"numpy.ceil",
"numpy.append",
"numpy.diff",
"numpy.floor",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
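`voxelize_2d()` above groups projected points per voxel with a lex-sort followed by `np.unique` on a contiguous void view of the discretized coordinates. A standalone sketch of just that numpy mechanism on a random point cloud; the extents and ground-plane handling of the class are omitted:

```python
import numpy as np

rng = np.random.default_rng(0)
pts = rng.uniform(-2.0, 2.0, size=(1000, 3))
voxel_size = 0.5

discrete = np.floor(pts / voxel_size).astype(np.int32)
# Sort by x, then z, then y (lexsort's last key is the primary key).
order = np.lexsort((discrete[:, 1], discrete[:, 2], discrete[:, 0]))
discrete = discrete[order]

# Project onto the xz-plane (y column zeroed), as the 2D grid does.
flat = discrete.copy()
flat[:, 1] = 0

# View each row as a single void scalar so np.unique can find unique rows.
contiguous = np.ascontiguousarray(flat).view(
    np.dtype((np.void, flat.dtype.itemsize * flat.shape[1])))
_, unique_idx = np.unique(contiguous, return_index=True)
unique_idx.sort()   # restore data order of the representatives

voxel_coords = flat[unique_idx]
# Because identical rows are adjacent after the sort, index differences give
# the number of points falling into each occupied voxel.
counts = np.append(np.diff(unique_idx), flat.shape[0] - unique_idx[-1])

print(voxel_coords.shape[0], "occupied voxels;", counts.sum(), "points total")
assert counts.sum() == pts.shape[0]
```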
zmcx16/ReclassifyAnimeCG | [
"f5f95b229447564502564d9ffc7edf6215fec83d"
] | [
"src/data/dataset.py"
] | [
"import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom PIL import Image\nImage.MAX_IMAGE_PIXELS = None\n\nfrom data import get_train_transform, get_test_transform\n\n\nclass CustomDataset(Dataset):\n img_aug = True\n imgs = []\n transform = None\n\n def __init__(self, label_file, image_set, input_size):\n with open(label_file, 'r', encoding=\"utf-8\") as f:\n self.imgs = list(map(lambda line: line.strip().split('|'), f))\n\n if image_set == 'train':\n self.transform = get_train_transform(size=input_size)\n else:\n self.transform = get_test_transform(size=input_size)\n self.input_size = input_size\n\n def __getitem__(self, index):\n # print(self.imgs)\n # print(index)\n # print(len(self.imgs[index]))\n img_path, label = self.imgs[index]\n # print(img_path)\n img = Image.open(img_path).convert('RGB')\n if self.img_aug:\n img = self.transform(img)\n else:\n img = np.array(img)\n img = torch.from_numpy(img)\n\n return img, torch.from_numpy(np.array(int(label)))\n \n def __len__(self):\n return len(self.imgs)\n\n\ndef get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):\n _dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)\n _dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n return _dataset, _dataloader\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
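`CustomDataset` above expects a label file with one `image_path|label` entry per line and hands `(image_tensor, label)` pairs to a `DataLoader`. A sketch of that contract with the image loading replaced by random tensors so it runs without files on disk; the paths and labels are invented:

```python
import io
import torch
from torch.utils.data import Dataset, DataLoader

# Same parsing as CustomDataset.__init__: strip each line, split on '|'.
label_file = io.StringIO("imgs/a.png|0\nimgs/b.png|2\nimgs/c.png|1\n")
entries = [line.strip().split('|') for line in label_file]   # [[path, label], ...]
print(entries)

class ToyDataset(Dataset):
    """Stands in for CustomDataset: same (image_tensor, label) output shape."""
    def __init__(self, entries, input_size=224):
        self.entries = entries
        self.input_size = input_size

    def __len__(self):
        return len(self.entries)

    def __getitem__(self, index):
        _path, label = self.entries[index]
        img = torch.rand(3, self.input_size, self.input_size)   # fake RGB image
        return img, torch.tensor(int(label))

loader = DataLoader(ToyDataset(entries), batch_size=2, shuffle=True, num_workers=0)
for imgs, labels in loader:
    print(imgs.shape, labels)
```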
stormymcstorm/condensa | [
"c7321e0a362f73eca9349769b341a7dd688ee1b9"
] | [
"test/schemes/test_qz.py"
] | [
"# Copyright 2019 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nimport condensa\nfrom condensa import schemes\n\ndef test_float16(device):\n scheme = schemes.Quantize(condensa.float16)\n fc = torch.nn.Linear(100, 10).float().to(device)\n\n scheme.pi(fc)\n assert fc.weight.dtype == torch.float16\n scheme.delta(fc)\n assert fc.weight.dtype == torch.float32\n\nif __name__ == '__main__':\n test_float16('cpu')\n if torch.cuda.is_available():\n test_float16('cpu')\n"
] | [
[
"torch.nn.Linear",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
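The condensa test above asserts that `scheme.pi()` casts a Linear layer's weights to float16 and `scheme.delta()` restores float32; its `__main__` block re-runs the `'cpu'` case when CUDA is available, where passing `'cuda'` was presumably intended. A condensa-free sketch of the same dtype round-trip using plain torch casts, which only mirrors the assertions, not condensa's Quantize scheme:

```python
import torch

def roundtrip(device: str) -> None:
    fc = torch.nn.Linear(100, 10).float().to(device)
    fc.half()                                   # analogous to scheme.pi(fc)
    assert fc.weight.dtype == torch.float16
    fc.float()                                  # analogous to scheme.delta(fc)
    assert fc.weight.dtype == torch.float32

roundtrip('cpu')
if torch.cuda.is_available():
    roundtrip('cuda')
```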
mathischeap/mifem | [
"3242e253fb01ca205a76568eaac7bbdb99e3f059",
"3242e253fb01ca205a76568eaac7bbdb99e3f059",
"3242e253fb01ca205a76568eaac7bbdb99e3f059",
"3242e253fb01ca205a76568eaac7bbdb99e3f059",
"3242e253fb01ca205a76568eaac7bbdb99e3f059",
"3242e253fb01ca205a76568eaac7bbdb99e3f059"
] | [
"objects/CSCG/_3d/forms/standard/base/export/field.py",
"objects/CSCG/_3d/forms/trace/_2tr/discretize/scalar/boundary_wise.py",
"tools/deprecated/serial_runners/COMPONENTS/data/COMPONENTS/MODULES/m_tir_visualize.py",
"objects/CSCG/_3d/forms/standard/base/dofs/dof/basis_function.py",
"objects/CSCG/_3d/forms/standard/base/dofs/dof/visualize/matplot/_2sf.py",
"objects/CSCG/_2d/forms/standard/_2_form/base/reconstruct.py"
] | [
"\"\"\"We want to export the field to some data files.\n\"\"\"\n\nfrom root.config.main import *\nfrom screws.freeze.main import FrozenOnly\nfrom screws.miscellaneous.timer import check_filename, check_no_splcharacter\nfrom scipy.io import savemat\n\n\n\nclass _3dCSC_SF_Export_Field(FrozenOnly):\n \"\"\"\"\"\"\n\n def __init__(self, sf):\n \"\"\"\"\"\"\n assert '3dCSCG_standard_form' in sf.standard_properties.tags\n self._sf_ = sf\n self._freeze_self_()\n\n\n def to_file(self, filename, numOfSamples=1e6, regions=None):\n \"\"\"\"\"\"\n filename, extension = check_filename(filename)\n if extension is None: extension = 'txt'\n\n supported_formats = ('txt', 'mat')\n assert extension in supported_formats, \\\n f\"format={extension} is not among the supported formats {supported_formats}.\"\n\n if isinstance(numOfSamples, (int, float)):\n assert numOfSamples > 0, f\"numOfSamples={numOfSamples} is wrong.\"\n numOfSamples = [numOfSamples, numOfSamples, numOfSamples]\n else:\n assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \\\n f\"numOfSamples={numOfSamples} wrong.\"\n for nos in numOfSamples:\n assert isinstance(nos, (int, float)) and nos > 0, f\"numOfSamples={numOfSamples} wrong.\"\n\n mesh = self._sf_.mesh\n\n if regions is None:\n regions = mesh.domain.regions.names\n elif isinstance(regions, str):\n regions = [regions,]\n else:\n pass\n assert isinstance(regions, (list, tuple)), f\"regions={regions} is wrong.\"\n assert len(set(regions)) == len(regions), f\"regions={regions} has repeated regions.\"\n for i, r in enumerate(regions):\n assert r in mesh.domain.regions, f\"regions[{i}]={r} is wrong.\"\n\n rst = list()\n for i in range(3):\n density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1\n interval = 2 / density\n rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))\n\n xyz, v = self._sf_.reconstruct(*rst, regions=regions)\n\n # Now, we gather xyz & v from all cores into Master Core, store in XYZ & V --- BELOW ---\n if rAnk == mAster_rank:\n X = [None for _ in range(mesh.elements.GLOBAL_num)]\n Y = [None for _ in range(mesh.elements.GLOBAL_num)]\n Z = [None for _ in range(mesh.elements.GLOBAL_num)]\n Vx = [None for _ in range(mesh.elements.GLOBAL_num)]\n if self._sf_.k in (1, 2):\n Vy = [None for _ in range(mesh.elements.GLOBAL_num)]\n Vz = [None for _ in range(mesh.elements.GLOBAL_num)]\n for j in mesh.elements.indices:\n X[j] = xyz[j][0]\n Y[j] = xyz[j][1]\n Z[j] = xyz[j][2]\n Vx[j] = v[j][0]\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n Vy[j] = v[j][1]\n # noinspection PyUnboundLocalVariable\n Vz[j] = v[j][2]\n for i in sLave_ranks:\n xyz, v = cOmm.recv(source=i, tag=0)\n for j in xyz:\n X[j] = xyz[j][0]\n Y[j] = xyz[j][1]\n Z[j] = xyz[j][2]\n Vx[j] = v[j][0]\n if self._sf_.k in (1, 2):\n Vy[j] = v[j][1]\n Vz[j] = v[j][2]\n del xyz, v\n else:\n cOmm.send([xyz, v], dest=mAster_rank, tag=0)\n del xyz, v\n\n # Now, we reshape the XYZ and V for export in the master core. 
-------- BELOW ----------\n if rAnk == mAster_rank:\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)\n else:\n # noinspection PyUnboundLocalVariable\n X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)\n\n for rn in regions:\n assert rn in X and rn in Y and rn in Z, \"Data not full!\"\n\n x, y, z = X[rn], Y[rn], Z[rn]\n if self._sf_.k in (1, 2):\n vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]\n else:\n # noinspection PyUnboundLocalVariable\n vx = V[rn]\n\n # we take care of the file names ------------------ BELOW -----------------------\n RN = rn[2:] # if regions name is R:center, we select\n assert check_no_splcharacter(RN), f\"region name={RN} wrong.\"\n\n FILE_NAME = filename + '__InRegion_' + RN\n if self._sf_.k in (1, 2):\n FILE_NAME += '__x_y_z_vx_vy_vz'\n else:\n FILE_NAME += '__x_y_z_v'\n FILE_NAME = FILE_NAME + '.' + extension\n\n\n # It's time to do the save or writing ------------------- BELOW -----------------\n\n if extension == 'txt':\n # for .txt, we have to flat the data =====================\n x = x.ravel(order='F')[:,np.newaxis]\n y = y.ravel(order='F')[:,np.newaxis]\n z = z.ravel(order='F')[:,np.newaxis]\n if self._sf_.k in (1, 2):\n vx = vx.ravel(order='F')[:,np.newaxis]\n # noinspection PyUnboundLocalVariable\n vy = vy.ravel(order='F')[:,np.newaxis]\n # noinspection PyUnboundLocalVariable\n vz = vz.ravel(order='F')[:,np.newaxis]\n else:\n vx = vx.ravel(order='F')[:,np.newaxis]\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))\n else:\n TO_BE_WRITTEN = np.hstack((x, y, z, vx))\n # noinspection PyTypeChecker\n np.savetxt(FILE_NAME, TO_BE_WRITTEN)\n\n elif extension == 'mat':\n # for .mat, we save 3-d arrays. ==========================\n m_dic = dict()\n m_dic['x'] = x\n m_dic['y'] = y\n m_dic['z'] = z\n if self._sf_.k in (1, 2):\n m_dic['vx'] = vx\n m_dic['vy'] = vy\n m_dic['vz'] = vz\n else:\n m_dic['v'] = vx\n\n savemat(FILE_NAME, m_dic)\n\n else:\n raise Exception(f\"Format=.{extension} is not supported.\")",
"\nimport sys\nif './' not in sys.path: sys.path.append('./')\n\n\nfrom screws.freeze.base import FrozenOnly\nimport numpy as np\nfrom screws.quadrature import Quadrature\n\n\n\nclass _3dCSCG_2Trace_Discretize_BoundaryWiseScalar(FrozenOnly):\n \"\"\"\"\"\"\n def __init__(self, tf):\n self._tf_ = tf\n self.___cache_DISCRETIZE_STANDARD___ = None\n self._freeze_self_()\n\n def __call__(self, quad_degree=None):\n \"\"\"\n 'locally full local TEW cochain' means the cochain is a dict whose keys are trace-element\n numbers and values are trace-element-wise local cochains.\n \"\"\"\n SELF = self._tf_\n\n if self.___cache_DISCRETIZE_STANDARD___ is None or \\\n self.___cache_DISCRETIZE_STANDARD___['quadDegree'] != quad_degree:\n p = [SELF.dqp[i] + 1 for i in range(SELF.ndim)] if quad_degree is None else quad_degree\n quad_nodes, quad_weights = Quadrature(p, category='Gauss').quad\n nodes = SELF.space.nodes\n num_edges = [len(nodes[i])-1 for i in range(SELF.ndim)]\n lens = [nodes[i][1:]-nodes[i][0:-1] for i in range(SELF.ndim)]\n qnodes = []\n for i in range(SELF.ndim):\n qnodes_i = ((np.array(quad_nodes[i])+1)/2)[np.newaxis,:].repeat(num_edges[i],\n axis=0)*lens[i][:,np.newaxis]\n qnodes_i += np.array(nodes[i][:-1])[:,np.newaxis].repeat(p[i]+1, axis=1)\n qnodes.append(qnodes_i)\n # NS sides\n qn_NS_y = []\n qn_NS_z = []\n for k in range(SELF.p[2]):\n for j in range(SELF.p[1]):\n qn_NS_y.append(qnodes[1][j][:,np.newaxis].repeat(p[2]+1, axis=1))\n qn_NS_z.append(qnodes[2][k][np.newaxis,:].repeat(p[1]+1, axis=0))\n qn_NS_y, qn_NS_z = np.array(qn_NS_y), np.array(qn_NS_z)\n area_NS = np.kron(lens[2], lens[1]) * 0.25\n # WE sides\n qn_WE_x = []\n qn_WE_z = []\n for k in range(SELF.p[2]):\n for i in range(SELF.p[0]):\n qn_WE_x.append(qnodes[0][i][:,np.newaxis].repeat(p[2]+1, axis=1))\n qn_WE_z.append(qnodes[2][k][np.newaxis,:].repeat(p[0]+1, axis=0))\n qn_WE_x, qn_WE_z = np.array(qn_WE_x), np.array(qn_WE_z)\n area_WE = np.kron(lens[2], lens[0]) * 0.25\n # BF sides\n qn_BF_x = []\n qn_BF_y = []\n for j in range(SELF.p[1]):\n for i in range(SELF.p[0]):\n qn_BF_x.append(qnodes[0][i][:,np.newaxis].repeat(p[1]+1, axis=1))\n qn_BF_y.append(qnodes[1][j][np.newaxis,:].repeat(p[0]+1, axis=0))\n qn_BF_x, qn_BF_y = np.array(qn_BF_x), np.array(qn_BF_y)\n area_BF = np.kron(lens[1], lens[0]) * 0.25\n\n cd = dict()\n cd['quadDegree'] = quad_degree\n cd['qn_NS_y'] = qn_NS_y\n cd['qn_NS_z'] = qn_NS_z\n cd['area_NS'] = area_NS\n cd['qn_WE_x'] = qn_WE_x\n cd['qn_WE_z'] = qn_WE_z\n cd['area_WE'] = area_WE\n cd['qn_BF_x'] = qn_BF_x\n cd['qn_BF_y'] = qn_BF_y\n cd['area_BF'] = area_BF\n cd['quad_weights'] = quad_weights\n self.___cache_DISCRETIZE_STANDARD___ = cd\n else:\n qn_NS_y = self.___cache_DISCRETIZE_STANDARD___['qn_NS_y']\n qn_NS_z = self.___cache_DISCRETIZE_STANDARD___['qn_NS_z']\n area_NS = self.___cache_DISCRETIZE_STANDARD___['area_NS']\n qn_WE_x = self.___cache_DISCRETIZE_STANDARD___['qn_WE_x']\n qn_WE_z = self.___cache_DISCRETIZE_STANDARD___['qn_WE_z']\n area_WE = self.___cache_DISCRETIZE_STANDARD___['area_WE']\n qn_BF_x = self.___cache_DISCRETIZE_STANDARD___['qn_BF_x']\n qn_BF_y = self.___cache_DISCRETIZE_STANDARD___['qn_BF_y']\n area_BF = self.___cache_DISCRETIZE_STANDARD___['area_BF']\n quad_weights = self.___cache_DISCRETIZE_STANDARD___['quad_weights']\n\n assert SELF.BC.body is not None, f\"No BC.body!\"\n FUNC = SELF.BC.body\n RANGE_trace_elements = SELF.mesh.boundaries.range_of_trace_elements\n local_TEW = dict()\n for bn in FUNC:\n func_bn = FUNC[bn]\n trace_elements = RANGE_trace_elements[bn]\n _lf_ = 
func_bn[0]\n for i in trace_elements:\n te = SELF.mesh.trace.elements[i]\n ele = te.CHARACTERISTIC_element\n ele_side = te.CHARACTERISTIC_side\n if ele_side in 'NS':\n qn0, qn1 = qn_NS_y, qn_NS_z\n qw0, qw1 = quad_weights[1], quad_weights[2]\n area = area_NS\n x, y, z = te.coordinate_transformation.mapping(qn0, qn1, from_element=ele, side=ele_side)\n g = te.coordinate_transformation.metric(qn0, qn1)\n elif ele_side in 'WE':\n qn0, qn1 = qn_WE_x, qn_WE_z\n qw0, qw1 = quad_weights[0], quad_weights[2]\n area = area_WE\n x, y, z = te.coordinate_transformation.mapping(qn0, qn1, from_element=ele, side=ele_side)\n g = te.coordinate_transformation.metric(qn0, qn1)\n elif ele_side in 'BF':\n qn0, qn1 = qn_BF_x, qn_BF_y\n qw0, qw1 = quad_weights[0], quad_weights[1]\n area = area_BF\n x, y, z = te.coordinate_transformation.mapping(qn0, qn1, from_element=ele, side=ele_side)\n g = te.coordinate_transformation.metric(qn0, qn1)\n else:\n raise Exception()\n\n f = _lf_(x, y, z)\n sqrt_g = np.sqrt(g)\n te_primal_local = np.einsum('mij, i, j, m -> m', f * sqrt_g,\n qw0, qw1, area,\n optimize='greedy')\n if not SELF.space.IS_Kronecker: raise NotImplementedError()\n assert i not in local_TEW, f\"Trace element #{i} can only appear once (be on one mesh boundary).\"\n local_TEW[i] = te_primal_local\n\n # 'locally full local TEW cochain': provide cochain.local_TEW and for all dofs on the trace element.\n return 'locally full local TEW cochain', local_TEW\n\n\n\n\n\nif __name__ == '__main__':\n # mpiexec -n 5 python _3dCSCG\\forms\\trace\\_2_trace\\discretize\\scalar\\boundary_wise.py\n\n from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller\n\n mesh = MeshGenerator('crazy', c=0.)([2,2,2])\n space = SpaceInvoker('polynomials')([('Lobatto',5), ('Lobatto',5), ('Lobatto',5)])\n FC = FormCaller(mesh, space)",
"# -*- coding: utf-8 -*-\n\"\"\"\nINTRO\n\nYi Zhang (C)\nCreated on Wed Apr 17 22:34:33 2019\nAerodynamics, AE\nTU Delft\n\"\"\"\nimport numpy as np\n#import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom typing import List\n\n\n\n\nclass M_TIR_Visualize:\n def ___plot_MTIR___(self, plot_type, line_var, res2plot, \n prime='input2',\n hcp=None, show_order=False, order_text_size=18, plot_order_triangle=None, # h-convergence plot related\n title=None, left=0.15, bottom=0.15, \n ylabel=None, yticks=None, \n xlabel=None, xticks=None,\n linewidth=1.2, corlormap='viridis',\n styles=None, colors=None, COLORS=None,\n labels=None, legend_local='best', legend_frame=False,\n minor_tick_length=6, major_tick_length=12, tick_pad=8,\n tick_size=20, label_size=20, legend_size=20, title_size=20, title_pad=12,\n figsize=(7.5,5), usetex=False, saveto=None):\n \"\"\" \n IMPORTANT: this ploter only works for input's criterion is 'standard'.\n \n Here we wrap `plot`, `semilogx`, `semilogy`, `loglog`.\n \n We use the `matplotlib` to do the plot.\n \n Parameters\n ----------\n plot_type : str\n This is the indicator of which type of plot will it be.\n line_var :\n Notice `line_var` is not the x-axis name. It refers to how many lines we \n want to have. It can only be `input_names[0]` or `input_names[1]`. When\n it is `input_names[0]`, the x-axis is `input_names[1]`, and when it is [1]\n the x-axis is [0].\n res2plot :\n y-axis. We plot `res2plot` against `line_var`. \n prime : str, optional\n If `prime` == 'line_var', we use `data_sequence_line_var`. If `prime` = \n 'input2', we will use `data_sequence__input2`.\n hcp :\n `h-convergence parameter`. We will use `x_data` = `hcp`/`x_data`. This is\n usually used for h-convergence plot, so we call it `hcp`.\n plot_order_triangle :\n If it is not False, we will plot triangles to illustrate the order of plot\n lines.\n \n We will parse `plot_order_triangle` to get parameters for the triangle. 
For\n the data structure of `plot_order_triangle`, we refer to method\n `___plot_MTIR_plot_order_triangle___`\n COLORS:\n The color sequence of all lines regardless of the group of the line.\n \n \"\"\"\n # noinspection PyUnresolvedReferences\n D = self._data_\n #_____________check `line_var` and `res2plot`___________________________________\n # noinspection PyUnresolvedReferences\n input_names = self._dfw_._runner_.input_names\n # noinspection PyUnresolvedReferences\n output_names = self._dfw_._runner_.output_names\n if isinstance(res2plot, str): # we make it possible to plot more than one result.\n res2plot = (res2plot,)\n assert all([res2plot[i] in output_names for i in range(len(res2plot))]), \\\n \" <RunnerVisualize> : res2plot={} is wrong.\".format(res2plot)\n assert line_var in input_names[:2], \" <RunnerVisualize> : line_var={} is wrong.\".format(line_var)\n line_var_index = input_names.index(line_var)\n x_index = 0 if line_var_index == 1 else 1\n x_name = input_names[x_index]\n # group the plot data: the data will be grouped into a tuple called `data_sequence`\n num_lines = 0\n data_sequence_line_var = () # `i2` changes in each `line_var`.\n data_sequence_inputs2 = () # `line_var` changes in each `i2`.\n if prime == 'line_var':\n for ai in set(D[line_var]):\n sub_rdf = D[D[line_var]==ai]\n for i2i in set(D[input_names[2]][D[line_var]==ai]):\n data_sequence_line_var += (sub_rdf[sub_rdf[input_names[2]]==i2i],)\n num_lines += 1\n data_sequence = data_sequence_line_var\n elif prime == 'input2':\n for i2i in set(D[input_names[2]]):\n sub_rdf = D[D[input_names[2]]==i2i]\n for ai in set(D[line_var][D[input_names[2]]==i2i]):\n data_sequence_inputs2 += (sub_rdf[sub_rdf[line_var]==ai],)\n num_lines += 1\n data_sequence = data_sequence_inputs2\n else:\n raise Exception(\" <RunnerVisualize> : prime={} is wrong.\".format(prime))\n #_____ prepare styles, colors, labels If they are none.________________________\n num_res2plot = len(res2plot)\n num_lines = len(data_sequence)\n # noinspection PyUnresolvedReferences\n line_groups = self._dfw_._runner_.input_shape[2]\n num_lines_per_group = int(num_lines/line_groups)\n if styles is None:\n styles = ('-^', '-x', '-o', '-s', '-*', '-8', '->', '-p', \n '-H', '-h', '->', '-D', '-d', '-P', '-v') * 5\n if colors is None:\n color = cm.get_cmap(corlormap, line_groups)\n colors = []\n for j in range(line_groups):\n colors.append(color(j))\n \n if labels is None:\n labels = ()\n for i in range(line_groups):\n for j in range(num_lines_per_group):\n k = j + i * num_lines_per_group\n list_line_var = data_sequence[k][line_var].tolist()\n assert all([i == list_line_var[0] for i in list_line_var[1:]]), \\\n \" <RunnerVisualize> : data grouping is wrong for %r\"%data_sequence[k]\n list_input2 = data_sequence[k][input_names[2]].tolist()\n assert all([i == list_input2[0] for i in list_input2[1:]]), \\\n \" <RunnerVisualize> : data grouping is wrong for %r\"%data_sequence[k]\n if num_res2plot == 1:\n labels += (line_var+'='+str(list_line_var[0])+', '+\n input_names[2]+'='+str(list_input2[0]).replace('_','-'),)\n else:\n for m in range(num_res2plot):\n labels += (line_var+'='+str(list_line_var[0])+', '+\n input_names[2]+'='+str(list_input2[0])+', '+\n res2plot[m].replace('_','-'),)\n labels = list(labels)\n #___ preparing orders _________________________________________________________\n orders : List[float] = [0.0 for _ in range(len(labels))]\n #___ pre-parameterize the plot_________________________________________________\n plt.rc('text', usetex=usetex)\n if usetex: 
plt.rcParams['text.latex.preamble']= r\"\\usepackage{amsmath}\"\n plt.figure(figsize=figsize)\n plt.gcf().subplots_adjust(left=left)\n plt.gcf().subplots_adjust(bottom=bottom)\n #__ find the range of x_data___________________________________________________\n xd_max = ()\n xd_min = ()\n ploter = getattr(plt, plot_type) # we get the ploter from matplotlib\n for i in range(line_groups):\n for j in range(num_lines_per_group):\n k = j + i*num_lines_per_group\n xdata2plot = np.array(data_sequence[k][x_name].tolist())\n if hcp is not None:\n xdata2plot = hcp / xdata2plot\n xd_max += (np.max(xdata2plot),)\n xd_min += (np.min(xdata2plot),)\n xd_max = np.min(xd_max)\n xd_min = np.min(xd_min)\n x_range = (xd_max, xd_min)\n #___ do THE PLOT_______________________________________________________________\n for i in range(line_groups):\n for j in range(num_lines_per_group):\n k = j + i*num_lines_per_group\n xdata2plot = np.array(data_sequence[k][x_name].tolist())\n if hcp is not None:\n xdata2plot = hcp / xdata2plot\n for m in range(num_res2plot):\n n = m + j*num_res2plot + i*num_res2plot*num_lines_per_group\n J = m + j*num_res2plot\n N = m + j*num_res2plot + i*num_res2plot*num_lines_per_group\n ydata2plot = data_sequence[k][res2plot[m]]\n #__add order to label______________________________________________\n if show_order:\n try:\n orders[n] = (np.log10(ydata2plot.values[-1])- np.log10(ydata2plot.values[-2])) /\\\n (np.log10(xdata2plot[-1])-np.log10(xdata2plot[-2]))\n except IndexError:\n orders[n] = float('nan')\n labels[n] += ', order$={}$'.format('%0.2f'%(orders[n]))\n #___ get data and plot the triangle that shows the order___________\n if plot_order_triangle is not None:\n # __ check_____________________________________________________\n assert isinstance(plot_order_triangle, dict), \\\n \" <RunnerVisualize> : plot_order_triangle needs to be a dict.\"\n #__compute_data________________________________________________\n if n in plot_order_triangle:\n potn = plot_order_triangle[n]\n assert isinstance(potn, dict), \\\n \" <RunnerVisualize> : plot_order_triangle[{}] needs to be a dict.\".format(n)\n c0, c1, c2, textpos, ordern = self.___plot_MTIR_plot_order_triangle___(\n plot_type, x_range, potn, xdata2plot, ydata2plot.values)\n #___plot triangle______________________________________________\n c0x, c0y = c0\n c1x, c1y = c1\n c2x, c2y = c2\n plt.fill_between([c0x, c1x], [c0y, c1y], [c0y, c2y], color='grey', alpha=0.5)\n if isinstance(ordern, int):\n plt.text(textpos[0], textpos[1], \"${}$\".format(ordern), fontsize=order_text_size)\n else:\n plt.text(textpos[0], textpos[1], \"${}$\".format('%0.2f'%ordern), fontsize=order_text_size)\n #--------------------------------------------------------------\n #------------------------------------------------------------------\n if COLORS is None:\n ploter(xdata2plot, ydata2plot, styles[J], \n color=colors[i], label=labels[n], linewidth=linewidth)\n else:\n ploter(xdata2plot, ydata2plot, styles[J], \n color=COLORS[N], label=labels[n], linewidth=linewidth)\n #___ post-parameterize the plot________________________________________________\n plt.tick_params(which='both', labeltop=False, labelright=False, top=True, right=True)\n plt.tick_params(axis='both', which='minor', direction='in', length=minor_tick_length)\n plt.tick_params(axis='both', which='major', direction='in', length=major_tick_length)\n plt.tick_params(axis='both', which='both', labelsize=tick_size)\n plt.tick_params(axis='x', which='both', pad=tick_pad)\n plt.tick_params(axis='y', which='both', 
pad=tick_pad)\n plt.legend(fontsize=legend_size, loc=legend_local, frameon=legend_frame)\n if xlabel is not None: \n plt.xlabel(xlabel, fontsize=label_size)\n else:\n if hcp is None:\n plt.xlabel(x_name, fontsize=label_size)\n else:\n plt.xlabel(str(hcp)+'/'+x_name, fontsize=label_size)\n if ylabel is not None: plt.ylabel(ylabel, fontsize=label_size)\n if xticks is not None: plt.xticks(xticks)\n if yticks is not None: plt.yticks(yticks)\n if title is None: \n if len(res2plot) == 1:\n plt.title(r'' + res2plot[0].replace('_','-'), fontsize=title_size, pad=title_pad)\n else:\n plt.title(r'' + str(res2plot).replace('_','-'), fontsize=title_size, pad=title_pad)\n elif title is False:\n pass\n else:\n plt.title(r'' + title, fontsize=title_size, pad=title_pad)\n\n\n plt.tight_layout()\n if saveto is not None and saveto != '':\n plt.savefig(saveto, bbox_inches='tight')\n else:\n plt.show()\n\n plt.close()\n #------------------------------------------------------------------------------\n return\n \n def ___plot_MTIR_plot_order_triangle___(self, plot_type, x_range, potn, xd, yd):\n \"\"\" \n We plot a triangle along the the line indicated by `xd`, `yd`.\n \n Parameters\n ----------\n plot_type :\n x_range :\n The x_range of whole xd's for all lines.\n potn : dict or True\n The information that is going to use for the triangle.\n \n It should have following keys (optional):\n \"p\" : a tuple of shape (2,), like (a, b) and 0 <= a, b <= 1. (a, b) \n represents the pointing corner of the triangle. And (a, b) is the \n distance from the last value point.\n \"tp\":\n A tuple of shape (2,), like (c,d), (c,d) decides the position of \n the text (the order number).\n \"l\": the length of the bottom line: -1 <= l <= 1.\n It means we will draw a horizontal line, starting from 'p',\n going toward right (l>0) or left (l<0), whose length is \n abs(l)*`total_length_of_plot_horizontal_length`.\n \"h\": the height; the order. # NOT USED KEY!!!\n if \"h\" > 0, the height goes up, if \"h\" < 0, it goes down. 
Both from\n the edge point of the horizontal line.\n \"h\" means the height of the height line is `l*h`, of course, the \n scale of x-y axes are considered.\n \"order\": the order text shown besides the triangle.\n \n xd :\n The x-axis data to plot for this line.\n yd :\n The y-axis data to plot for this line.\n \n \"\"\"\n #___default \"p\"________________________________________________________________\n if \"p\" not in potn:\n potn[\"p\"] = (0, -0.3)\n if \"tp\" not in potn:\n potn[\"tp\"] = (0.02, 0.2)\n #___default \"l\"________________________________________________________________\n if \"l\" not in potn:\n potn[\"l\"] = 0.1\n #___default \"h\"________________________________________________________________\n if \"order\" not in potn:\n potn[\"order\"] = (np.log10(yd[-1]) - np.log10(yd[-2])) / (np.log10(xd[-1]) - np.log10(xd[-2]))\n #___ loglog____________________________________________________________________\n if plot_type == 'loglog':\n x_max, xmin = x_range\n x_range = np.log10(x_max) - np.log10(xmin)\n origin = (xd[-1], yd[-1])\n otc0x = np.log10(origin[0]) + x_range*potn[\"p\"][0] \n otc0x = 10**otc0x \n otc0y = np.log10(origin[1]) + x_range*potn[\"p\"][1] \n otc0y = 10**otc0y \n otc0 = (otc0x, otc0y) # order_triangle_corner_0\n otc1x = np.log10(otc0x) + x_range*potn[\"l\"]\n otc1x = 10**otc1x \n otc1 = (otc1x, otc0y) # order_triangle_corner_0\n otc2y = np.log10(otc0y) + x_range*potn[\"l\"]*potn[\"order\"]\n otc2y = 10**otc2y\n otc2 = (otc1x, otc2y) # order_triangle_corner_0\n ttps_x, ttps_y = potn[\"tp\"]\n textpos_x = 10**(np.log10(otc1x) + x_range*ttps_x)\n textpos_y = 10**(np.log10(otc0y) + x_range*potn[\"l\"]*potn[\"order\"]*ttps_y)\n order = potn[\"order\"]\n return otc0, otc1, otc2, (textpos_x, textpos_y), order\n #___ELSE: ERRORING_____________________________________________________________\n else:\n raise NotImplementedError(\" <plot_order_triangle> does not work for {} plot\".format(plot_type))",
"\"\"\"The class for the basis function of a dof (not dofs).\"\"\"\n\n\nfrom screws.freeze.main import FrozenOnly\nimport numpy as np\n\n\nclass _3dCSCG_SF_DOF_BF(FrozenOnly):\n \"\"\"\"\"\"\n def __init__(self, dof):\n self._dof_ = dof\n self._sf_ = dof._sf_\n self._mesh_ = self._sf_.mesh\n self._space_ = self._sf_.space\n self._freeze_self_()\n\n\n def reconstruct(self, xi, et, sg, ravel=False):\n \"\"\"We reconstruct this single basis function in the corresponding local mesh element(s).\n\n :param xi: must be increasing 1d array in [-1,1].\n :param et: must be increasing 1d array in [-1,1].\n :param sg: must be increasing 1d array in [-1,1].\n :param ravel: If ravelled, we return flat results, otherwise, we return 3d results.\n :return: A tuple of reconstructions corresponding to the positions.\n \"\"\"\n positions = self._dof_.positions # get all local positions.\n\n if positions == list():\n return tuple(), tuple()\n\n k = self._sf_.k\n mesh = self._mesh_\n space = self._space_\n XYZ = tuple()\n IN_SITE_BF = tuple()\n\n shape = [len(xi), len(et), len(sg)]\n xietasigma, RBF= space.DO_evaluate_form_basis_at_meshgrid(k, xi, et ,sg, compute_xietasigma=True)\n RBF = np.vstack(RBF)\n for POS in positions:\n E, I = POS\n rbf = RBF[I] # find the bf for this position\n ME = mesh.elements[E] # the mesh-element of this position.\n\n #----------- do the reconstruction according to the k ------------------------------------------\n if k == 0:\n xyz, in_site_bf = self.___PRIVATE_reconstruct_0_bf___(xietasigma, rbf, ME)\n else:\n raise NotImplementedError()\n #---------- post process the data ---------------------------------------------------------------\n if ravel:\n pass\n else:\n xyz = [xyz[_].reshape(shape, order='F') for _ in range(3)]\n in_site_bf = [in_site_bf[_].reshape(shape, order='F') for _ in range(len(in_site_bf))]\n #============================================================================================\n\n XYZ += (xyz,)\n IN_SITE_BF += (in_site_bf,)\n\n return XYZ, IN_SITE_BF\n\n @staticmethod\n def ___PRIVATE_reconstruct_0_bf___(xi_et_sg, rbf, ME):\n \"\"\"We make the in-site bf from the rbf (reference bf) for the 0-standard-form.\n\n :param xi_et_sg: rbf is evaluated at meshgrid(*xi_et_sg).\n :param rbf: reference bf\n :param ME: this bf is in this Mesh-Element #.\n :return: The in-site bf: xyz and bfv (basis function value).\n \"\"\"\n xyz = ME.coordinate_transformation.mapping(*xi_et_sg)\n return xyz, (rbf,) # for 0-standard-form, in-site-bf is equal to reference bf.\n def ___PRIVATE_reconstruct_1_bf___(self, xi_et_sg, rbf, ME):\n \"\"\"We make the in-site bf from the rbf (reference bf) for the 1-standard-form.\n\n :param xi_et_sg: rbf is evaluated at meshgrid(*xi_et_sg).\n :param rbf: reference bf\n :param ME: this bf is in this Mesh-Element #.\n :return: The in-site bf: xyz and bfv (basis function value).\n \"\"\"\n raise NotImplementedError()\n def ___PRIVATE_reconstruct_2_bf___(self, xi_et_sg, rbf, ME):\n \"\"\"We make the in-site bf from the rbf (reference bf) for the 2-standard-form.\n\n :param xi_et_sg: rbf is evaluated at meshgrid(*xi_et_sg).\n :param rbf: reference bf\n :param ME: this bf is in this Mesh-Element #.\n :return: The in-site bf: xyz and bfv (basis function value).\n \"\"\"\n raise NotImplementedError()\n def ___PRIVATE_reconstruct_3_bf___(self, xi_et_sg, rbf, ME):\n \"\"\"We make the in-site bf from the rbf (reference bf) for the 3-standard-form.\n\n :param xi_et_sg: rbf is evaluated at meshgrid(*xi_et_sg).\n :param rbf: reference bf\n :param ME: this 
bf is in this Mesh-Element #.\n :return: The in-site bf: xyz and bfv (basis function value).\n \"\"\"\n raise NotImplementedError()\n\n\n\n\n def visualize(self, *args, **kwargs):\n \"\"\"We use matplotlib to visualize the basis function of this dof in-site.\n\n \"in-site\" means we will visualize it in the corresponding mesh element instead of the\n reference domain.\n\n Cause a dof instance is already local (may in multiple cores), we will call this visualizing\n method locally.\n \"\"\"\n return getattr(self, f\"___PRIVATE_visualize_{self._sf_.k}form___\")(*args, **kwargs)\n\n def ___PRIVATE_visualize_0form___(self, density=1000):\n \"\"\"\"\"\"\n # density = int(density**(1/3)) + 1\n # xi = eta = sigma = np.linspace(-1, 1, density)\n raise NotImplementedError()\n def ___PRIVATE_visualize_1form___(self, density=1000):\n \"\"\"\"\"\"\n raise NotImplementedError()\n def ___PRIVATE_visualize_2form___(self, density=1000):\n \"\"\"\"\"\"\n raise NotImplementedError()\n def ___PRIVATE_visualize_3form___(self, density=1000):\n \"\"\"\"\"\"\n raise NotImplementedError()\n",
"\nfrom root.config.main import *\nimport matplotlib.pyplot as plt\nfrom screws.freeze.main import FrozenOnly\n\n\nclass _3dCSCG_SF_DOF_VISUALIZE_matplot_2SF(FrozenOnly):\n \"\"\"\"\"\"\n def __init__(self, dof):\n \"\"\"\"\"\"\n self._dof_ = dof\n self._mesh_ = dof._sf_.mesh\n self._sf_ = dof._sf_\n self._freeze_self_()\n\n def __call__(self, *args, **kwargs):\n \"\"\"We plot this dof of a standard 0-form.\"\"\"\n if self._sf_.IS.hybrid:\n return self.___PRIVATE_matplot_dof_k_form_IS_hybrid__(*args, **kwargs)\n else:\n return self.___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(*args, **kwargs)\n\n\n def ___PRIVATE_matplot_dof_k_form_IS_hybrid__(self, *args, **kwargs):\n \"\"\"\"\"\"\n f_kwargs = self._sf_.___define_parameters___['kwargs']\n f_kwargs['is_hybrid'] = False\n non_hybrid_form = self._sf_.__class__(self._sf_.mesh, self._sf_.space, **f_kwargs)\n dofs = non_hybrid_form.dofs\n hy_i = self._dof_._i_\n\n GPs = self._dof_.GLOBAL_positions\n assert len(GPs) == 1, f\"trivial check!\"\n\n pos = GPs[0]\n if pos in self._dof_.positions:\n Ele, index = pos\n nhy_i = non_hybrid_form.numbering.gathering[Ele][index]\n else:\n nhy_i = None\n\n nhy_i = cOmm.gather(nhy_i, root=mAster_rank)\n if rAnk == mAster_rank:\n I = 0\n for _ in nhy_i:\n if _ is not None:\n I += 1\n NHY_I = _\n assert I == 1, \"only find one position.\"\n\n nhy_i = NHY_I\n else:\n pass\n\n nhy_i = cOmm.bcast(nhy_i, root=mAster_rank)\n DI = dofs[nhy_i]\n\n assert len(GPs) == 1, f\"A hybrid dof must appear only at 1 place.\"\n GPs = GPs[0]\n position = 'in hybrid ME-' + str(GPs[0])\n if 'title' not in kwargs:\n kwargs['title'] = f\"dof#{hy_i} of {self._sf_.k}-form: {self._sf_.standard_properties.name}, \" + position\n\n\n DI.visualize.matplot(*args, **kwargs)\n\n def ___PRIVATE_matplot_dof_2form_IS_NOT_hybrid__(\n self, density=20, saveto=None, linewidth=0.6, title=None):\n \"\"\"\"\"\"\n positions = self._dof_.positions\n EF = dict()\n for E_I in positions:\n E, I = E_I\n EF[E] = self._sf_.___PRIVATE_element_grid_data_generator_1___(E, density=density)\n GPs = self._dof_.GLOBAL_positions\n Element_Frames = cOmm.gather(EF, root=mAster_rank)\n if rAnk == mAster_rank:\n # ------------ prepare figure -----------------------------------------------------------------\n fig = plt.figure(figsize=(12, 8))\n ax = fig.add_subplot(111, projection='3d')\n # make the panes transparent\n ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n # make the grid lines transparent\n ax.xaxis._axinfo[\"grid\"]['color'] = (1, 1, 1, 0)\n ax.yaxis._axinfo[\"grid\"]['color'] = (1, 1, 1, 0)\n ax.zaxis._axinfo[\"grid\"]['color'] = (1, 1, 1, 0)\n ax.tick_params(labelsize=12)\n ax.set_xlabel(r'$x$', fontsize=15)\n ax.set_ylabel(r'$y$', fontsize=15)\n ax.set_zlabel(r'$z$', fontsize=15)\n # ------ plot element frame -----------------------------------------------------------------\n Element_Frame = dict()\n for ef in Element_Frames:\n Element_Frame.update(ef)\n for e in Element_Frame:\n EFe = Element_Frame[e]\n if 'xLines_x' in EFe:\n X, Y, Z = EFe['xLines_x'], EFe['xLines_y'], EFe['xLines_z']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, z, 'gray', linewidth=linewidth)\n\n if 'yLines_x' in EFe:\n X, Y, Z = EFe['yLines_x'], EFe['yLines_y'], EFe['yLines_z']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, z, 'gray', linewidth=linewidth)\n\n if 'zLines_x' in EFe:\n X, Y, Z = EFe['zLines_x'], EFe['zLines_y'], EFe['zLines_z']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, 
z, 'gray', linewidth=linewidth)\n\n X, Y, Z = EFe['xLines_x_B'], EFe['xLines_y_B'], EFe['xLines_z_B']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, z, 'green', linewidth=linewidth)\n X, Y, Z = EFe['yLines_x_B'], EFe['yLines_y_B'], EFe['yLines_z_B']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, z, 'green', linewidth=linewidth)\n X, Y, Z = EFe['zLines_x_B'], EFe['zLines_y_B'], EFe['zLines_z_B']\n for x, y, z in zip(X, Y, Z):\n plt.plot(x, y, z, 'green', linewidth=linewidth)\n\n x, y, z = EFe['center']['coordinate']\n x, y, z = x[0], y[0], z[0]\n num = EFe['center']['number']\n ax.text(x, y, z, num, color='red', ha='center', va='center', ma='center')\n\n # ------ plot the dof of 2-form ----------------------------------------------------------------\n pos = GPs[0]\n element, index = pos\n i, j, k = np.where(self._sf_.numbering.local[0] == index)\n if len(i) == 0:\n i, j, k = np.where(self._sf_.numbering.local[1] == index)\n if len(i) == 0:\n i, j, k = np.where(self._sf_.numbering.local[2] == index)\n assert len(i) != 0, f\"Must have found the index.\"\n IND = 2\n else:\n IND = 1\n else:\n IND = 0\n i, j, k = i[0], j[0], k[0]\n nodes = self._sf_.space.nodes #\n if IND == 0:\n x, y, z = nodes[0][i] * np.ones((density, density)), \\\n np.linspace(nodes[1][j], nodes[1][j+1], density), \\\n np.linspace(nodes[2][k], nodes[2][k+1], density)\n y, z = np.meshgrid(y, z, indexing='ij')\n elif IND == 1:\n x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \\\n nodes[1][j] * np.ones((density, density)), \\\n np.linspace(nodes[2][k], nodes[2][k+1], density)\n x, z = np.meshgrid(x, z, indexing='ij')\n elif IND == 2:\n x, y, z = np.linspace(nodes[0][i], nodes[0][i+1], density), \\\n np.linspace(nodes[1][j], nodes[1][j+1], density), \\\n nodes[2][k] * np.ones((density, density))\n x, y = np.meshgrid(x, y, indexing='ij')\n else:\n raise Exception()\n xyz = x, y, z\n else:\n element = None\n xyz = None\n xyz, element = cOmm.bcast([xyz, element], root=mAster_rank)\n\n if element in self._mesh_.elements:\n xyz = self._mesh_.elements[element].coordinate_transformation.mapping(*xyz)\n else:\n xyz = None\n\n xyz = cOmm.gather(xyz, root=mAster_rank)\n\n if rAnk == mAster_rank:\n for ___ in xyz:\n if ___ is not None:\n x, y, z = ___\n break\n\n ax.plot_surface(x, y, z, color=(0,0,1,0.5))\n\n # --------- title ------------------------------------------------------------------------------\n if title is None:\n plt.title(f\"dof#{self._dof_._i_} of {self._sf_.k}-form: {self._sf_.standard_properties.name}.\")\n else:\n plt.title(title)\n # ---------- SAVE TO ---------------------------------------------------------------------------\n plt.tight_layout()\n if saveto is not None and saveto != '':\n plt.savefig(saveto, bbox_inches='tight')\n plt.close()\n else:\n plt.show()\n # ================================================================================\n\n return fig",
"\nfrom screws.freeze.base import FrozenOnly\nimport numpy as np\n\n\nclass _2dCSCG_S2F_Reconstruct(FrozenOnly):\n \"\"\"\"\"\"\n def __init__(self, f):\n self._f_ = f\n self._freeze_self_()\n\n\n def __call__(self, xi, eta, ravel=False, i=None, vectorized=False, value_only=False):\n \"\"\"\n Reconstruct the standard 3-form.\n\n Given ``xi``, ``eta`` and ``sigma``, we reconstruct the 3-form on ``meshgrid(xi, eta, sigma)``\n in all elements.\n\n :param xi: A 1d iterable object of floats between -1 and 1.\n :param eta: A 1d iterable object of floats between -1 and 1.\n :param i: (`default`:``None``) Do the reconstruction for ``#i`` element. if it is ``None``,\n then do it for all elements.\n :type i: int, None\n :type xi: list, tuple, numpy.ndarray\n :type eta: list, tuple, numpy.ndarray\n :param bool ravel: (`default`:``False``) If we return 1d data?\n :param vectorized:\n :param value_only:\n :returns: A tuple of outputs\n\n 1. (Dict[int, list]) -- :math:`x, y, z` coordinates.\n 2. (Dict[int, list]) -- Reconstructed values.\n \"\"\"\n f = self._f_\n mesh = self._f_.mesh\n\n xietasigma, basis = f.do.evaluate_basis_at_meshgrid(xi, eta)\n #--- parse indices --------------------------------------------------\n if i is None: # default, in all local mesh-elements.\n INDICES = mesh.elements.indices\n else:\n if vectorized: vectorized = False\n\n if isinstance(i ,int):\n INDICES = [i, ]\n else:\n raise NotImplementedError()\n #---- vectorized -----------------------------------------------\n if vectorized:\n\n assert INDICES == mesh.elements.indices, f\"currently, vectorized computation only works\" \\\n f\"for full reconstruction.\"\n\n det_iJ = mesh.elements.coordinate_transformation.vectorized.inverse_Jacobian(*xietasigma)\n\n if len(INDICES) > 0:\n if mesh.elements.IS.homogeneous_according_to_types_wrt_metric:\n v = np.einsum('ij, ki, j -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')\n else:\n v = np.einsum('ij, ki, kj -> kj', basis[0], f.cochain.array, det_iJ, optimize='greedy')\n\n else:\n v = None\n\n if ravel:\n pass\n else:\n raise NotImplementedError()\n\n if value_only:\n return (v,)\n else:\n raise Exception()\n\n #----- non-vectorized ------------------------------------------------\n else:\n if value_only:\n raise NotImplementedError()\n else:\n xyz = dict()\n value = dict()\n shape = [len(xi), len(eta)]\n iJC = dict()\n for i in INDICES:\n element = mesh.elements[i]\n typeWr2Metric = element.type_wrt_metric.mark\n xyz[i] = element.coordinate_transformation.mapping(*xietasigma)\n if typeWr2Metric in iJC:\n basis_det_iJ = iJC[typeWr2Metric]\n else:\n det_iJ = element.coordinate_transformation.inverse_Jacobian(*xietasigma)\n basis_det_iJ = basis[0] * det_iJ\n if isinstance(typeWr2Metric, str):\n iJC[typeWr2Metric] = basis_det_iJ\n\n v = np.einsum('ij, i -> j', basis_det_iJ, f.cochain.local[i], optimize='greedy')\n if ravel:\n value[i] = [v,]\n else:\n # noinspection PyUnresolvedReferences\n xyz[i] = [xyz[i][j].reshape(shape, order='F') for j in range(2)]\n value[i] = [v.reshape(shape, order='F'),]\n return xyz, value"
] | [
[
"scipy.io.savemat"
],
[
"numpy.array",
"numpy.kron",
"numpy.sqrt",
"numpy.einsum"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"numpy.max",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.log10",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tick_params"
],
[
"numpy.vstack"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.einsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
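
The plotting utility embedded in the record above estimates an empirical convergence order as the slope between the last two data points in log-log coordinates, and reuses that slope both for the legend label and for sizing the order triangle. A minimal standalone sketch of that calculation, with synthetic mesh sizes and errors invented for illustration (not taken from the repository), assuming only NumPy:

import numpy as np

# Hypothetical refinement study: mesh sizes h and errors of a second-order scheme.
h = np.array([0.4, 0.2, 0.1, 0.05])
err = 3.0 * h**2

# Empirical order = slope of the error curve between the last two points
# in log-log coordinates, the quantity the plot routine appends to its labels.
order = (np.log10(err[-1]) - np.log10(err[-2])) / (np.log10(h[-1]) - np.log10(h[-2]))
print("estimated order: %0.2f" % order)  # ~2.00 for this synthetic data
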
LongKt7/Face_Recognize_Pytorch | [
"baa02e633d379abe1001c8b8acb942617177329c",
"baa02e633d379abe1001c8b8acb942617177329c"
] | [
"config.py",
"Face_Alignt/predict_m.py"
] | [
"from easydict import EasyDict as edict\n# from pathlib import Path\nimport torch\nimport os\nfrom torchvision import transforms as trans\nfrom utils.constants import *\nlist_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',\n'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',\n'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']\ndef get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):\n conf = edict()\n conf.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n conf.input_size = [112, 112]\n conf.face_limit = 5 \n conf.min_face_size = 30\n conf.mode = mode\n conf.net_size = net_size\n if mode =='app':\n assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in cogfig.py'\n conf.use_tensor = True\n conf.work_path = WORK_PATH\n conf.model_path = '%s/models'%WORK_PATH\n conf.log_path = '%s/log'%WORK_PATH\n conf.save_path = '%s/save'%WORK_PATH\n conf.facebank_path = '%s/Face_bank'%WORK_PATH\n conf.threshold = threshold\n if use_mtcnn:\n conf.use_mtcnn = True\n else:\n conf.use_mtcnn = False\n #when inference, at maximum detect 10 faces in one image, my laptop is slow\n conf.test_transform = trans.Compose([\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n if net_size == 'large':\n conf.use_mobilfacenet = False\n if net_mode == 'ir_se':\n conf.net_mode = 'ir_se' # or 'ir'\n conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH\n conf.url = list_model[1]\n else:\n conf.net_mode = 'ir' # or 'ir'\n conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH\n conf.url = list_model[2]\n if net_size =='mobi':\n conf.use_mobilfacenet = True\n conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH\n conf.url = list_model[0]\n conf.video_source = 0\n\n if mode =='training_eval':\n conf.lr = 1e-3\n conf.milestones = [18,30,42]\n conf.momentum = 0.9\n conf.pin_memory = True\n# conf.num_workers = 4 # when batchsize is 200\n conf.num_workers = 3\n conf.train_root = \"/mnt/01D4A1D481139570/Dataset/Face/casia\"\n conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt' \n conf.batch_size = 4\n conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'\n conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'\n conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'\n conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'\n conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'\n conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'\n return conf",
"from network import PNet,ONet\r\nimport torch,cv2,itertools\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport time\r\nfrom matlab_cp2tform import get_similarity_transform_for_cv2\r\n\r\nimport math\r\ndef alignment(src_img,src_pts, crop_size = (112, 112)):\r\n ref_pts = np.array([ [30.2946, 51.6963],\r\n [65.5318, 51.5014],\r\n [48.0252, 71.7366],\r\n [33.5493, 92.3655],\r\n [62.7299, 92.2041] ])\r\n \r\n if crop_size[1]==112:\r\n ref_pts[:,0] += 8.0\r\n src_pts = np.array(src_pts).reshape(5,2)\r\n \r\n s = np.array(src_pts).astype(np.float32)\r\n r = np.array(ref_pts).astype(np.float32)\r\n\r\n tfm = get_similarity_transform_for_cv2(s, r)\r\n face_img = cv2.warpAffine(src_img, tfm, crop_size)\r\n return face_img\r\ndef resize_square(img, height=128, color=(0, 0, 0)): # resize a rectangular image to a padded square\r\n shape = img.shape[:2] # shape = [height, width]\r\n ratio = float(height) / max(shape) # ratio = old / new\r\n new_shape = [round(shape[0] * ratio), round(shape[1] * ratio)]\r\n dw = height - new_shape[1] # width padding\r\n dh = height - new_shape[0] # height padding\r\n top, bottom = dh // 2, dh - (dh // 2)\r\n left, right = dw // 2, dw - (dw // 2)\r\n img = cv2.resize(img, (new_shape[1], new_shape[0]), interpolation=cv2.INTER_AREA) # resized, no border\r\n return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color), ratio, dw // 2, dh // 2\r\n\r\ndef dotproduct(v1, v2):\r\n return sum((a*b) for a, b in zip(v1, v2))\r\n\r\ndef length(v):\r\n return math.sqrt(dotproduct(v, v))\r\n\r\ndef angle(v1, v2):\r\n return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))\r\ndef get_anchors(scale=64):\r\n '''\r\n compute anchors\r\n return:\r\n u_boxes:tensor([anchor_num,4]) (cx,cy,w,h): real anchors\r\n boxes:tensor([anchor_num,4]) (x1,y1,x2,y2): crop box for ONet,each with size 80\r\n '''\r\n sizes = [float(s) / scale for s in [32]]\r\n \r\n aspect_ratios = [(1.,)]\r\n feature_map_sizes = [int(scale/16)]\r\n \r\n num_layers = len(feature_map_sizes)\r\n u_boxes,boxes = [],[]\r\n for i in range(num_layers):\r\n fmsize = feature_map_sizes[i]\r\n for h,w in itertools.product(range(fmsize),repeat=2):\r\n cx = float(w)/feature_map_sizes[i]\r\n cy = float(h)/feature_map_sizes[i]\r\n \r\n s = sizes[i]\r\n for j,ar in enumerate(aspect_ratios[i]):\r\n u_boxes.append((cx,cy,float(s)*ar,float(s)*ar))\r\n boxes.append((w*16-32,h*16-32,w*16+32,h*16+32)) \r\n return torch.Tensor(u_boxes),torch.Tensor(boxes).long()\r\n\r\ndef nms(bboxes,scores,threshold=0.35):\r\n '''\r\n bboxes(tensor) [N,4]\r\n scores(tensor) [N,]\r\n '''\r\n x1 = bboxes[:,0]\r\n y1 = bboxes[:,1]\r\n x2 = bboxes[:,2]\r\n y2 = bboxes[:,3]\r\n areas = (x2-x1) * (y2-y1)\r\n\r\n _,order = scores.sort(0,descending=True)\r\n keep = []\r\n while order.numel() > 0:\r\n if order.numel() == 1:\r\n i = order.item()\r\n else:\r\n i = order[0].item()\r\n keep.append(i) \r\n\r\n if order.numel() == 1:\r\n break \r\n\r\n xx1 = x1[order[1:]].clamp(min=x1[i]) \r\n yy1 = y1[order[1:]].clamp(min=y1[i])\r\n xx2 = x2[order[1:]].clamp(max=x2[i])\r\n yy2 = y2[order[1:]].clamp(max=y2[i])\r\n\r\n w = (xx2-xx1).clamp(min=0)\r\n h = (yy2-yy1).clamp(min=0)\r\n inter = w*h\r\n\r\n ovr = inter / (areas[i] + areas[order[1:]] - inter) \r\n ids = (ovr<=threshold).nonzero().squeeze()\r\n if ids.numel() == 0:\r\n break\r\n order = order[ids+1] \r\n return torch.LongTensor(keep)\r\n \r\ndef decode_box(loc, size=64):\r\n variances = [0.1,0.2]\r\n anchor,crop = 
get_anchors(scale=size)\r\n cxcy = loc[:,:2] * variances[0] * anchor[:,2:] + anchor[:,:2]\r\n wh = torch.exp(loc[:,2:] * variances[1]) * anchor[:,2:]\r\n boxes = torch.cat([cxcy-wh/2,cxcy+wh/2],1)\r\n \r\n return boxes,anchor,crop\r\n \r\ndef decode_ldmk(ldmk,anchor):\r\n variances = [0.1,0.2]\r\n index_x = torch.Tensor([0,2,4,6,8]).long()\r\n index_y = torch.Tensor([1,3,5,7,9]).long()\r\n ldmk[:,index_x] = ldmk[:,index_x] * variances[0] * anchor[:,2].view(-1,1) + anchor[:,0].view(-1,1)\r\n ldmk[:,index_y] = ldmk[:,index_y] * variances[0] * anchor[:,3].view(-1,1) + anchor[:,1].view(-1,1)\r\n return ldmk\r\n \r\nimport os\r\n# list_per = []\r\ndef detect(file, pic = None):\r\n def change(boxes,ldmks, h, w, pad1):\r\n index_x = torch.LongTensor([0,2,4,6,8])\r\n index_y = torch.LongTensor([1,3,5,7,9])\r\n if h <= w:\r\n boxes[:,1] = boxes[:,1]*w-pad1\r\n boxes[:,3] = boxes[:,3]*w-pad1\r\n boxes[:,0] = boxes[:,0]*w\r\n boxes[:,2] = boxes[:,2]*w \r\n ldmks[:,index_x] = ldmks[:,index_x] * w\r\n ldmks[:,index_y] = ldmks[:,index_y] * w - torch.Tensor([pad1])\r\n else:\r\n boxes[:,1] = boxes[:,1]*h\r\n boxes[:,3] = boxes[:,3]*h\r\n boxes[:,0] = boxes[:,0]*h-pad1\r\n boxes[:,2] = boxes[:,2]*h-pad1\r\n ldmks[:,index_x] = ldmks[:,index_x] * h - torch.Tensor([pad1])\r\n ldmks[:,index_y] = ldmks[:,index_y] * h \r\n return boxes,ldmks\r\n if not isinstance(file, np.ndarray):\r\n im = cv2.imread(file)\r\n else:\r\n im = file\r\n if im is None:\r\n print(\"can not open image:\", file)\r\n return\r\n\r\n # pad img to square\r\n h, w,_ = im.shape\r\n\r\n dim_diff = np.abs(h - w)\r\n pad1, pad2 = dim_diff //2, dim_diff - dim_diff // 2\r\n pad = ((pad1,pad2),(0,0),(0,0)) if h<=w else ((0,0),(pad1, pad2),(0,0))\r\n img = np.pad(im, pad,'constant', constant_values=128)\r\n \r\n #get img_pyramid\r\n img_scale, img_size = 0,int((img.shape[0]-1)/32)\r\n while img_size > 0:\r\n img_scale += 1\r\n img_size /= 2\r\n if img_scale == 6:\r\n break\r\n img_size = 64\r\n img_pyramid = []\r\n t_boxes,t_probs, t_anchors, t_crops, t_which = None, None, None, None, None\r\n \r\n for scale in range(4):\r\n # print('scale:{0} img_size:{1}'.format(scale, img_size))\r\n input_img = cv2.resize(img,(img_size, img_size))\r\n img_pyramid.append(input_img.transpose(2,0,1))\r\n im_tensor = torch.from_numpy(input_img.transpose(2,0,1)).float()\r\n if use_gpu:\r\n im_tensor = im_tensor.cuda()\r\n #get conf and loc(box)\r\n if use_gpu:\r\n torch.cuda.synchronize()\r\n loc,conf = pnet(torch.unsqueeze(im_tensor,0))\r\n if use_gpu:\r\n torch.cuda.synchronize()\r\n \r\n # print('forward time:{}s'.format(e_t-s_t)) \r\n loc, conf = loc.detach().cpu(),conf.detach().cpu()\r\n loc, conf = loc.data.squeeze(0),F.softmax(conf.squeeze(0))\r\n boxes, anchor, crop = decode_box(loc,size=img_size)\r\n which_img = torch.tensor([scale]).long().expand((crop.shape[0],))\r\n \r\n #add box into stack\r\n if scale == 0:\r\n t_boxes, t_confs, t_anchors, t_crops, t_which = boxes, conf, anchor, crop, which_img\r\n else:\r\n t_boxes = torch.cat((t_boxes, boxes),0)\r\n t_confs = torch.cat((t_confs, conf),0)\r\n t_anchors = torch.cat((t_anchors, anchor),0)\r\n t_crops = torch.cat((t_crops, crop),0)\r\n t_which = torch.cat((t_which, which_img),0)\r\n img_size *= 2\r\n\r\n #get right boxes and nms\r\n t_confs[:,0] = 0.6\r\n max_conf, labels = t_confs.max(1)\r\n if labels.long().sum().item() is 0:\r\n return None\r\n ids = labels.nonzero().squeeze(1)\r\n t_boxes, t_confs, t_anchors, t_crops, t_which = t_boxes[ids], t_confs[ids], t_anchors[ids], t_crops[ids], 
t_which[ids]\r\n max_conf = max_conf[ids]\r\n \r\n keep = nms(t_boxes, max_conf)\r\n t_boxes, max_conf, t_anchors, t_crops, t_which = t_boxes[keep], max_conf[keep], t_anchors[keep], t_crops[keep], t_which[keep]\r\n\r\n t_boxes = t_boxes.detach().numpy()\r\n max_conf = max_conf.detach().numpy()\r\n \r\n #get crop and ldmks\r\n crop_imgs = []\r\n for i in range(t_boxes.shape[0]):\r\n img = img_pyramid[t_which[i]]\r\n crop = t_crops[i].numpy()\r\n _,h_,w_ = img.shape\r\n o_x1,o_y1,o_x2,o_y2 = max(crop[0],0),max(crop[1],0),min(crop[2],w_),min(crop[3],h_)\r\n c_x1 = 0 if crop[0] >=0 else -crop[0]\r\n c_y1 = 0 if crop[1] >=0 else -crop[1]\r\n c_x2 = 64 if crop[2] <= w_ else 64 - (crop[2] - w_)\r\n c_y2 = 64 if crop[3] <= h_ else 64 - (crop[3] - h_)\r\n crop_img = np.ones((3,64,64))*128\r\n np.copyto(crop_img[:,c_y1:c_y2,c_x1:c_x2],img[:,o_y1:o_y2,o_x1:o_x2])\r\n crop_imgs.append(crop_img)\r\n crop_imgs = torch.from_numpy(np.array(crop_imgs)).float()\r\n if use_gpu:\r\n crop_imgs = crop_imgs.cuda()\r\n t_ldmks = onet(crop_imgs).detach().cpu()[:,10,:].squeeze(1)\r\n t_ldmks = decode_ldmk(t_ldmks, t_anchors)\r\n t_boxes, t_ldmks = change(t_boxes,t_ldmks, h, w, pad1)\r\n t_faces = []\r\n for i in range(len(t_boxes)):\r\n box, prob, ldmk = t_boxes[i], max_conf[i], t_ldmks[i]\r\n if prob <= 0.7:\r\n continue\r\n ldmk_fn = ldmk.reshape(5,2)\r\n x1 = min(int(box[0])-5, 0)\r\n x2 = min(int(box[2]) -5, 0)\r\n y1 = max(int(box[1])+5, im.shape[1])\r\n y2 = max(int(box[3])+5, im.shape[2])\r\n face = alignment(im, ldmk_fn)\r\n cv2.rectangle(im, (x1,y1),(x2,y2), (255,0,0), 1)\r\n cv2.imwrite('a.png',im) \r\n t_faces.append(face)\r\n return t_boxes, t_faces\r\nimport glob, tqdm\r\nclass Face_Alignt():\r\n def __init__(self, use_gpu = True):\r\n pnet,onet = PNet(),ONet() \r\n pnet.load_state_dict(torch.load('weight/msos_pnet_rotate.pt',map_location=lambda storage, loc:storage), strict=False) \r\n onet.load_state_dict(torch.load('weight/msos_onet_rotate.pt',map_location=lambda storage, loc:storage), strict=False)\r\n onet.float()\r\n pnet.eval()\r\n onet.eval()\r\n if use_gpu:\r\n torch.cuda.set_device(0)\r\n pnet.cuda()\r\n onet.cuda()\r\n else:\r\n torch.set_num_threads(1)\r\n def align_multi(img, limit=None, min_face_size=30.0):\r\n return detect(img)"
] | [
[
"torch.cuda.is_available"
],
[
"torch.LongTensor",
"torch.cuda.synchronize",
"numpy.pad",
"numpy.abs",
"torch.cat",
"torch.Tensor",
"torch.load",
"torch.cuda.set_device",
"torch.unsqueeze",
"numpy.ones",
"torch.exp",
"torch.tensor",
"torch.set_num_threads",
"numpy.copyto",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
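
The Face_Alignt/predict_m.py file in the record above filters candidate face boxes with greedy non-maximum suppression at a fixed IoU threshold. A simplified NumPy rendering of that loop (the repository version operates on torch tensors; the boxes and scores below are invented for illustration):

import numpy as np

def nms(boxes, scores, iou_threshold=0.35):
    """Greedy non-maximum suppression over (x1, y1, x2, y2) boxes."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the highest-scoring remaining box with the others.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose overlap with box i is small enough.
        order = order[1:][iou <= iou_threshold]
    return keep

boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=float)
scores = np.array([0.9, 0.8, 0.7])
print(nms(boxes, scores))  # -> [0, 2]; the near-duplicate second box is suppressed
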
Splendon/examples | [
"ed4a8a01857b6ddca49559141acf5d0986eb01e1",
"ed4a8a01857b6ddca49559141acf5d0986eb01e1",
"ed4a8a01857b6ddca49559141acf5d0986eb01e1",
"ed4a8a01857b6ddca49559141acf5d0986eb01e1",
"ed4a8a01857b6ddca49559141acf5d0986eb01e1"
] | [
"utils/tests/test_util.py",
"code_examples/tensorflow/kernel_benchmarks/dense.py",
"applications/tensorflow/cnns/inference/data.py",
"applications/tensorflow/cnns/training/Models/squeezenet.py",
"applications/popart/resnext_inference/get_model.py"
] | [
"# Copyright 2019 Graphcore Ltd.\nfrom statistics import mean\nimport numpy as np\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\n\n\n\"\"\"Library of utility functions common between frameworks\"\"\"\n\n\ndef parse_results_for_speed(output, iter_tolerance, speed_tolerance):\n \"\"\"Look for <iter number> sec/itr. <speed number> {other stuff}\"\"\"\n found_a_result = False\n\n for line in output.split(\"\\n\"):\n matches = re.match(r\"([\\d.]+) +sec/itr. +([\\d.]+)\", line)\n if matches:\n found_a_result = True\n iterations, speed = matches.groups()\n iterations = float(iterations)\n speed = float(speed)\n _verify_model_numbers(\n iter_tolerance, iterations, speed_tolerance, speed, line\n )\n\n if not found_a_result:\n raise AssertionError(\"No results detected in this run\")\n\n\ndef parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):\n \"\"\"Look for Accuracy=<accuracy>%\"\"\"\n\n accuracies = []\n for line in output.split(\"\\n\"):\n if re.match(r\" + Accuracy=+([\\d.]+)%\", line):\n accuracy = float(re.match(r\" + Accuracy=+([\\d.]+)%\", line).groups()[0])\n accuracies.append(accuracy)\n elif re.search(r\"Validation accuracy\", line):\n accuracy_str = re.search(r\"accuracy:\\s(.*)\", line).group(1)\n accuracy = float(accuracy_str[:accuracy_str.rfind(\"%\")])\n accuracies.append(accuracy)\n\n if len(accuracies) == 0:\n raise AssertionError(\"No results detected in this run\")\n elif len(accuracies) != len(expected_accuracies):\n raise AssertionError(\"Expected accuracies and parsed accuracies have\"\n \" different lengths\")\n\n _verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)\n\n\ndef _verify_model_numbers(iter_tolerance, iterations,\n speed_tolerance, speed, line):\n iter_error = \"\"\n speed_error = \"\"\n\n # Verify iteration speed\n if iterations > iter_tolerance[1]:\n iter_error = (\"The time per iteration has regressed above\"\n \" the tolerance maximum: \" +\n str(iter_tolerance[1]))\n elif iterations < iter_tolerance[0]:\n iter_error = (\"Time taken to compete an iteration was \"\n \"suspiciously fast. Please verify the model\"\n \" is operating correctly and tune tolerances\"\n \" accordingly.\")\n\n # Verify item processing speed\n if speed < speed_tolerance[0]:\n speed_error = (\"The number of items processed per second\"\n \" has regressed below the tolerance: \" +\n str(speed_tolerance[0]))\n elif speed > speed_tolerance[1]:\n speed_error = (\"The number of items processed per second\"\n \" was suspiciously high. 
Please verify the\"\n \" model is behaving correctly and tune\"\n \" tolerances accordingly.\")\n\n if iter_error and speed_error:\n sys.stderr.write(\"\\n\".join([line, iter_error, speed_error]))\n raise AssertionError(\"Timings out of tolerance range\")\n elif iter_error or speed_error:\n sys.stderr.write(line)\n raise AssertionError(iter_error + speed_error)\n\n\ndef _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):\n \"\"\"Asserts a list of accuracies is within a list of expected accuracies\n with a tolerance applied.\n\n Args:\n accuracies: A list of floats representing the accuracies (%) produced\n by the model at each step.\n expected_accuracy: A list of floats representing the expected\n accuracies (%) produced by the model at each step.\n acc_tolerance: A float representing a percentage tolerance applied on\n top of the expected accuracies that the accuracies produced by\n the model should sit within.\n\n Raises:\n Assertion Error: Accuracy produced by the model are not within\n the expected limits.\n \"\"\"\n\n for iter_num in range(len(accuracies)):\n exp_acc = expected_accuracy[iter_num]\n exp_acc_str = (\n \"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]\".format(\n \"Expected accuracy (%)\".ljust(22),\n exp_acc,\n acc_tolerance,\n exp_acc - acc_tolerance,\n exp_acc + acc_tolerance,\n 2\n )\n )\n acc = accuracies[iter_num]\n acc_str = \"{} = {:.{}f}\".format(\n \"Accuracy (%)\".ljust(22),\n acc,\n 2\n )\n full_acc_str = \"{}\\n{}\".format(acc_str, exp_acc_str)\n if acc < exp_acc - acc_tolerance:\n raise AssertionError(\n \"After iteration {}, the model is less accurate\"\n \" than expected.\\n\"\n \"{}\".format(iter_num + 1, full_acc_str)\n )\n elif acc > exp_acc + acc_tolerance:\n raise AssertionError(\n \"After iteration {}, the model is producing an accuracy\"\n \" that is suspiciously high and should be reviewed.\\n\"\n \"{}\".format(iter_num + 1, full_acc_str)\n )\n\n\ndef assert_result_equals_tensor_value(output, tensor):\n \"\"\"Searches for a single tensor result in the first line of the output\n\n\n Searches the first line of the string output for a line with format\n '[array([3., 8.], dtype=float32)]' and asserts its equal to the numpy\n tensor argument\n\n Args:\n output: String containing the string representation of a numpy\n tensor\n tensor: numpy tensor representing the expected result\n\n Returns:\n None\n\n Raises:\n Assertion Error: Output is not in correct format\n Assertion Error: Output does not contain a string representation\n of a numpy array\n Assertion Error: Output numpy array does not equal the expected\n numpy array\n \"\"\"\n # TODO - np representation over multiple lines\n # TODO - large np array output\n # TODO - multiple dimension np output\n list_regex = r\"^\\[.*?\\]$\"\n np_array_str_regex = r\"array\\(.*?, dtype=.*?\\)$\"\n first_line = output.split(\"\\n\")[0]\n if not re.match(list_regex, first_line):\n raise AssertionError(\n \"Result not in expected string format.\"\n \" Expecting stringified list \"\n \" eg. [array([3., 8.], dtype=float32)]\"\n )\n\n contents = first_line[1:-1]\n if not re.match(np_array_str_regex, contents):\n raise AssertionError(\n \"Expecting numpy representation \"\n \"array with dtype \"\n \"eg. 
array([3., 8.], dtype=float32)\"\n )\n\n assert contents == np.array_repr(tensor), (\n \"Output value {} does not \"\n \"equal expected value {}\".format(np.array_repr(contents), tensor)\n )\n\n\ndef parse_results_for_ipus_used(output):\n \"\"\"Finds the number of IPUs used in the model by looking for\n string with format ' On 2 IPUs.' in output\"\"\"\n shards_regex = r\" On ([\\d.]+) IPUs.\"\n for line in output.split(\"\\n\"):\n matches = re.match(shards_regex, line)\n if matches:\n shards = matches.group(1)\n return int(shards)\n raise AssertionError(\"Expecting line detailing IPU usage \"\n \"eg. ' On 2 IPUs.'\")\n\n\ndef assert_shards(output, expected_shards):\n \"\"\"Verify the expected number of shards used were actually\n used\"\"\"\n actual_shards = parse_results_for_ipus_used(output)\n assert actual_shards == expected_shards\n\n\ndef get_final_accuracy(output):\n \"\"\"Find and return the accuracy reported in a test's output.\"\"\"\n result_regex = r\"Accuracy=([\\d.]+)\\%\"\n result_list = parse_results_with_regex(output, result_regex)\n result = result_list[0]\n return result[-1]\n\n\ndef get_final_loss(output):\n \"\"\"Find and return the loss reported in a test's output.\"\"\"\n result_regex = r\"Loss=([\\d.]+)\"\n result_list = parse_results_with_regex(output, result_regex)\n result = result_list[0]\n return result[-1]\n\n\ndef get_average_speeds(output):\n \"\"\"Finds the average seconds/iteration and tokens/second\n\n Args:\n output: String representing the output of a test.\n\n Returns:\n A tuple where the first element is a float representing\n the average iterations per second and the second the\n average tokens processed per second\n \"\"\"\n\n result_regex = r\"([\\d.]+) +sec/itr. +([\\d.]+)\"\n results = parse_results_with_regex(output, result_regex)\n\n itr_sec_list = results[0]\n tokens_sec_list = results[1]\n\n return mean(itr_sec_list), mean(tokens_sec_list)\n\n\ndef parse_results_with_regex(output, regex):\n \"\"\"Find and returns the regex matching results in output\n\n Looks through the output line by line looking for a matching regex.\n The function assembles a list of lists where each parent list is\n the results for that position in the regex string and each item in\n the child lists represents an order of the results found in the output\n\n Args:\n output: String representing the output of a test.\n regex: Regex of result to find.\n\n Returns:\n A list of lists of floats. Parent list represents the result at each\n position in the regex. 
Child list contains results received in the\n order they were output.\n\n Raises:\n AssertionError: a line matching the regex could not be found in the\n output\n \"\"\"\n\n results = []\n\n for line in output.split(\"\\n\"):\n matches = re.search(regex, line)\n if matches:\n number_of_results = matches.lastindex\n if results == []:\n results = [None] * number_of_results\n for match_index in range(0, number_of_results):\n result = float(matches.group(match_index + 1))\n if results[match_index]:\n results[match_index].append(result)\n continue\n results[match_index] = [result]\n\n if results == []:\n raise AssertionError(\"Regex {} not found in result\".format(regex))\n\n return results\n\n\ndef get_total_epochs(output):\n \"\"\"Finds the number of epochs model has run through by looking for\n string with format 'Epoch #3' in the models raw output\"\"\"\n epochs = None\n for line in output.split(\"\\n\"):\n epoch_match = re.search(r\"Epoch #([\\d.]+)\", line)\n if epoch_match:\n epochs = int(epoch_match.group(1))\n if not epochs:\n raise AssertionError(\"Epochs not found in output, eg. \"\n \"Epoch #3\")\n return epochs\n\n\ndef assert_total_run_time(total_time, time_range):\n \"\"\"Checks total run time is within the required range\n\n Args:\n total_time: float representing number of seconds the test took to\n run\n time_range: a tuple of floats where the first element is the minimum\n time the test should run in in seconds and the second the\n maximum\n\n Raises:\n AssertionError: if the total_time is not between the minimum time\n and maximum time\n \"\"\"\n minimum_time = time_range[0]\n maximum_time = time_range[1]\n assert total_time >= minimum_time\n assert total_time <= maximum_time\n\n\ndef assert_final_accuracy(output, minimum, maximum):\n \"\"\"Gets the final accuracy given a raw model output and checks its value\n is between the minimum and maximum\n\n Args:\n output: String representing the raw output of a model\n minimum: a float representing a percentage (between 0.0% and 100%)\n that is the minimum accuracy for the model after running\n maximum: a float representing a percentage (between 0.0% and 100%)\n that is the maximum accuracy for the model after running\n\n Raises:\n AssertionError: if the final accuracy is not between the maximum and\n minimum percentages\n \"\"\"\n accuracy = get_final_accuracy(output)\n assert accuracy >= minimum\n assert accuracy <= maximum\n\n\ndef run_python_script_helper(cwd, script, **kwargs):\n \"\"\"A function that given a path and python script name, runs the script\n with kwargs as the command line arguments\n\n Args:\n cwd: string representing the directory of the python script\n script: string representing the full name of the python script\n kwargs: dictionary of string key and values that form the command\n line arguments when the script is run.\n\n Returns:\n A string representing the raw output of the python script run\n\n Raises:\n AssertionError: if the final accuracy is not between the maximum and\n minimum percentages\n \"\"\"\n py_version = \"python{}\".format(sys.version_info[0])\n cmd = [py_version, script]\n if kwargs:\n args = [\n str(item) for sublist in kwargs.items() for item in sublist if item != \"\"\n ]\n cmd.extend(args)\n out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)\n print(out)\n return out\n\n\ndef run_test_helper(subprocess_function, total_run_time=None,\n total_run_time_tolerance=0.1, **kwargs):\n \"\"\"Helper function for running tests\n\n Takes in testable parameters, runs the test and 
checks the relevant\n parameters against test results\n\n Args:\n subprocess_function: the function that runs a subprocess of\n the model in question\n total_run_time_range: tuple float representing the expected\n upper and lower bounds for the total time taken to run\n the test\n\n Returns:\n A String representing the raw output of the models subprocess\n\n Raises:\n AssertionError: If the accuracy, time taken etc. are not within\n the expected bounds\n \"\"\"\n\n start_time = time.time()\n\n out = subprocess_function(**kwargs)\n\n total_time = time.time() - start_time\n\n if total_run_time:\n total_run_time_range = range_from_tolerances(\n total_run_time, total_run_time_tolerance\n )\n assert_total_run_time(total_time, total_run_time_range)\n\n return out\n\n\ndef range_from_tolerances(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied symmetrically across the value argument\n\n Returns:\n A tuple of floats, the first element representing the tolerance\n applied below the value (minimum) and the second above (maximum)\n \"\"\"\n return (\n get_minimum_with_tolerance(value, tolerance),\n get_maximum_with_tolerance(value, tolerance),\n )\n\n\ndef get_minimum_with_tolerance(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n below the value\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied to the value argument\n\n Returns:\n A float representing the tolerance applied below the value (maximum)\n \"\"\"\n return value * (1 - tolerance)\n\n\ndef get_maximum_with_tolerance(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n above the value\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied to the value argument\n\n Returns:\n A float representing the tolerance applied above the value (minimum)\n \"\"\"\n return value * (1 + tolerance)\n\n\ndef check_data_exists(data_path, expected_files_list):\n \"\"\"Helper function that checks the expected data exists in a directory\n\n Args:\n data_path: A string representing the directory of where the\n data is expected to be\n expected_files_list: a list of strings representing the expected\n file names in the data_path directory\n\n Returns:\n A boolean which represents whether the expected files are found in\n the data_path directory\n \"\"\"\n\n if os.path.exists(data_path):\n for filename in expected_files_list:\n if not os.path.isfile(os.path.join(data_path, filename)):\n return False\n return True\n\n return False\n",
"#!/usr/bin/env python\n\"\"\"\nBenchmark a single Dense layer with no host/device data transfers.\n\nThe Items/sec reported at the end of the benchmark is based on wall time.\n\nRun with -h or --help for options.\n\"\"\"\nimport inspect\nimport os\nimport sys\nimport tensorflow as tf\nfrom tensorflow.python.ipu import utils\n\n\ndef dense(opts, inputs):\n # Add ReLU activation function if appropriate option is set\n if opts.activation:\n return tf.layers.dense(units=opts.size, inputs=inputs, activation=tf.nn.relu)\n\n else:\n return tf.layers.dense(units=opts.size, inputs=inputs)\n\n\ndef inputs(opts, index):\n value = tf.cast(index, tf.float16)\n return {\n \"inputs\": tf.broadcast_to(value, [opts.batch_size, opts.size]),\n }\n\n\ndef graph_builder(opts, inputs):\n output = dense(opts, inputs[\"inputs\"])\n\n if opts.train:\n # Loss is the mean across output matrix:\n loss = tf.reduce_mean(output)\n optimiser = tf.train.GradientDescentOptimizer(0.01)\n with tf.variable_scope(\"train\", reuse=tf.AUTO_REUSE):\n # We need to ensure that the train op is executed as part of\n # the benchmarking loop by maintaining a step variable and\n # forcing a control dependency between it and the train op:\n global_step = tf.get_variable(\n \"step_control\", dtype=tf.int32, shape=[])\n grads_and_vars = optimiser.compute_gradients(\n loss, tf.trainable_variables())\n train = optimiser.apply_gradients(grads_and_vars, global_step)\n with tf.control_dependencies([train]):\n global_step = tf.identity(global_step)\n return global_step\n return output\n\n\ndef initializer():\n utils.move_variable_initialization_to_cpu()\n return tf.global_variables_initializer()\n\n\ndef add_args(parser):\n parser.add_argument(\"--batch-size\", default=32, type=int,\n help=\"Number of inputs in a mini-batch\")\n parser.add_argument(\"--size\", default=1024, type=int,\n help=\"Dense layer size\")\n parser.add_argument(\"--train\", action='store_true', dest='train',\n help=\"Compute loss and optimization pass\")\n parser.add_argument(\"--include-activation\", action='store_true', dest='activation',\n help=\"Include ReLU activation (otherwise linear/no activation\")\n\n parser.set_defaults(train=False, batches_per_step=5000, steps=5)\n return parser\n\n\ndef iteration_report(opts, time):\n return \"{:5f} items/sec\".format(opts.batch_size * opts.batches_per_step / time)\n\n\nif __name__ == '__main__':\n # Add benchmark module to path\n cwd = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n sys.path.insert(1, os.path.join(cwd, '..', '..', '..', 'utils',\n 'benchmarks', 'tensorflow'))\n import benchmark\n\n module = benchmark.Benchmark(\n graph_builder,\n inputs,\n initializer,\n add_args,\n iteration_report\n )\n\n options = benchmark.parse_opts(module, False)\n\n if options.shards > 0:\n raise NotImplementedError(\n \"--shards option has not been implemented with this example\")\n\n # Log Benchmark Message\n print(\" Dense layer {} Synthetic benchmark.\\n\"\n \" Batch size {}.\\n\"\n \" Batches per Step {}.\\n\"\n \" Dense size {}.\\n\"\n .format(\n \"Training\" if options.train else \"Inference\",\n options.batch_size,\n options.batches_per_step if not options.cycle_report else \"n/a\",\n options.size))\n\n benchmark.run(module, options)\n",
"# Copyright 2019 Graphcore Ltd.\nfrom functools import partial\nfrom typing import Callable, Tuple\n\nimport tensorflow as tf\n\n\ndef load_and_preprocess_data(img_path: str, img_width: int, img_height: int,\n preprocess_fn: Callable, dtype: tf.DType) -> tf.Tensor:\n \"\"\"Read and pre-process image.\n\n Args:\n img_path: Path to image\n img_width: Target width\n img_height: Target height\n preprocess_fn: Function that scales the input to the correct range.\n\n Returns: tf.Tensor representing pre-processed image in fp16.\n\n \"\"\"\n image = tf.read_file(img_path)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.resize(image, [img_height, img_width])\n image = preprocess_fn(image, data_format='channels_last')\n return tf.cast(image, dtype)\n\n\ndef get_dataset(image_filenames: Tuple, batch_size: int, preprocess_fn: Callable, img_width: int, img_height: int,\n loop: bool, dtype: tf.DType) -> tf.data.Dataset:\n \"\"\"Creates an `Iterator` for enumerating the elements of this dataset.\n\n Note: The returned iterator will be in an uninitialized state,\n and you must run the `iterator.initializer` operation before using it:\n\n ```python\n dataset = ...\n iterator = dataset.make_initializable_iterator()\n # ...\n sess.run(iterator.initializer)\n ```\n\n Args:\n image_filenames: Tuple of image filenames, with each filename corresponding to the label of the image.\n batch_size: Number of images per batch\n preprocess_fn: Pre-processing to apply\n img_width: Expected width of image\n img_height: Expected height of image\n loop: Repeatedly loop through images.\n dtype: Input data type.\n\n\n Returns:\n Iterator over images and labels.\n\n \"\"\"\n\n image_ds = tf.data.Dataset.from_tensor_slices(tf.constant([str(item) for item in image_filenames]))\n if loop:\n image_ds = image_ds.repeat()\n input_preprocess = partial(load_and_preprocess_data, img_width=img_width, img_height=img_height,\n preprocess_fn=preprocess_fn, dtype=dtype)\n image_ds = image_ds.map(map_func=input_preprocess, num_parallel_calls=100)\n image_ds = image_ds.batch(batch_size, drop_remainder=True)\n image_ds = image_ds.prefetch(buffer_size=100)\n return image_ds\n",
"# Copyright 2019 Graphcore Ltd.\n\"\"\"\nSqueezeNet\n\nA Convolutional ineural network with relatively few parameters (~1.25M) but equivalent\naccuracy to AlexNet.\n\nArchitecture originally described in Caffe. Implemented here in Tensorflow for the IPU.\n\n\nSQUEEZENET: ALEXNET-LEVEL ACCURACY WITH\n50X FEWER PARAMETERS AND <0.5MB MODEL SIZE\nhttps://arxiv.org/pdf/1602.07360.pdf\n\nSqueezeNet was originally implemented using a polynomial decay learnng rate. To use this,\nrun with --lr-schedule polynomial_decay_lr.\n\nThis will set a default rate of linear decay of the learning rate. You can change this\nrate with the parameters --poly-lr-decay-steps, --poly-lr-initial-lr, --poly-lr-decay-power\nand --poly-lr-end-lr.\n\nUnlike the original implementation, this version does not use quantization or compression.\nAdditionally, unlike the original, this model trains in fp16 as default, but can be run in\nfp32 with --precision 32.32. In this case, you will need to run over two IPUs.\n\"\"\"\n\nimport tensorflow as tf\nfrom functools import partial\nimport base as BASE\nimport validation as VALID\nfrom tensorflow.contrib.ipu.python.poprand import dropout\n\n\nclass SqueezeNet:\n def __init__(self, opts, is_training=True):\n self.is_training = is_training\n self.num_classes = 1000\n\n def _build_graph(self, image):\n \"\"\"Classifies a batch of ImageNet images\n\n Returns:\n A logits Tensor with shape [<batch_size>, self.num_classes]\n \"\"\"\n image = _conv1(image, name=\"initialconv\")\n x = tf.compat.v1.layers.max_pooling2d(image, pool_size=3, strides=2)\n x = _fire(x, 16, 64, 64, name=\"fire2\")\n x = _fire(x, 16, 64, 64, name=\"fire3\")\n x = _fire(x, 32, 128, 128, name=\"fire4\")\n # maxpool4\n x = tf.compat.v1.layers.max_pooling2d(x, pool_size=3, strides=2)\n x = _fire(x, 32, 128, 128, name=\"fire5\")\n x = _fire(x, 48, 192, 192, name=\"fire6\")\n x = _fire(x, 48, 192, 192, name=\"fire7\")\n x = _fire(x, 64, 256, 256, name=\"fire8\")\n # maxpool8\n x = tf.compat.v1.layers.max_pooling2d(x, pool_size=3, strides=2)\n x = _fire(x, 64, 256, 256, name=\"fire9\")\n x = tf.nn.dropout(\n x, keep_prob=0.5 if self.is_training else 0.0, name=\"drop9\")\n image = _conv10(x, name=\"finalconv\")\n avgpool = tf.layers.average_pooling2d(\n image, pool_size=13, strides=1, name=\"final_pool\")\n logits = tf.layers.flatten(avgpool)\n return logits\n\n def __call__(self, x):\n shape = x.get_shape().as_list()\n if len(shape) != 4:\n raise ValueError(\"Input size must be [batch,height,width,channels]\")\n if shape[1] < 224 or shape[2] < 224:\n raise ValueError(\"Input image must be at least 224x224\")\n return self._build_graph(x)\n\n\ndef Model(opts, training, image):\n return SqueezeNet(opts, training)(image)\n\n\n###########################################\n# SqueezeNet block definitions\n###########################################\n\ndef _conv1(inputs, name):\n \"\"\"The first layer of squeezenet\n Convolution\n\n name: a string name for the tensor output of the block layer.\n\n Returns:\n The output tensor of the block.\n \"\"\"\n with tf.variable_scope(name):\n inputs = conv(inputs, ksize=7, stride=2, filters_out=96,\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(), bias=False)\n return inputs\n\n\ndef _conv10(inputs, name):\n \"\"\"The first layer of squeezenet\n Convolution then average pooling\n\n name: a string name for the tensor output of the block layer.\n\n Returns:\n The output tensor of the block.\n \"\"\"\n with tf.variable_scope(name):\n inputs = conv(inputs, ksize=1, 
stride=1, filters_out=1000,\n kernel_initializer=tf.initializers.truncated_normal(\n mean=0.0, stddev=0.01),\n bias=False)\n return inputs\n\n\ndef _fire(inputs, s_1, e_1, e_3, name):\n \"\"\"Fire module:\n A 'squeeze' convolution layer, which has only 1x1 filters, feeding\n into an 'expand' layer, that has a mix of 1x1 and 3x3 filters.\n\n s_1: The number of 1x1 filters in the squeeze layer\n e_1: The number of 1x1 filters in the expand layer\n e_3: The number of 3x3 filters in the expand layer\n name: a string name for the tensor output of the block layer.\n \"\"\"\n with tf.variable_scope(name):\n # squeeze layer\n with tf.variable_scope(name+\"s_1\"):\n inputs = conv(inputs, ksize=1, stride=1, filters_out=s_1,\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\n bias=False)\n s1_out = tf.nn.relu(inputs)\n # expand layer\n with tf.variable_scope(name+\"e_1\"):\n e1_out = conv(s1_out, ksize=1, stride=1, filters_out=e_1,\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\n bias=False)\n e1_out = tf.nn.relu(e1_out)\n # expand layer\n with tf.variable_scope(name+\"e_3\"):\n e3_out = conv(s1_out, ksize=3, stride=1, filters_out=e_3,\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),\n bias=False)\n e3_out = tf.nn.relu(e3_out)\n inputs = tf.concat([e1_out, e3_out], axis=3, name='concat')\n return inputs\n\n\n###########################################\n# Layer definitions\n###########################################\n\ndef conv(x, ksize, stride, filters_out, kernel_initializer, bias=True):\n with tf.variable_scope('conv', use_resource=True):\n\n return tf.layers.conv2d(\n inputs=x, filters=filters_out, kernel_size=ksize, strides=stride,\n padding='same',\n use_bias=bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.0002),\n activation=tf.nn.relu,\n data_format='channels_last')\n\n\ndef add_arguments(parser):\n return parser\n\n\ndef set_defaults(opts):\n opts['summary_str'] += \"SqueezeNet\\n\"\n\n if opts['dataset'] == 'imagenet':\n opts['shortcut_type'] = 'B'\n elif 'cifar' in opts['dataset']:\n opts['shortcut_type'] = 'A'\n\n# opts['dataset'] = 'imagenet'\n opts['lr_schedule'] = 'polynomial_decay_lr'\n\n if not opts.get('epochs') and not opts.get('iterations'):\n opts['epochs'] = 100\n\n if not opts.get(\"batch_size\"):\n opts['batch_size'] = 4\n\n if (opts['precision'] == '32.32') and not opts.get(\"shards\"):\n opts['shards'] = 2\n\n opts['name'] = \"SN_bs{}\".format(opts['batch_size'])\n\n if opts.get('replicas') > 1:\n opts['name'] += \"x{}r\".format(opts['replicas'])\n if opts['pipeline_depth'] > 1:\n opts['name'] += \"x{}p\".format(opts['pipeline_depth'])\n elif opts.get('gradients_to_accumulate') > 1:\n opts['name'] += \"x{}a\".format(opts['gradients_to_accumulate'])\n\n opts['name'] += '_{}{}'.format(opts['precision'],\n '_noSR' if opts['no_stochastic_rounding'] else '')\n",
"# Copyright 2019 Graphcore Ltd.\nimport torch\nimport torch.onnx\nimport urllib.request\nimport pretrainedmodels\nimport os\nimport onnx\nimport argparse\n\n\"\"\"\nDownloads the model in Pytorch format and converts to ONNX.\nCreates copies with different batch size dimensions.\n\"\"\"\n\n\ndef get_model(opts):\n\n path = \"models/\" + opts.model_name + \"/\"\n filename = \"model.onnx\"\n\n if not os.path.exists(path):\n print(\"Creating models directory\")\n os.makedirs(path)\n\n if not os.path.exists(\"logs/\"):\n print(\"Creating logs directory\")\n os.makedirs(\"logs/\")\n\n # Get the model. If it doesn't exist it will be downloaded\n if not os.path.isfile(path + filename):\n print(f\"Downloading model to {path + filename}\")\n\n # Create the right input shape\n dummy_input = torch.randn(1, 3, 224, 224)\n model = pretrainedmodels.__dict__[opts.model_name](\n num_classes=1000, pretrained='imagenet')\n torch.onnx.export(model, dummy_input, path + filename)\n\n model_path = path + filename\n onnx_model = onnx.load(model_path)\n onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = opts.batch_size\n print(\n f\"Converting model to batch size {opts.batch_size} and saving to {path + 'model_' + str(opts.batch_size) + '.onnx'}\")\n onnx.save(onnx_model, path + f\"model_{opts.batch_size}.onnx\")\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--batch-size\", type=int, default=1)\nparser.add_argument(\"--model-name\", type=str, default='resnext101_32x4d',\n help=\"pretrained model name, according to `pretrainedmodels` Python package\")\n\n\n# set up directory\nmodel_name = 'resnext101_32x4d'\nfilename = \"model.onnx\"\n\n\nif __name__ == \"__main__\":\n opts = parser.parse_args()\n get_model(opts)\n"
] | [
[
"numpy.array_repr"
],
[
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.broadcast_to",
"tensorflow.cast",
"tensorflow.identity",
"tensorflow.layers.dense",
"tensorflow.trainable_variables",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.variable_scope",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu"
],
[
"tensorflow.cast",
"tensorflow.image.resize",
"tensorflow.image.decode_jpeg",
"tensorflow.read_file"
],
[
"tensorflow.nn.relu",
"tensorflow.layers.flatten",
"tensorflow.concat",
"tensorflow.zeros_initializer",
"tensorflow.initializers.truncated_normal",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.compat.v1.layers.max_pooling2d",
"tensorflow.layers.average_pooling2d",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.xavier_initializer_conv2d",
"tensorflow.nn.dropout"
],
[
"torch.randn",
"torch.onnx.export"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MasterScott/Formasaurus | [
"d7d916237a6d2ca4c80c4c8ae5d66999c8beebed"
] | [
"tests/test_fieldtype_model.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division\nimport itertools\n\nimport numpy as np\nfrom sklearn_crfsuite.metrics import flat_accuracy_score\n\nfrom formasaurus.fieldtype_model import (\n train,\n _PRECISE_C1_C2,\n _REALISTIC_C1_C2,\n get_Xy,\n)\n\n\ndef test_training(storage, capsys):\n annotations = (a for a in storage.iter_annotations(\n simplify_form_types=True,\n simplify_field_types=True,\n ) if a.fields_annotated)\n annotations = list(itertools.islice(annotations, 0, 300))\n\n crf = train(\n annotations=annotations,\n use_precise_form_types=False,\n optimize_hyperparameters_iters=2,\n optimize_hyperparameters_folds=2,\n optimize_hyperparameters_jobs=-1,\n full_form_type_names=False,\n full_field_type_names=False\n )\n\n out, err = capsys.readouterr()\n\n assert 'Training on 300 forms' in out\n assert 'realistic form types' in out\n assert 'Best hyperparameters' in out\n\n assert 0.0 < crf.c1 < 2.5\n assert 0.0 < crf.c2 < 0.9\n assert crf.c1, crf.c2 != _REALISTIC_C1_C2\n assert crf.c1, crf.c2 != _PRECISE_C1_C2\n\n form_types = np.asarray([a.type for a in annotations])\n X, y = get_Xy(annotations, form_types, full_type_names=False)\n y_pred = crf.predict(X)\n score = flat_accuracy_score(y, y_pred)\n assert 0.9 < score < 1.0 # overfitting FTW!\n\n field_schema = storage.get_field_schema()\n short_names = set(field_schema.types_inv.keys())\n assert set(crf.classes_).issubset(short_names)\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Wentaobi/Udacity | [
"00af9c36b42d6bca5f2d42d2744efed2ddb51587",
"00af9c36b42d6bca5f2d42d2744efed2ddb51587"
] | [
"Self_Driving_Car/P1/LaneLines-P1/P1.py",
"Self_Driving_Car/P4/project04.py"
] | [
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n\n#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg');\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimesions:', image.shape)\nplt.imshow(image); #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\nimport math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef hsv(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img)\n\n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n #filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=13):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to\n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4).\n\n Think about things like separating line segments by their\n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of\n the lines and extrapolate to the top and bottom of the lane.\n\n This function draws `lines` with `color` and `thickness`.\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n x_size = img.shape[1]\n y_size = img.shape[0]\n lines_slope_intercept = np.zeros(shape=(len(lines),2))\n for index,line in enumerate(lines):\n for x1,y1,x2,y2 in line:\n slope = (y2-y1)/(x2-x1)\n intercept = y1 - x1 * slope\n lines_slope_intercept[index]=[slope,intercept]\n max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]\n min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]\n left_slopes = []\n left_intercepts = []\n right_slopes = []\n right_intercepts = []\n # this gets slopes and intercepts of lines similar to the lines with the max (immediate left) and min\n # (immediate right) slopes (i.e. 
slope and intercept within x%)\n for line in lines_slope_intercept:\n if abs(line[0] - max_slope_line[0]) < 0.15 and abs(line[1] - max_slope_line[1]) < (0.15 * x_size):\n left_slopes.append(line[0])\n left_intercepts.append(line[1])\n elif abs(line[0] - min_slope_line[0]) < 0.15 and abs(line[1] - min_slope_line[1]) < (0.15 * x_size):\n right_slopes.append(line[0])\n right_intercepts.append(line[1])\n # left and right lines are averages of these slopes and intercepts, extrapolate lines to edges and center*\n # *roughly\n new_lines = np.zeros(shape=(1,2,4), dtype=np.int32)\n if len(left_slopes) > 0:\n left_line = [sum(left_slopes)/len(left_slopes),sum(left_intercepts)/len(left_intercepts)]\n left_bottom_x = (y_size - left_line[1])/left_line[0]\n left_top_x = (y_size*.575 - left_line[1])/left_line[0]\n if (left_bottom_x >= 0):\n new_lines[0][0] =[left_bottom_x,y_size,left_top_x,y_size*.575]\n if len(right_slopes) > 0:\n right_line = [sum(right_slopes)/len(right_slopes),sum(right_intercepts)/len(right_intercepts)]\n right_bottom_x = (y_size - right_line[1])/right_line[0]\n right_top_x = (y_size*.575 - right_line[1])/right_line[0]\n if (right_bottom_x <= x_size):\n new_lines[0][1]=[right_bottom_x,y_size,right_top_x,y_size*.575]\n for line in new_lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n\n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n\n `initial_img` should be the image before any processing.\n\n The result image is computed as follows:\n\n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\nimport os\nos.listdir(\"test_images/\")\n\n#reading in an image\nfor index, img in enumerate(os.listdir(\"test_images/\")):\n image = mpimg.imread('test_images/' + img)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n 
hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n fig = plt.figure(figsize=(6,10))\n plt.imshow(result, cmap=\"gray\") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\n#reading in an image\nfor index, img in enumerate(os.listdir(\"test_images2/\")):\n image = mpimg.imread('test_images2/' + img)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n fig = plt.figure(figsize=(8,10))\n plt.imshow(result, cmap=\"gray\") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\n# from IPython.display import HTML\n\ndef process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image with lines are drawn on lanes)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = 
hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n #return cv2.cvtColor(masked_img, cv2.COLOR_GRAY2RGB)\n return result\n\n\nwhite_output = 'white.mp4'\nclip1 = VideoFileClip(\"solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\nwhite_clip.write_videofile(white_output, audio=False)\n\n\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(white_output))\n\n\nyellow_output = 'yellow.mp4'\nclip2 = VideoFileClip('solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\nyellow_clip.write_videofile(yellow_output, audio=False)\n\n\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(yellow_output))\n\n\nchallenge_output = 'extra.mp4'\nclip2 = VideoFileClip('challenge.mp4')\nchallenge_clip = clip2.fl_image(process_image)\nchallenge_clip.write_videofile(challenge_output, audio=False)\n\n#\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(challenge_output))\n",
"\"\"\"Project 04 - Advanced Lane Detection\n\nUsage:\n project04.py <input_video> <output_video> [-c <camera_file>]\n project04.py (-h | --help)\n\nOptions:\n -h --help Show this screen.\n -c <camera_file> Specify camera calibration file [default: camera_data.npz]\n\"\"\"\nimport os\nimport cv2\nimport glob\nimport numpy as np\nfrom math import *\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimage\nimport collections\nfrom itertools import chain\nfrom functools import reduce\nfrom scipy.signal import find_peaks_cwt\nfrom moviepy.editor import VideoFileClip\n\n\ndef calibrate_camera(cal_images, nx, ny):\n objpoints = [] # 3D points\n imgpoints = [] # 2D points\n\n objp = np.zeros((nx*ny,3), np.float32)\n objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)\n\n for fname in cal_images:\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)\n\n return mtx, dist\n\ndef camera_setup(calibration_path):\n cal_images = glob.glob(calibration_path)\n nx, ny = 9, 6\n cam_mtx, cam_dist = calibrate_camera(cal_images, nx, ny)\n return cam_mtx, cam_dist\n\ndef get_perspective_transform(image, src_in = None, dst_in = None, display=False):\n img_size = image.shape\n if src_in is None:\n src = np.array([[585. /1280.*img_size[1], 455./720.*img_size[0]],\n [705. /1280.*img_size[1], 455./720.*img_size[0]],\n [1130./1280.*img_size[1], 720./720.*img_size[0]],\n [190. /1280.*img_size[1], 720./720.*img_size[0]]], np.float32)\n else:\n src = src_in\n\n if dst_in is None:\n dst = np.array([[300. /1280.*img_size[1], 100./720.*img_size[0]],\n [1000./1280.*img_size[1], 100./720.*img_size[0]],\n [1000./1280.*img_size[1], 720./720.*img_size[0]],\n [300. 
/1280.*img_size[1], 720./720.*img_size[0]]], np.float32)\n else:\n dst = dst_in\n\n warp_m = cv2.getPerspectiveTransform(src, dst)\n warp_minv = cv2.getPerspectiveTransform(dst, src)\n\n if display:\n plt.subplot(1,2,1)\n plt.hold(True)\n plt.imshow(image, cmap='gray')\n colors = ['r+','g+','b+','w+']\n for i in range(4):\n plt.plot(src[i,0],src[i,1],colors[i])\n\n im2 = cv2.warpPerspective(image, warp_m, (image.shape[1], image.shape[0]), flags=cv2.INTER_LINEAR)\n plt.subplot(1,2,2)\n plt.hold(True)\n plt.imshow(im2, cmap='gray')\n for i in range(4):\n plt.plot(dst[i,0],dst[i,1],colors[i])\n plt.show()\n return warp_m, warp_minv\n\ndef find_perspective_points(image):\n edges = find_edges(image)\n\n # Computing perspective points automatically\n rho = 2 # distance resolution in pixels of the Hough grid\n theta = 1*np.pi/180 # angular resolution in radians of the Hough grid\n threshold = 100 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 100 # minimum number of pixels making up a line\n max_line_gap = 25 # maximum gap in pixels between connectable line segments\n\n angle_min_mag = 20*pi/180\n angle_max_mag = 65*pi/180\n\n lane_markers_x = [[], []]\n lane_markers_y = [[], []]\n\n masked_edges = np.copy(edges)\n masked_edges[:edges.shape[0]*6//10,:] = 0\n lines = cv2.HoughLinesP(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)\n for line in lines:\n for x1,y1,x2,y2 in line:\n theta = atan2(y1-y2, x2-x1)\n rho = ((x1+x2)*cos(theta) + (y1+y2)*sin(theta))/2\n if (abs(theta) >= angle_min_mag and abs(theta) <= angle_max_mag):\n if theta > 0: # positive theta is downward in image space?\n i = 0 # Left lane marker\n else:\n i = 1 # Right lane marker\n lane_markers_x[i].append(x1)\n lane_markers_x[i].append(x2)\n lane_markers_y[i].append(y1)\n lane_markers_y[i].append(y2)\n\n if len(lane_markers_x[0]) < 1 or len(lane_markers_x[1]) < 1:\n # Failed to find two lane markers\n return None\n\n p_left = np.polyfit(lane_markers_y[0], lane_markers_x[0], 1)\n p_right = np.polyfit(lane_markers_y[1], lane_markers_x[1], 1)\n\n # Find intersection of the two lines\n apex_pt = np.linalg.solve([[p_left[0], -1], [p_right[0], -1]], [-p_left[1], -p_right[1]])\n top_y = ceil(apex_pt[0] + 0.075*edges.shape[0])\n\n bl_pt = ceil(np.polyval(p_left, edges.shape[0]))\n tl_pt = ceil(np.polyval(p_left, top_y))\n\n br_pt = ceil(np.polyval(p_right, edges.shape[0]))\n tr_pt = ceil(np.polyval(p_right, top_y))\n\n src = np.array([[tl_pt, top_y],\n [tr_pt, top_y],\n [br_pt, edges.shape[0]],\n [bl_pt, edges.shape[0]]], np.float32)\n\n get_perspective_transform(edges, src_in = src, dst_in = None, display=False)\n return src\n\ndef find_edges(image, mask_half=False):\n hls = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2HLS)\n s = hls[:,:,2]\n gray = (0.5*image[:,:,0] + 0.4*image[:,:,1] + 0.1*image[:,:,2]).astype(np.uint8)\n\n _, gray_binary = cv2.threshold(gray.astype('uint8'), 130, 255, cv2.THRESH_BINARY)\n\n # switch to gray image for laplacian if 's' doesn't give enough details\n total_px = image.shape[0]*image.shape[1]\n laplacian = cv2.Laplacian(gray, cv2.CV_32F, ksize=21)\n mask_one = (laplacian < 0.15*np.min(laplacian)).astype(np.uint8)\n if cv2.countNonZero(mask_one)/total_px < 0.01:\n laplacian = cv2.Laplacian(gray, cv2.CV_32F, ksize=21)\n mask_one = (laplacian < 0.075*np.min(laplacian)).astype(np.uint8)\n\n _, s_binary = cv2.threshold(s.astype('uint8'), 150, 255, cv2.THRESH_BINARY)\n mask_two = s_binary\n\n\n combined_binary = np.clip(cv2.bitwise_and(gray_binary,\n 
cv2.bitwise_or(mask_one, mask_two)), 0, 1).astype('uint8')\n\n return combined_binary\n\nym_per_pix = 30/720 # meters per pixel in y dimension\nxm_per_pix = 3.7/700 # meteres per pixel in x dimension\n\nclass Lane():\n def __init__(self, base_pt, img_size, cache_length):\n # was the line detected in the last iteration?\n self.detected = False\n # x values of the last n fits of the line\n self.recent_xfitted = collections.deque(maxlen=cache_length)\n self.recent_yfitted = collections.deque(maxlen=cache_length)\n\n #polynomial coefficients for the most recent fit\n self.current_fit = [np.array([False])]\n #radius of curvature of the line in some units\n self.radius_of_curvature = None\n #distance in meters of vehicle center from the line\n self.insanity = 0.0\n\n self.current_xfit = None\n\n self.img_size = img_size\n self.base_pt = base_pt\n\n self.yvals = np.linspace(0, img_size[0], 101)\n self.mask = np.ones(img_size, dtype=np.uint8)*255\n\n self.dropped_frames = 0\n\n def add_lane_pixels(self, x, y):\n \"\"\"\n Adds lane pixels and recomputes curve-fit.\n \"\"\"\n # Use all pixels from previous detections for curve fit\n weights = np.ones(len(self.recent_xfitted))\n if len(weights) > 1:\n weights[0] = 0.8\n weights[1:] = 0.2/(len(weights) - 1)\n\n w_x = reduce(lambda a,b: a + b[0]*b[1], zip(weights, self.recent_xfitted), np.zeros(len(self.yvals)))\n w_y = reduce(lambda a,b: a + b[0]*b[1], zip(weights, self.recent_yfitted), np.zeros(len(self.yvals)))\n else:\n w_x, w_y = [], []\n x_hist = np.fromiter(chain(w_x, x), np.int32)\n y_hist = np.fromiter(chain(w_y, y), np.int32)\n\n try:\n p_lane = np.polyfit(y_hist, x_hist, 2)\n rad_curv = self.compute_rad_curv(x_hist, y_hist)\n self.detected = self.sanity_check_lane(rad_curv)\n except Exception as e:\n print(e)\n self.detected = False\n\n if self.detected and len(p_lane) == 3:\n x_fit = p_lane[0]*self.yvals**2 + p_lane[1]*self.yvals + p_lane[2]\n\n self.current_xfit = x_fit # For drawing\n\n self.recent_xfitted.append(x_fit)\n self.recent_yfitted.append(self.yvals)\n\n self.radius_of_curvature = rad_curv\n self.current_fit = p_lane\n self.dropped_frames = 0\n else:\n # Sanity check failed\n # Use last fit if current one failed\n p_lane = self.current_fit\n rad_curv = self.radius_of_curvature\n x_fit = p_lane[0]*self.yvals**2 + p_lane[1]*self.yvals + p_lane[2]\n self.dropped_frames += 1\n\n # Update ROI mask\n self.mask.fill(0)\n # http://stackoverflow.com/a/35902430/538379\n pts = np.transpose(np.vstack([x_fit, self.yvals])).reshape((-1,1,2)).astype(np.int32)\n cv2.drawContours(self.mask, pts, -1, (255,255,255), thickness=80)\n\n\n @staticmethod\n def compute_rad_curv(xvals, yvals):\n fit_cr = np.polyfit(yvals*ym_per_pix, xvals*xm_per_pix, 2)\n y_eval = np.max(yvals)\n curverad = ((1 + (2*fit_cr[0]*y_eval + fit_cr[1])**2)**1.5) \\\n /np.absolute(2*fit_cr[0])\n return curverad\n\n\n def sanity_check_lane(self, R):\n \"\"\"\n Checks new radius of curvature `R` against the radius stored in the object.\n \"\"\"\n # Return true if there is no prior data\n if self.radius_of_curvature is None:\n return True\n\n R0 = self.radius_of_curvature\n self.insanity = abs(R-R0)/R0\n return self.insanity <= 0.5 # Max change from frame to frame is 200%\n\n\n def detect_from_mask(self, image):\n mask_lanes = cv2.bitwise_and(image, self.mask)\n all_pts = cv2.findNonZero(mask_lanes)\n if all_pts is not None:\n all_pts = all_pts.reshape((-1,2))\n self.add_lane_pixels(all_pts[:,0], all_pts[:,1])\n else:\n self.detected = False\n\n def draw_lane(self, image):\n 
\"\"\"\n Draws lane on given image\n \"\"\"\n pts = np.array([np.transpose(np.vstack([self.current_xfit, self.yvals]))])\n cv2.fillPoly(image, np.int_([pts]), (0,255, 0))\n return image\n\ndef reject_outliers(x_list, y_list):\n if not x_list or not y_list:\n return x_list, y_list\n mu_x, mu_y = np.mean(x_list), np.mean(y_list)\n sig_x, sig_y = np.std(x_list), np.std(y_list)\n new_x, new_y = zip(*[(x, y) for (x,y) in zip(x_list, y_list)\n if abs(x - mu_x) < 2*sig_x and abs(y - mu_y) < 2*sig_y])\n return new_x, new_y\n\ndef sliding_window(image, left_lane, right_lane, base_pts, num_bands = 10, window_width = 0.2):\n \"\"\"Uses histogram and sliding window to detect lanes from scratch\"\"\"\n\n height = image.shape[0]\n band_height = int(1./num_bands * height) # Divide image into horizontal bands\n band_width = int(window_width*image.shape[1])\n\n l_x, l_y, r_x, r_y = [], [], [], []\n\n base_left, base_right = base_pts\n\n idx_left, idx_right = base_pts\n for i in reversed(range(num_bands)):\n w_left = image[i*band_height:(i+1)*band_height,base_left-band_width//2:base_left+band_width//2]\n w_right = image[i*band_height:(i+1)*band_height,base_right-band_width//2:base_right+band_width//2]\n\n left_y_pt, left_x_pt = np.nonzero(w_left)\n right_y_pt, right_x_pt = np.nonzero(w_right)\n\n l_x.extend(left_x_pt + base_left-band_width//2)\n l_y.extend(left_y_pt + i*band_height)\n r_x.extend(right_x_pt+ base_right-band_width//2)\n r_y.extend(right_y_pt+ i*band_height)\n\n # Find 'x' with maximum nonzero elements as baseline for next window\n s_left = np.sum(w_left, axis=0)\n s_right = np.sum(w_right, axis=0)\n if np.any(s_left > 0):\n base_left = np.argmax(s_left) + base_left-band_width//2\n if np.any(s_right > 0):\n base_right = np.argmax(s_right) + base_right-band_width//2\n\n l_x, l_y = reject_outliers(l_x, l_y)\n r_x, r_y = reject_outliers(r_x, r_y)\n\n left_lane.add_lane_pixels(l_x, l_y)\n right_lane.add_lane_pixels(r_x, r_y)\n\n return left_lane, right_lane\n\ndef histogram_base_points(lanes, min_peak = 25.0):\n \"\"\"Uses histogram to find possible base points for lane lines\"\"\"\n hist = np.sum(lanes[int(lanes.shape[0]*0.5):,:], axis=0)\n\n widths = [100]\n idx = find_peaks_cwt(hist, widths, max_distances=widths, noise_perc=50)\n if len(idx) < 2:\n return None\n\n # Avoid edges\n idx = [i for i in idx if i > lanes.shape[1]*0.1\n and i < lanes.shape[1]*0.9\n and max(hist[i-50:i+50]) > min_peak]\n\n return [min(idx), max(idx)]\n\ndef process_image(image, key_frame_interval=20, cache_length=10):\n global cam_mtx, cam_dist\n\n if process_image.cache is None:\n\n left_lane = Lane(int(0.16*image.shape[0]), image.shape[:2], cache_length=cache_length)\n right_lane = Lane(int(0.62*image.shape[0]), image.shape[:2], cache_length=cache_length)\n\n cache = {'cam_mtx': cam_mtx,\n 'cam_dist': cam_dist,\n 'warp_m': None,\n 'warp_minv': None,\n 'frame_ctr': 0,\n 'left': left_lane,\n 'right': right_lane,\n 'base_pts': None}\n else:\n cache = process_image.cache\n\n\n left_lane = cache['left']\n right_lane = cache['right']\n\n # Preprocess image and find edges using thresholding\n undist = cv2.undistort(image, cam_mtx, cam_dist, None, cam_mtx)\n\n if cache['warp_m'] is None:# or cache['frame_ctr'] % key_frame_interval == 0:\n src = find_perspective_points(undist)\n warp_m, warp_minv = get_perspective_transform(image, src_in = src)\n\n if src is not None:\n # Save only if customized perspective transform is found\n cache['warp_m'] = warp_m\n cache['warp_minv'] = warp_minv\n else:\n warp_m, warp_minv = 
cache['warp_m'], cache['warp_minv']\n\n edges = find_edges(undist)\n warp_edges = cv2.warpPerspective(edges, warp_m, (image.shape[1], image.shape[0]), flags=cv2.INTER_LINEAR)\n\n # Reverse pipeline (warp before thresholding)\n # warp_img = cv2.warpPerspective(undist, warp_m, (image.shape[1], image.shape[0]), flags=cv2.INTER_LINEAR)\n # warp_edges = find_edges(warp_img)\n\n base_pts = cache['base_pts']\n if base_pts is None: #or cache['frame_ctr'] % key_frame_interval == 0:\n new_base_pts = histogram_base_points(warp_edges)\n\n if new_base_pts is not None:\n base_pts = new_base_pts\n else:\n # Could not find new base points\n # Re-use previous data if base points could not be found\n cache['frame_ctr'] = cache['frame_ctr'] - 1 # Make sure we try again in the next frame\n return undist\n\n if ((left_lane.current_xfit is None or left_lane.dropped_frames > 16)\n or (right_lane.current_xfit is None or right_lane.dropped_frames > 16)):\n # Detect from scratch\n left_lane.radius_of_curvature = None\n right_lane.radius_of_curvature = None\n sliding_window(warp_edges, left_lane, right_lane, base_pts)\n else:\n left_lane.detect_from_mask(warp_edges)\n right_lane.detect_from_mask(warp_edges)\n\n cache['frame_ctr'] = cache['frame_ctr'] + 1\n cache['base_pts'] = base_pts\n process_image.cache = cache\n\n # Create an image to draw the lines on\n color_warp = np.zeros_like(image).astype(np.uint8)\n\n yvals = left_lane.yvals\n left_fitx = left_lane.current_xfit\n right_fitx = right_lane.current_xfit\n\n # Create an image to draw the lines on\n color_warp = np.zeros_like(image).astype(np.uint8)\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, yvals]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, yvals])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Draw lane markers\n pts = np.transpose(np.vstack([left_lane.current_xfit, left_lane.yvals])).reshape((-1,1,2)).astype(np.int32)\n cv2.drawContours(color_warp, pts, -1, (255,0,0), thickness=30)\n pts = np.transpose(np.vstack([right_lane.current_xfit, right_lane.yvals])).reshape((-1,1,2)).astype(np.int32)\n cv2.drawContours(color_warp, pts, -1, (0,0,255), thickness=30)\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, warp_minv, (image.shape[1], image.shape[0]))\n\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n left_r = left_lane.radius_of_curvature\n right_r = right_lane.radius_of_curvature\n middle = (left_fitx[-1] + right_fitx[-1])//2\n veh_pos = image.shape[1]//2\n\n dx = (veh_pos - middle)*xm_per_pix # Positive if on right, Negative on left\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(result,'Left radius of curvature = %.2f m'%(left_r),(50,50), font, 1,(255,255,255),2,cv2.LINE_AA)\n cv2.putText(result,'Right radius of curvature = %.2f m'%(right_r),(50,80), font, 1,(255,255,255),2,cv2.LINE_AA)\n cv2.putText(result,'Vehicle position : %.2f m %s of center'%(abs(dx), 'left' if dx < 0 else 'right'),(50,110),\n font, 1,(255,255,255),2,cv2.LINE_AA)\n\n is_tracking = left_lane.detected or right_lane.detected\n cv2.putText(result,'Tracking Locked' if is_tracking else 'Tracking Lost',(50,140),\n font, 1,(0,255,0) if is_tracking else (255,0,0), 3,cv2.LINE_AA)\n\n cache['left'] = left_lane\n cache['right'] = 
right_lane\n\n return result\n\ndef clear_cache():\n process_image.cache = None\n\nfrom docopt import docopt\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='Advanced Lane Lines 1.0')\n\n clear_cache()\n cam_file = arguments['-c']\n\n if not os.path.isfile(cam_file):\n print('Calibrating camera ...')\n cam_mtx, cam_dist = camera_setup('camera_cal/calibration*.jpg')\n np.savez_compressed(cam_file, cam_mtx=cam_mtx, cam_dist=cam_dist)\n else:\n print('Loading camera data from', cam_file)\n data = np.load(cam_file)\n cam_mtx = data['cam_mtx']\n cam_dist = data['cam_dist']\n\n in_file = arguments['<input_video>']\n out_file = arguments['<output_video>']\n\n print('Processing video ...')\n clip2 = VideoFileClip(in_file)\n vid_clip = clip2.fl_image(process_image)\n vid_clip.write_videofile(out_file, audio=False)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.image.imread",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"numpy.polyfit",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"matplotlib.pyplot.hold",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.zeros_like",
"numpy.polyval",
"numpy.hstack",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.std",
"numpy.argmax",
"numpy.load",
"numpy.zeros",
"numpy.nonzero",
"numpy.min",
"scipy.signal.find_peaks_cwt",
"numpy.int_",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.linalg.solve",
"numpy.absolute",
"numpy.ones",
"numpy.savez_compressed",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
adelavega/pliers | [
"dee21102689c77a56b7da48bf9a0ac10c90be0eb"
] | [
"pliers/tests/extractors/api/test_clarifai_extractors.py"
] | [
"from os.path import join\nfrom ...utils import get_test_data_path\nfrom pliers.extractors import ClarifaiAPIExtractor\nfrom pliers.stimuli import ImageStim\nfrom pliers.extractors.base import merge_results\nimport numpy as np\nimport pytest\n\n\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor():\n image_dir = join(get_test_data_path(), 'image')\n stim = ImageStim(join(image_dir, 'apple.jpg'))\n result = ClarifaiAPIExtractor().transform(stim).to_df()\n assert result['apple'][0] > 0.5\n assert result.ix[:, 5][0] > 0.0\n\n result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()\n assert result.shape == (1, 9)\n\n result = ClarifaiAPIExtractor(\n min_value=0.9).transform(stim).to_df(object_id=False)\n assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])\n\n concepts = ['cat', 'dog']\n result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)\n result = result.to_df()\n assert result.shape == (1, 6)\n assert 'cat' in result.columns and 'dog' in result.columns\n\n\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor_batch():\n image_dir = join(get_test_data_path(), 'image')\n stim = ImageStim(join(image_dir, 'apple.jpg'))\n stim2 = ImageStim(join(image_dir, 'obama.jpg'))\n ext = ClarifaiAPIExtractor()\n results = ext.transform([stim, stim2])\n results = merge_results(results)\n assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \\\n results['ClarifaiAPIExtractor#apple'][1] > 0.5\n\n # This takes too long to execute\n # video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))\n # results = ExtractorResult.merge_stims(ext.transform(video))\n # assert 'Lego' in results.columns and 'robot' in results.columns\n"
] | [
[
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
makistsantekidis/opendr | [
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890"
] | [
"src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py",
"src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/algorithm/datasets/gen_facial_muscles_data.py",
"projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py",
"src/opendr/perception/skeleton_based_action_recognition/algorithm/models/stgcn.py",
"src/opendr/perception/face_recognition/algorithm/align/align_trans.py",
"src/opendr/perception/object_detection_2d/detr/algorithm/models/detr.py"
] | [
"# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport json\nimport torch\nimport ntpath\nimport shutil\nimport numpy as np\nimport onnxruntime as ort\nfrom torchvision.transforms import transforms as T\nfrom opendr.engine.learners import Learner\nfrom opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator\nfrom opendr.perception.object_tracking_2d.logger import Logger\nfrom opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint\nfrom opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker\nfrom opendr.engine.data import Image\nfrom opendr.engine.target import TrackingAnnotation, TrackingAnnotationList\nfrom opendr.engine.constants import OPENDR_SERVER_URL\nfrom urllib.request import urlretrieve\n\n\nclass ObjectTracking2DFairMotLearner(Learner):\n def __init__(\n self,\n lr=0.0001,\n iters=-1,\n batch_size=4,\n optimizer=\"adam\",\n lr_schedule=\"\",\n backbone=\"dla_34\",\n network_head=\"\",\n checkpoint_after_iter=0,\n checkpoint_load_iter=0,\n temp_path=\"\",\n device=\"cuda\",\n threshold=0.3,\n scale=1.0,\n lr_step=[20],\n head_conv=256,\n ltrb=True,\n num_classes=1,\n reg_offset=True,\n gpus=[0],\n num_workers=4,\n mse_loss=False,\n reg_loss='l1',\n dense_wh=False,\n cat_spec_wh=False,\n reid_dim=128,\n norm_wh=False,\n wh_weight=0.1,\n off_weight=1,\n id_weight=1,\n num_epochs=30,\n hm_weight=1,\n down_ratio=4,\n max_objs=500,\n track_buffer=30,\n image_mean=[0.408, 0.447, 0.47],\n image_std=[0.289, 0.274, 0.278],\n frame_rate=30,\n min_box_area=100,\n ):\n # Pass the shared parameters on super's constructor so they can get initialized as class attributes\n super(ObjectTracking2DFairMotLearner, self).__init__(\n lr=lr,\n iters=iters,\n batch_size=batch_size,\n optimizer=optimizer,\n lr_schedule=lr_schedule,\n backbone=backbone,\n network_head=network_head,\n checkpoint_after_iter=checkpoint_after_iter,\n checkpoint_load_iter=checkpoint_load_iter,\n temp_path=temp_path,\n device=device,\n threshold=threshold,\n scale=scale,\n )\n\n self.ltrb = ltrb\n self.head_conv = head_conv\n self.num_classes = num_classes\n self.reid_dim = reid_dim\n self.reg_offset = reg_offset\n self.gpus = gpus\n self.num_workers = num_workers\n self.mse_loss = mse_loss\n self.reg_loss = reg_loss\n self.dense_wh = dense_wh\n self.cat_spec_wh = cat_spec_wh\n self.reid_dim = reid_dim\n self.norm_wh = norm_wh\n self.wh_weight = wh_weight\n self.off_weight = off_weight\n self.id_weight = id_weight\n self.num_epochs = num_epochs\n self.lr_step = 
lr_step\n self.hm_weight = hm_weight\n self.down_ratio = down_ratio\n self.max_objs = max_objs\n self.track_buffer = track_buffer\n self.image_mean = image_mean\n self.image_mean = image_mean\n self.image_std = image_std\n self.frame_rate = frame_rate\n self.min_box_area = min_box_area\n\n main_batch_size = self.batch_size // len(self.gpus)\n rest_batch_size = (self.batch_size - main_batch_size)\n self.chunk_sizes = [main_batch_size]\n\n for i in range(len(self.gpus) - 1):\n worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)\n if i < rest_batch_size % (len(self.gpus) - 1):\n worker_chunk_size += 1\n self.chunk_sizes.append(worker_chunk_size)\n\n self.__create_model()\n\n def save(self, path, verbose=False):\n \"\"\"\n This method is used to save a trained model.\n Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name\n of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.\n If self.optimize was ran previously, it saves the optimized ONNX model in a similar fashion, by copying it\n from the self.temp_path it was saved previously during conversion.\n :param path: for the model to be saved, including the folder name\n :type path: str\n :param verbose: whether to print success message or not, defaults to 'False'\n :type verbose: bool, optional\n \"\"\"\n\n if self.model is None and self.ort_session is None:\n raise UserWarning(\"No model is loaded, cannot save.\")\n\n folder_name, _, tail = self.__extract_trailing(path) # Extract trailing folder name from path\n # Also extract folder name without any extension if extension is erroneously provided\n folder_name_no_ext = folder_name.split(sep='.')[0]\n\n # Extract path without folder name, by removing folder name from original path\n path_no_folder_name = ''.join(path.rsplit(folder_name, 1))\n # If tail is '', then path was a/b/c/, which leaves a trailing double '/'\n if tail == '':\n path_no_folder_name = path_no_folder_name[0:-1] # Remove one '/'\n\n # Create model directory\n new_path = path_no_folder_name + folder_name_no_ext\n os.makedirs(new_path, exist_ok=True)\n\n model_metadata = {\"model_paths\": [], \"framework\": \"pytorch\", \"format\": \"\", \"has_data\": False,\n \"inference_params\": {}, \"optimized\": None, \"optimizer_info\": {}}\n\n if self.model.ort_session is None:\n model_metadata[\"model_paths\"] = [\n folder_name_no_ext + \".pth\",\n ]\n model_metadata[\"optimized\"] = False\n model_metadata[\"format\"] = \"pth\"\n\n torch.save({\n 'state_dict': self.model.state_dict()\n }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Saved Pytorch model.\")\n else:\n model_metadata[\"model_paths\"] = [\n folder_name_no_ext + \".onnx\"\n ]\n model_metadata[\"optimized\"] = True\n model_metadata[\"format\"] = \"onnx\"\n\n shutil.copy2(\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"),\n os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata[\"model_paths\"][0])\n )\n if verbose:\n print(\"Saved ONNX model.\")\n\n with open(os.path.join(new_path, folder_name_no_ext + \".json\"), 'w') as outfile:\n json.dump(model_metadata, outfile)\n\n def load(\n self,\n path,\n verbose=False,\n ):\n \"\"\"\n Loads the model from inside the path provided, based on the metadata .json file included.\n :param path: path of the directory the model was saved\n :type path: str\n :param verbose: whether to print success message or not, defaults to 'False'\n 
:type verbose: bool, optional\n \"\"\"\n\n model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided\n\n with open(os.path.join(path, model_name + \".json\")) as metadata_file:\n metadata = json.load(metadata_file)\n\n if not metadata[\"optimized\"]:\n self.__load_from_pth(self.model, os.path.join(path, metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Loaded Pytorch model.\")\n else:\n self.__load_rpn_from_onnx(os.path.join(path, metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Loaded ONNX model.\")\n\n def reset(self):\n self.tracker.reset()\n\n def fit(\n self,\n dataset,\n val_dataset=None,\n val_epochs=-1,\n logging_path=None,\n silent=False,\n verbose=False,\n train_split_paths=None,\n val_split_paths=None,\n resume_optimizer=False,\n nID=None\n ):\n\n if train_split_paths is None:\n train_split_paths = {\n \"mot20\": os.path.join(\n \"perception\", \"object_tracking_2d\", \"datasets\", \"splits\", \"mot20.train\"\n )\n }\n\n if val_split_paths is None:\n val_split_paths = train_split_paths\n\n logger = Logger(silent, verbose, logging_path)\n\n (\n input_dataset_iterator,\n eval_dataset_iterator,\n ) = self._prepare_datasets(\n dataset,\n val_dataset,\n train_split_paths,\n val_split_paths,\n require_val_dataset=val_epochs > 0,\n )\n\n if nID is None:\n nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, \"nID\") else dataset.nID\n\n checkpoints_path = os.path.join(self.temp_path, \"checkpoints\")\n if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:\n os.makedirs(checkpoints_path, exist_ok=True)\n\n start_epoch = 0\n\n if self.checkpoint_load_iter != 0:\n _, _, start_epoch = load_from_checkpoint(\n self.model, os.path.join(checkpoints_path, f\"checkpoint_{self.checkpoint_load_iter}.pth\"),\n self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,\n )\n\n last_eval_result = train(\n self.model,\n self.infer,\n self.model_optimizer,\n input_dataset_iterator,\n eval_dataset_iterator,\n self.batch_size,\n self.num_workers,\n self.gpus,\n self.chunk_sizes,\n self.iters,\n \"train\", # exp_id,\n self.device,\n silent, # hide_data_time,\n 1 if verbose else (-1 if silent else 10), # print_iter,\n self.mse_loss,\n self.reg_loss,\n self.dense_wh,\n self.cat_spec_wh,\n self.reid_dim,\n nID,\n self.norm_wh,\n 1, # num_stack,\n self.wh_weight,\n self.off_weight,\n self.id_weight,\n self.num_epochs,\n self.lr_step,\n self.temp_path,\n self.lr,\n self.reg_offset,\n self.hm_weight,\n checkpoints_path,\n self.checkpoint_after_iter,\n start_epoch,\n val_epochs=val_epochs,\n log=logger.log,\n )\n\n logger.close()\n\n return last_eval_result\n\n def eval(\n self,\n dataset,\n val_split_paths=None,\n logging_path=None,\n silent=False,\n verbose=False,\n ):\n\n logger = Logger(silent, verbose, logging_path)\n\n (\n _,\n eval_dataset_iterator,\n ) = self._prepare_datasets(\n None,\n dataset,\n None,\n val_split_paths,\n require_dataset=False,\n )\n\n result = evaluate(self.infer, dataset)\n\n logger.log(Logger.LOG_WHEN_NORMAL, result)\n\n logger.close()\n\n return result\n\n def infer(self, batch, frame_ids=None, img_size=(1088, 608)):\n\n if self.model is None:\n raise ValueError(\"No model loaded or created\")\n\n self.model.eval()\n\n is_single_image = False\n\n if isinstance(batch, Image):\n batch = [batch]\n is_single_image = True\n elif not isinstance(batch, list):\n raise ValueError(\"Input batch should be an engine.Image or a list of engine.Image\")\n\n if frame_ids is None:\n frame_ids = [-1] 
* len(batch)\n elif is_single_image:\n frame_ids = [frame_ids]\n\n results = []\n\n for image, frame_id in zip(batch, frame_ids):\n\n img0 = image.convert(\"channels_last\", \"bgr\") # BGR\n img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])\n\n # Normalize RGB\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n blob = torch.from_numpy(img).to(self.device).unsqueeze(0)\n\n online_targets = self.tracker.update(blob, img0)\n online_tlwhs = []\n online_ids = []\n online_scores = []\n for t in online_targets:\n tlwh = t.tlwh\n tid = t.track_id\n vertical = tlwh[2] / tlwh[3] > 1.6\n if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:\n online_tlwhs.append(tlwh)\n online_ids.append(tid)\n online_scores.append(t.score)\n\n result = TrackingAnnotationList([\n TrackingAnnotation(\n name=0,\n top=tlwh[0],\n left=tlwh[1],\n width=tlwh[2],\n height=tlwh[3],\n id=id,\n score=score,\n frame=frame_id,\n ) for tlwh, id, score in zip(\n online_tlwhs,\n online_ids,\n online_scores\n )\n ])\n\n results.append(result)\n\n if is_single_image:\n results = results[0]\n\n return results\n\n def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):\n \"\"\"\n Optimize method converts the model to ONNX format and saves the\n model in the parent directory defined by self.temp_path. The ONNX model is then loaded.\n :param do_constant_folding: whether to optimize constants, defaults to 'False'\n :type do_constant_folding: bool, optional\n \"\"\"\n\n if not optimizable_dcn_v2:\n raise Exception(\"Can not optimize the model while DCNv2 implementation is not optimizable\")\n\n if self.model is None:\n raise UserWarning(\"No model is loaded, cannot optimize. 
Load or train a model first.\")\n if self.model.ort_session is not None:\n raise UserWarning(\"Model is already optimized in ONNX.\")\n\n input_shape = [\n 1,\n 3,\n img_size[1],\n img_size[0],\n ]\n\n try:\n self.__convert_to_onnx(\n input_shape,\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"), do_constant_folding\n )\n except FileNotFoundError:\n # Create temp directory\n os.makedirs(self.temp_path, exist_ok=True)\n self.__convert_rpn_to_onnx(\n input_shape,\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"), do_constant_folding\n )\n\n self.__load_rpn_from_onnx(os.path.join(self.temp_path, \"onnx_model_rpn_temp.onnx\"))\n\n @staticmethod\n def download(model_name, path, server_url=None):\n\n if server_url is None and model_name not in [\n \"crowdhuman_dla34\",\n \"fairmot_dla34\",\n ]:\n raise ValueError(\"Unknown model_name: \" + model_name)\n\n os.makedirs(path, exist_ok=True)\n\n if server_url is None:\n server_url = os.path.join(\n OPENDR_SERVER_URL, \"perception\", \"object_tracking_2d\",\n \"fair_mot\"\n )\n\n url = os.path.join(\n server_url, model_name\n )\n\n model_dir = os.path.join(path, model_name)\n os.makedirs(model_dir, exist_ok=True)\n\n urlretrieve(os.path.join(\n url, model_name + \".json\"\n ), os.path.join(\n model_dir, model_name + \".json\"\n ))\n\n try:\n urlretrieve(os.path.join(\n url, model_name + \".pth\"\n ), os.path.join(\n model_dir, model_name + \".pth\"\n ))\n except Exception:\n urlretrieve(os.path.join(\n url, model_name + \".tckpt\"\n ), os.path.join(\n model_dir, model_name + \".pth\"\n ))\n\n print(\"Downloaded model\", model_name, \"to\", model_dir)\n\n return model_dir\n\n def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):\n inp = torch.randn(input_shape).to(self.device)\n input_names = [\"data\"]\n output_names = self.heads.keys()\n\n torch.onnx.export(\n self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,\n do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names\n )\n\n def __load_from_onnx(self, path):\n \"\"\"\n This method loads an ONNX model from the path provided into an onnxruntime inference session.\n\n :param path: path to ONNX model\n :type path: str\n \"\"\"\n self.model.rpn_ort_session = ort.InferenceSession(path)\n\n # The comments below are the alternative way to use the onnx model, it might be useful in the future\n # depending on how ONNX saving/loading will be implemented across the toolkit.\n # # Load the ONNX model\n # self.model = onnx.load(path)\n #\n # # Check that the IR is well formed\n # onnx.checker.check_model(self.model)\n #\n # # Print a human readable representation of the graph\n # onnx.helper.printable_graph(self.model.graph)\n\n def __load_from_pth(self, model, path, use_original_dict=False):\n all_params = torch.load(path, map_location=self.device)\n model.load_state_dict(all_params if use_original_dict else all_params[\"state_dict\"])\n\n def _prepare_datasets(\n self,\n dataset,\n val_dataset,\n train_split_paths,\n val_split_paths,\n require_dataset=True,\n require_val_dataset=True,\n ):\n\n input_dataset_iterator = None\n eval_dataset_iterator = None\n\n if isinstance(dataset, ExternalDataset):\n\n dataset_path = dataset.path\n if dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(dataset) +\n \") is given as a dataset, but it is not a MOT dataset\")\n\n transforms = T.Compose([T.ToTensor()])\n input_dataset_iterator = JointDataset(\n dataset_path,\n 
train_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n augment=False, transforms=transforms,\n )\n elif isinstance(dataset, DatasetIterator):\n input_dataset_iterator = MappedDatasetIterator(\n dataset,\n lambda d: process_dataset(\n d[0], d[1], self.ltrb, self.down_ratio,\n self.max_objs, self.num_classes, self.mse_loss\n )\n )\n else:\n if require_dataset or dataset is not None:\n raise ValueError(\n \"dataset parameter should be an ExternalDataset or a DatasetIterator\"\n )\n\n if isinstance(val_dataset, ExternalDataset):\n\n val_dataset_path = val_dataset.path\n if val_dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(val_dataset) +\n \") is given as a val_dataset, but it is not a MOT dataset\"\n )\n\n eval_dataset_iterator = RawMotDatasetIterator(\n val_dataset_path,\n val_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n )\n\n elif isinstance(val_dataset, DatasetIterator):\n eval_dataset_iterator = val_dataset\n elif val_dataset is None:\n if isinstance(dataset, ExternalDataset):\n val_dataset_path = dataset.path\n if dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(dataset) +\n \") is given as a dataset, but it is not a MOT dataset\"\n )\n\n eval_dataset_iterator = RawMotDatasetIterator(\n val_dataset_path,\n val_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n )\n\n elif require_val_dataset:\n raise ValueError(\n \"val_dataset is None and can't be derived from\" +\n \" the dataset object because the dataset is not an ExternalDataset\"\n )\n else:\n eval_dataset_iterator = input_dataset_iterator\n else:\n raise ValueError(\n \"val_dataset parameter should be an ExternalDataset or a DatasetIterator or None\"\n )\n\n return input_dataset_iterator, eval_dataset_iterator\n\n def __create_model(self):\n\n heads = {\n 'hm': self.num_classes,\n 'wh': 2 if not self.ltrb else 4,\n 'id': self.reid_dim\n }\n if self.reg_offset:\n heads.update({'reg': 2})\n\n self.heads = heads\n\n self.model = create_model(self.backbone, heads, self.head_conv)\n self.model.to(self.device)\n self.model.ort_session = None\n self.model.heads_names = heads.keys()\n\n self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)\n\n self.tracker = JDETracker(\n self.model,\n self.threshold,\n self.track_buffer,\n self.max_objs,\n self.image_mean,\n self.image_std,\n self.down_ratio,\n self.num_classes,\n self.reg_offset,\n self.ltrb,\n self.frame_rate,\n )\n\n @staticmethod\n def __extract_trailing(path):\n \"\"\"\n Extracts the trailing folder name or filename from a path provided in an OS-generic way, also handling\n cases where the last trailing character is a separator. Returns the folder name and the split head and tail.\n :param path: the path to extract the trailing filename or folder name from\n :type path: str\n :return: the folder name, the head and tail of the path\n :rtype: tuple of three strings\n \"\"\"\n head, tail = ntpath.split(path)\n folder_name = tail or ntpath.basename(head) # handle both a/b/c and a/b/c/\n return folder_name, head, tail\n",
"# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.lib.format import open_memmap\nfrom scipy.spatial import Delaunay\nimport argparse\n\n\ndef find_graph_edges(x):\n points = np.transpose(x[0, :, 0, :, 0])\n print(points.shape)\n tri = Delaunay(points)\n neigh = tri.simplices\n print(neigh.shape)\n G = []\n N = neigh.shape[0]\n for i in range(N):\n G.append((neigh[i][0], neigh[i][1]))\n G.append((neigh[i][0], neigh[i][2]))\n G.append((neigh[i][1], neigh[i][2]))\n # connect the master node (nose) to all other nodes\n for i in range(51):\n G.append((i+1, 17))\n edges = G\n return edges\n\n\ndef gen_muscle_data(data, muscle_path):\n \"\"\"Generate facial muscle data from facial landmarks\"\"\"\n N, C, T, V, M = data.shape\n edges = find_graph_edges(data)\n V_muscle = len(edges)\n fp_sp = open_memmap(muscle_path, dtype='float32', mode='w+', shape=(N, C, T, V_muscle, M))\n # Copy the landmark data to muscle placeholder tensor\n fp_sp[:, :, :, :V, :] = data\n for edge_id, (source_node, target_node) in enumerate(edges):\n fp_sp[:, :, :, edge_id, :] = data[:, :, :, source_node-1, :] - data[:, :, :, target_node-1, :]\n return fp_sp\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Facial muscle data generator.')\n parser.add_argument('--landmark_data_folder', default='./data/CASIA_10fold/')\n parser.add_argument('--muscle_data_folder', default='./data/muscle_data/')\n parser.add_argument('--dataset_name', default='CASIA')\n arg = parser.parse_args()\n part = ['Train', 'Val']\n for p in part:\n if arg.dataset_name == 'CASIA' or arg.dataset_name == 'CK+':\n for i in range(10):\n landmark_path = arg.landmark_data_folder + '/{}/{}_{}.npy'.format(arg.dataset_name, p, i)\n landmark_data = np.load(landmark_path)\n muscle_path = arg.muscle_data_folder + '/{}/{}_muscle_{}.npy'.format(arg.dataset_name, p, i)\n muscle_data = gen_muscle_data(landmark_data, muscle_path)\n elif arg.dataset_name == 'AFEW':\n landmark_path = arg.landmark_data_folder + '/{}/{}.npy'.format(arg.dataset_name, p)\n landmark_data = np.load(landmark_path)\n muscle_path = arg.muscle_data_folder + '/{}/{}_muscle.npy'.format(arg.dataset_name, p)\n muscle_data = gen_muscle_data(landmark_data, muscle_path)\n",
"#!/usr/bin/env python\n\n# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import Int16\nfrom sensor_msgs.msg import Image as ROS_Image\nfrom std_msgs.msg import Float32MultiArray\nfrom single_demo_inference import SingleDemoInference\nfrom opendr_bridge import ROSBridge\n\n\nclass SingleDemoGraspCameraStream(object):\n\n def __init__(self, path_to_dt_model, thresh):\n \"\"\"SingleDemoGraspCameraStream initialization\"\"\"\n self.object_locator = SingleDemoInference(path_to_dt_model, thresh)\n self.rgb_image = None\n self.command_publisher = rospy.Publisher('/commands', Float32MultiArray, queue_size=1)\n self.detection_request_sub = rospy.Subscriber(\"/request_detection\", Int16, self.request_callback)\n self.image_sub = rospy.Subscriber(\"/camera/color/raw\", ROS_Image, self.image_callback)\n self.bridge = ROSBridge()\n\n def image_callback(self, data):\n self.rgb_image = self.bridge.from_ros_image(data, encoding='rgb8')\n\n def request_callback(self, data):\n print(\"new request:\")\n print(data.data)\n self.image_analyze(data.data)\n\n def image_analyze(self, msg_id):\n analyze_img = self.rgb_image.opencv()\n flag, bbx, pred_angle, pred_kps_center = self.object_locator.predict(analyze_img)\n bbx = np.asarray(bbx)\n bbx = bbx.astype(int)\n msg = Float32MultiArray()\n\n if (flag > 0):\n print(bbx)\n ctr_X = int((bbx[0] + bbx[2]) / 2)\n ctr_Y = int((bbx[1] + bbx[3]) / 2)\n angle = pred_angle\n ref_x = 640 / 2\n ref_y = 480 / 2\n\n # distance to the center of bounding box representing the center of object\n dist = [ctr_X - ref_x, ref_y - ctr_Y]\n # distance center of keypoints representing the grasp location of the object\n dist_kps_ctr = [pred_kps_center[0] - ref_x, ref_y - pred_kps_center[1]]\n msg.data = [msg_id, dist[0], dist[1], angle, dist_kps_ctr[0], dist_kps_ctr[1]]\n self.command_publisher.publish(msg)\n\n else:\n # 1e10 as a big large enough number out of range. reciever use this number\n # to check whether a detection is available or not\n msg.data = [msg_id, 1e10, 1e10, 1e10, 1e10]\n self.command_publisher.publish(msg)\n\n\nif __name__ == '__main__':\n\n dir_temp = os.path.join(\"./\", \"sdg_temp\")\n rospy.init_node('grasp_server', anonymous=True)\n camera_streamer = SingleDemoGraspCameraStream(os.path.join(dir_temp, \"pendulum\", \"output\", \"model_final.pth\"), 0.8)\n rospy.spin()\n input()\n sys.exit()\n",
"\"\"\"\nModified based on: https://github.com/open-mmlab/mmskeleton\n\"\"\"\n\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom opendr.perception.skeleton_based_action_recognition.algorithm.graphs.nturgbd import NTUGraph\nfrom opendr.perception.skeleton_based_action_recognition.algorithm.graphs.kinetics import KineticsGraph\n\n\ndef weights_init(module_, bs=1):\n if isinstance(module_, nn.Conv2d) and bs == 1:\n nn.init.kaiming_normal_(module_.weight, mode='fan_out')\n nn.init.constant_(module_.bias, 0)\n elif isinstance(module_, nn.Conv2d) and bs != 1:\n nn.init.normal_(module_.weight, 0,\n math.sqrt(2. / (module_.weight.size(0) * module_.weight.size(1) * module_.weight.size(2) * bs)))\n nn.init.constant_(module_.bias, 0)\n elif isinstance(module_, nn.BatchNorm2d):\n nn.init.constant_(module_.weight, bs)\n nn.init.constant_(module_.bias, 0)\n elif isinstance(module_, nn.Linear):\n nn.init.normal_(module_.weight, 0, math.sqrt(2. / bs))\n\n\nclass GraphConvolution(nn.Module):\n def __init__(self, in_channels, out_channels, A, cuda_):\n super(GraphConvolution, self).__init__()\n self.cuda_ = cuda_\n self.graph_attn = nn.Parameter(torch.from_numpy(A.astype(np.float32)))\n nn.init.constant_(self.graph_attn, 1)\n self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)\n self.num_subset = 3\n self.g_conv = nn.ModuleList()\n for i in range(self.num_subset):\n self.g_conv.append(nn.Conv2d(in_channels, out_channels, 1))\n weights_init(self.g_conv[i], bs=self.num_subset)\n\n if in_channels != out_channels:\n self.gcn_residual = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1),\n nn.BatchNorm2d(out_channels)\n )\n weights_init(self.gcn_residual[0], bs=1)\n weights_init(self.gcn_residual[1], bs=1)\n else:\n self.gcn_residual = lambda x: x\n\n self.bn = nn.BatchNorm2d(out_channels)\n weights_init(self.bn, bs=1e-6)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n N, C, T, V = x.size()\n if self.cuda_:\n A = self.A.cuda(x.get_device())\n else:\n A = self.A\n A = A * self.graph_attn\n hidden_ = None\n for i in range(self.num_subset):\n x_a = x.view(N, C * T, V)\n z = self.g_conv[i](torch.matmul(x_a, A[i]).view(N, C, T, V))\n hidden_ = z + hidden_ if hidden_ is not None else z\n hidden_ = self.bn(hidden_)\n hidden_ += self.gcn_residual(x)\n return self.relu(hidden_)\n\n\nclass TemporalConvolution(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):\n super(TemporalConvolution, self).__init__()\n\n pad = int((kernel_size - 1) / 2)\n self.t_conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),\n padding=(pad, 0), stride=(stride, 1))\n self.bn = nn.BatchNorm2d(out_channels)\n weights_init(self.t_conv, bs=1)\n weights_init(self.bn, bs=1)\n\n def forward(self, x):\n x = self.bn(self.t_conv(x))\n return x\n\n\nclass ST_GCN_block(nn.Module):\n def __init__(self, in_channels, out_channels, A, cuda_=False, stride=1, residual=True):\n super(ST_GCN_block, self).__init__()\n\n self.gcn = GraphConvolution(in_channels, out_channels, A, cuda_)\n self.tcn = TemporalConvolution(out_channels, out_channels, stride=stride)\n self.relu = nn.ReLU()\n if not residual:\n self.residual = lambda x: 0\n elif (in_channels == out_channels) and (stride == 1):\n self.residual = lambda x: x\n else:\n self.residual = TemporalConvolution(in_channels, out_channels, kernel_size=1, stride=stride)\n\n def forward(self, x):\n x = self.tcn(self.gcn(x)) + self.residual(x)\n return 
self.relu(x)\n\n\nclass STGCN(nn.Module):\n def __init__(self, num_class, num_point, num_person, in_channels, graph_type, cuda_=False):\n super(STGCN, self).__init__()\n\n if graph_type == 'ntu' or num_point == 25:\n self.graph = NTUGraph()\n elif graph_type == 'openpose' or num_point == 18:\n self.graph = KineticsGraph()\n\n A = self.graph.A\n self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)\n weights_init(self.data_bn, bs=1)\n\n self.layers = nn.ModuleDict(\n {'layer1': ST_GCN_block(in_channels, 64, A, cuda_, residual=False),\n 'layer2': ST_GCN_block(64, 64, A, cuda_),\n 'layer3': ST_GCN_block(64, 64, A, cuda_),\n 'layer4': ST_GCN_block(64, 64, A, cuda_),\n 'layer5': ST_GCN_block(64, 128, A, cuda_, stride=2),\n 'layer6': ST_GCN_block(128, 128, A, cuda_),\n 'layer7': ST_GCN_block(128, 128, A, cuda_),\n 'layer8': ST_GCN_block(128, 256, A, cuda_, stride=2),\n 'layer9': ST_GCN_block(256, 256, A, cuda_),\n 'layer10': ST_GCN_block(256, 256, A, cuda_)}\n )\n\n self.fc = nn.Linear(256, num_class)\n weights_init(self.fc, bs=num_class)\n\n def forward(self, x):\n N, C, T, V, M = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)\n x = self.data_bn(x)\n x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)\n for i in range(len(self.layers)):\n x = self.layers['layer' + str(i+1)](x)\n # N*M,C,T,V\n c_new = x.size(1)\n x = x.view(N, M, c_new, -1)\n x = x.mean(3).mean(1)\n return self.fc(x)\n",
"import numpy as np\nimport cv2\nfrom .matlab_cp2tform import get_similarity_transform_for_cv2\n\nREFERENCE_FACIAL_POINTS = [\n [30.29459953, 51.69630051],\n [65.53179932, 51.50139999],\n [48.02519989, 71.73660278],\n [33.54930115, 92.3655014],\n [62.72990036, 92.20410156]\n]\n\nDEFAULT_CROP_SIZE = (96, 112)\n\n\nclass FaceWarpException(Exception):\n def __str__(self):\n return 'In File {}:{}'.format(\n __file__, super.__str__(self))\n\n\ndef get_reference_facial_points(output_size=None,\n inner_padding_factor=0.0,\n outer_padding=(0, 0),\n default_square=False):\n \"\"\"\n Function:\n ----------\n get reference 5 key points according to crop settings:\n 0. Set default crop_size:\n if default_square:\n crop_size = (112, 112)\n else:\n crop_size = (96, 112)\n 1. Pad the crop_size by inner_padding_factor in each side;\n 2. Resize crop_size into (output_size - outer_padding*2),\n pad into output_size with outer_padding;\n 3. Output reference_5point;\n Parameters:\n ----------\n @output_size: (w, h) or None\n size of aligned face image\n @inner_padding_factor: (w_factor, h_factor)\n padding factor for inner (w, h)\n @outer_padding: (w_pad, h_pad)\n each row is a pair of coordinates (x, y)\n @default_square: True or False\n if True:\n default crop_size = (112, 112)\n else:\n default crop_size = (96, 112);\n !!! make sure, if output_size is not None:\n (output_size - outer_padding)\n = some_scale * (default crop_size * (1.0 + inner_padding_factor))\n Returns:\n ----------\n @reference_5point: 5x2 np.array\n each row is a pair of transformed coordinates (x, y)\n \"\"\"\n\n tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)\n tmp_crop_size = np.array(DEFAULT_CROP_SIZE)\n\n if default_square:\n size_diff = max(tmp_crop_size) - tmp_crop_size\n tmp_5pts += size_diff / 2\n tmp_crop_size += size_diff\n\n if (output_size and\n output_size[0] == tmp_crop_size[0] and\n output_size[1] == tmp_crop_size[1]):\n return tmp_5pts\n\n if (inner_padding_factor == 0 and\n outer_padding == (0, 0)):\n if output_size is None:\n return tmp_5pts\n else:\n raise FaceWarpException(\n 'No paddings to do, output_size must be None or {}'.format(tmp_crop_size))\n\n if not (0 <= inner_padding_factor <= 1.0):\n raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')\n\n if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and\n output_size is None):\n output_size = tmp_crop_size * \\\n (1 + inner_padding_factor * 2).astype(np.int32)\n output_size += np.array(outer_padding)\n\n if not (outer_padding[0] < output_size[0] and\n outer_padding[1] < output_size[1]):\n raise FaceWarpException('Not (outer_padding[0] < output_size[0]'\n 'and outer_padding[1] < output_size[1])')\n\n if inner_padding_factor > 0:\n size_diff = tmp_crop_size * inner_padding_factor * 2\n tmp_5pts += size_diff / 2\n tmp_crop_size += np.round(size_diff).astype(np.int32)\n\n size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2\n\n if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:\n raise FaceWarpException('Must have (output_size - outer_padding)'\n '= some_scale * (crop_size * (1.0 + inner_padding_factor)')\n\n scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]\n tmp_5pts = tmp_5pts * scale_factor\n tmp_crop_size = size_bf_outer_pad\n\n reference_5point = tmp_5pts + np.array(outer_padding)\n tmp_crop_size = output_size\n return reference_5point\n\n\ndef get_affine_transform_matrix(src_pts, dst_pts):\n \"\"\"\n Function:\n ----------\n get affine transform 
matrix 'tfm' from src_pts to dst_pts\n Parameters:\n ----------\n @src_pts: Kx2 np.array\n source points matrix, each row is a pair of coordinates (x, y)\n @dst_pts: Kx2 np.array\n destination points matrix, each row is a pair of coordinates (x, y)\n Returns:\n ----------\n @tfm: 2x3 np.array\n transform matrix from src_pts to dst_pts\n \"\"\"\n\n tfm = np.float32([[1, 0, 0], [0, 1, 0]])\n n_pts = src_pts.shape[0]\n ones = np.ones((n_pts, 1), src_pts.dtype)\n src_pts_ = np.hstack([src_pts, ones])\n dst_pts_ = np.hstack([dst_pts, ones])\n\n A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_)\n\n if rank == 3:\n tfm = np.float32([\n [A[0, 0], A[1, 0], A[2, 0]],\n [A[0, 1], A[1, 1], A[2, 1]]\n ])\n elif rank == 2:\n tfm = np.float32([\n [A[0, 0], A[1, 0], 0],\n [A[0, 1], A[1, 1], 0]\n ])\n\n return tfm\n\n\ndef warp_and_crop_face(src_img,\n facial_pts,\n reference_pts=None,\n crop_size=(96, 112),\n align_type='similarity'):\n \"\"\"\n Function:\n ----------\n apply affine transform 'trans' to uv\n Parameters:\n ----------\n @src_img: 3x3 np.array\n input image\n @facial_pts: could be\n 1)a list of K coordinates (x,y)\n or\n 2) Kx2 or 2xK np.array\n each row or col is a pair of coordinates (x, y)\n @reference_pts: could be\n 1) a list of K coordinates (x,y)\n or\n 2) Kx2 or 2xK np.array\n each row or col is a pair of coordinates (x, y)\n or\n 3) None\n if None, use default reference facial points\n @crop_size: (w, h)\n output face image size\n @align_type: transform type, could be one of\n 1) 'similarity': use similarity transform\n 2) 'cv2_affine': use the first 3 points to do affine transform,\n by calling cv2.getAffineTransform()\n 3) 'affine': use all points to do affine transform\n Returns:\n ----------\n @face_img: output face image with size (w, h) = @crop_size\n \"\"\"\n\n if reference_pts is None:\n if crop_size[0] == 96 and crop_size[1] == 112:\n reference_pts = REFERENCE_FACIAL_POINTS\n else:\n default_square = False\n inner_padding_factor = 0\n outer_padding = (0, 0)\n output_size = crop_size\n\n reference_pts = get_reference_facial_points(output_size,\n inner_padding_factor,\n outer_padding,\n default_square)\n\n ref_pts = np.float32(reference_pts)\n ref_pts_shp = ref_pts.shape\n if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:\n raise FaceWarpException(\n 'reference_pts.shape must be (K,2) or (2,K) and K>2')\n\n if ref_pts_shp[0] == 2:\n ref_pts = ref_pts.T\n\n src_pts = np.float32(facial_pts)\n src_pts_shp = src_pts.shape\n if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:\n raise FaceWarpException(\n 'facial_pts.shape must be (K,2) or (2,K) and K>2')\n\n if src_pts_shp[0] == 2:\n src_pts = src_pts.T\n\n if src_pts.shape != ref_pts.shape:\n raise FaceWarpException(\n 'facial_pts and reference_pts must have the same shape')\n\n if align_type == 'cv2_affine':\n tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])\n elif align_type == 'affine':\n tfm = get_affine_transform_matrix(src_pts, ref_pts)\n else:\n tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)\n face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))\n\n return face_img\n",
"\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n# Modifications Copyright 2021 - present, OpenDR European Project\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom opendr.perception.object_detection_2d.detr.algorithm.util import box_ops\nfrom opendr.perception.object_detection_2d.detr.algorithm.util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n accuracy, get_world_size, interpolate,\n is_dist_avail_and_initialized)\n\nfrom .backbone import build_backbone\nfrom .matcher import build_matcher\nfrom .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm,\n dice_loss, sigmoid_focal_loss)\nfrom .transformer import build_transformer\n\n\nclass DETR(nn.Module):\n \"\"\" This is the DETR module that performs object detection \"\"\"\n\n def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n DETR can detect in a single image. For COCO, we recommend 100 queries.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n \"\"\"\n super().__init__()\n self.num_queries = num_queries\n self.transformer = transformer\n hidden_dim = transformer.d_model\n self.class_embed = nn.Linear(hidden_dim, num_classes + 1)\n self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n self.query_embed = nn.Embedding(num_queries, hidden_dim)\n self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)\n self.backbone = backbone\n self.aux_loss = aux_loss\n\n def forward(self, samples: NestedTensor):\n \"\"\" The forward expects a NestedTensor, which consists of:\n - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - \"pred_logits\": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - \"aux_outputs\": Optional, only returned when auxilary losses are activated. 
It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.backbone(samples)\n\n src, mask = features[-1].decompose()\n assert mask is not None\n hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]\n\n outputs_class = self.class_embed(hs)\n outputs_coord = self.bbox_embed(hs).sigmoid()\n out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}\n if self.aux_loss:\n out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_logits': a, 'pred_boxes': b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\n\nclass SetCriterion(nn.Module):\n \"\"\" This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {'loss_ce': loss_ce}\n\n if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses\n\n @torch.no_grad()\n def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients\n \"\"\"\n pred_logits = outputs['pred_logits']\n device = pred_logits.device\n tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n losses = {'cardinality_error': card_err}\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes'][idx]\n target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n losses = {}\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(\n box_ops.box_cxcywh_to_xyxy(src_boxes),\n box_ops.box_cxcywh_to_xyxy(target_boxes)))\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n return losses\n\n def loss_masks(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n tgt_idx = self._get_tgt_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n masks = [t[\"masks\"] for t in targets]\n # TODO use valid to mask invalid areas due to padding in loss\n target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()\n target_masks = target_masks.to(src_masks)\n target_masks = target_masks[tgt_idx]\n\n # upsample predictions to the target size\n src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],\n mode=\"bilinear\", align_corners=False)\n src_masks = src_masks[:, 0].flatten(1)\n\n target_masks = target_masks.flatten(1)\n target_masks = target_masks.view(src_masks.shape)\n losses = {\n \"loss_mask\": sigmoid_focal_loss(src_masks, target_masks, num_boxes),\n \"loss_dice\": dice_loss(src_masks, target_masks, num_boxes),\n }\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'cardinality': self.loss_cardinality,\n 'boxes': self.loss_boxes,\n 'masks': self.loss_masks\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets):\n \"\"\" This 
performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n\nclass PostProcess(nn.Module):\n \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n @torch.no_grad()\n def forward(self, outputs, target_sizes):\n \"\"\" Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augment, but before padding\n \"\"\"\n out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']\n\n assert len(out_logits) == len(target_sizes)\n assert target_sizes.shape[1] == 2\n\n prob = F.softmax(out_logits, -1)\n scores, labels = prob[..., :-1].max(-1)\n\n # convert to [x0, y0, x1, y1] format\n boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n # and from relative [0, 1] to absolute [0, height] coordinates\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n boxes = boxes * scale_fct[:, None, :]\n\n results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n\n return results\n\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef build(args):\n # the `num_classes` naming here is somewhat misleading.\n # it indeed corresponds to `max_obj_id 
+ 1`, where max_obj_id\n # is the maximum id for a class in your dataset. For example,\n # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.\n # As another example, for a dataset that has a single class with id 1,\n # you should pass `num_classes` to be 2 (max_obj_id + 1).\n # For more details on this, check the following discussion\n # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223\n device = torch.device(args.device)\n\n backbone = build_backbone(args)\n\n transformer = build_transformer(args)\n\n model = DETR(\n backbone,\n transformer,\n num_classes=args.num_classes,\n num_queries=args.num_queries,\n aux_loss=args.aux_loss,\n )\n if args.masks:\n model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None))\n matcher = build_matcher(args)\n weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}\n weight_dict['loss_giou'] = args.giou_loss_coef\n if args.masks:\n weight_dict[\"loss_mask\"] = args.mask_loss_coef\n weight_dict[\"loss_dice\"] = args.dice_loss_coef\n # TODO this is a hack\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n losses = ['labels', 'boxes', 'cardinality']\n if args.masks:\n losses += [\"masks\"]\n criterion = SetCriterion(args.num_classes, matcher=matcher, weight_dict=weight_dict,\n eos_coef=args.eos_coef, losses=losses)\n criterion.to(device)\n postprocessors = {'bbox': PostProcess()}\n if args.masks:\n postprocessors['segm'] = PostProcessSegm()\n if args.dataset_file == \"coco_panoptic\":\n is_thing_map = {i: i <= 90 for i in range(201)}\n postprocessors[\"panoptic\"] = PostProcessPanoptic(is_thing_map, threshold=0.85)\n\n return model, criterion, postprocessors\n\n\ndef build_c(args):\n # the `num_classes` naming here is somewhat misleading.\n # it indeed corresponds to `max_obj_id + 1`, where max_obj_id\n # is the maximum id for a class in your dataset. For example,\n # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.\n # As another example, for a dataset that has a single class with id 1,\n # you should pass `num_classes` to be 2 (max_obj_id + 1).\n # For more details on this, check the following discussion\n # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223\n device = torch.device(args.device)\n\n matcher = build_matcher(args)\n weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef}\n weight_dict['loss_giou'] = args.giou_loss_coef\n if args.masks:\n weight_dict[\"loss_mask\"] = args.mask_loss_coef\n weight_dict[\"loss_dice\"] = args.dice_loss_coef\n # TODO this is a hack\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n losses = ['labels', 'boxes', 'cardinality']\n if args.masks:\n losses += [\"masks\"]\n criterion = SetCriterion(args.num_classes, matcher=matcher, weight_dict=weight_dict,\n eos_coef=args.eos_coef, losses=losses)\n criterion.to(device)\n\n return criterion\n\n\ndef build_pp(args):\n # the `num_classes` naming here is somewhat misleading.\n # it indeed corresponds to `max_obj_id + 1`, where max_obj_id\n # is the maximum id for a class in your dataset. 
For example,\n # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.\n # As another example, for a dataset that has a single class with id 1,\n # you should pass `num_classes` to be 2 (max_obj_id + 1).\n # For more details on this, check the following discussion\n # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223\n postprocessors = {'bbox': PostProcess()}\n if args.masks:\n postprocessors['segm'] = PostProcessSegm()\n if args.dataset_file == \"coco_panoptic\":\n is_thing_map = {i: i <= 90 for i in range(201)}\n postprocessors[\"panoptic\"] = PostProcessPanoptic(is_thing_map, threshold=0.85)\n\n return postprocessors\n"
] | [
[
"torch.onnx.export",
"torch.load",
"numpy.ascontiguousarray",
"torch.randn",
"torch.from_numpy"
],
[
"numpy.load",
"numpy.lib.format.open_memmap",
"scipy.spatial.Delaunay",
"numpy.transpose"
],
[
"numpy.asarray"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
],
[
"numpy.hstack",
"numpy.ones",
"numpy.round",
"numpy.linalg.lstsq",
"numpy.float32",
"numpy.array"
],
[
"torch.nn.functional.softmax",
"torch.ones",
"torch.full",
"torch.nn.functional.l1_loss",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.full_like",
"torch.no_grad",
"torch.device",
"torch.distributed.all_reduce",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pedrob37/Phys_Seg | [
"7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee"
] | [
"Phys_Seg/run.py"
] | [
"import torch\nimport numpy as np\nimport SimpleITK as sitk\nfrom Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img\nfrom Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing\nimport importlib\nfrom Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters\nfrom network_architecture import nnUNet\nimport os\nimport Phys_Seg\n\n\ndef apply_phys_seg(img, out_fname):\n img_itk = sitk.ReadImage(img)\n img_npy = sitk.GetArrayFromImage(img_itk)\n out = sitk.GetImageFromArray(img_npy)\n out.CopyInformation(img_itk)\n sitk.WriteImage(out, out_fname)\n\n\ndef run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,\n # config_file=os.path.join(Phys_Seg.__path__[0], \"config.py\"),\n device=None, overwrite=True):\n \"\"\"\n\n :param mri_fnames: str or list/tuple of str\n :param output_fnames: str or list/tuple of str. If list: must have the same length as output_fnames\n :param sequence: MPRAGE or SPGR (for now)\n :param config_file: config.py\n :param device: either int (for device id) or 'cpu'\n :param overwrite: True or False\n :param postprocess: whether to do postprocessing or not. Postprocessing here consists of simply discarding all\n but the largest predicted connected component. Default False\n :return:\n \"\"\"\n\n physics_input_size = {'MPRAGE': 4,\n 'SPGR': 6}\n\n # Load in model weights\n maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)\n params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)\n\n net = nnUNet(1, 4, physics_flag=True if physics_params else False,\n physics_input=physics_input_size[sequence],\n physics_output=40)\n\n if device == \"cpu\":\n net = net.cpu()\n else:\n net.cuda(device)\n\n net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])\n net.to(f'cuda:{net.device_ids[0]}')\n # net = torch.nn.DataParallel(net)\n\n if not isinstance(mri_fnames, (list, tuple)):\n mri_fnames = [mri_fnames]\n\n if not isinstance(output_fnames, (list, tuple)):\n output_fnames = [output_fnames]\n\n params = torch.load(params_file, map_location=lambda storage, loc: storage)\n\n for in_fname, out_fname in zip(mri_fnames, output_fnames):\n if overwrite or not (os.path.isfile(out_fname)):\n print(\"File:\", in_fname)\n print(\"preprocessing...\")\n try:\n data, aff = read_file(in_fname)\n except RuntimeError:\n print(\"\\nERROR\\nCould not read file\", in_fname, \"\\n\")\n continue\n except AssertionError as e:\n print(e)\n continue\n\n # Process data\n if physics_params is not None:\n physics_params = eval(physics_params)\n # Convert TR to pTD\n physics_params[1] = physics_params[1] - physics_params[0]\n print(physics_params)\n processed_physics = physics_preprocessing(np.array(physics_params), sequence)\n else:\n processed_physics = None\n data = image_preprocessing(patient_data=data)\n\n print(\"prediction (CNN id)...\")\n net.load_state_dict(params['model_state_dict'])\n net.eval()\n seg = predict_phys_seg(net=net,\n patient_data=data,\n processed_physics=processed_physics,\n main_device=device)\n\n print(\"exporting segmentation...\")\n save_segmentation_nifti(seg, aff, out_fname)\n\n # apply_phys_seg(in_fname, out_fname)\n"
] | [
[
"numpy.array",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
teristam/openephys-fileIO | [
"8089e7c4aff829c13a79656b8812a3d3e68eb1eb"
] | [
"test/test_binary.py"
] | [
"import numpy as np \nfrom openephys_fileIO.fileIO import *\nfrom openephys_fileIO.Binary import *\n\ndef test_write_binary_data():\n # Test writing of binary data\n \n dataFolder = 'test/data'\n\n # Read the data in original int16 format\n data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,\n num_data_channel=1,num_aux_channel=1, num_adc_channel=1)\n print(headers)\n\n # Write to binary file\n writeBinaryData(dataFolder+'/experiment1/recording1/',data)\n writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',headers)\n\n #load the data in float format (take care of the bit per volt)\n data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,\n num_data_channel=1,num_aux_channel=1, num_adc_channel=1,dtype=float)\n\n # Load binary file using the offical function\n data2, rate2 = Load('test/data')\n\n np.allclose(data.T,data2['100']['0']['0'])\n\ndef test_numpy2binary():\n # test write of numpy data\n Fs = 30000\n x = np.random.randn(3*Fs,4)\n bitVolts = 0.195\n dataFolder = 'test/data2'\n channel_names = [f'CH{i}' for i in range(x.shape[1])]\n writeBinaryData(dataFolder+'/experiment1/recording1/', x, bitVolts)\n writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',samplerate=30000,\n num_channels= x.shape[1], bit_volts=bitVolts,channel_names=channel_names)\n\n # load the binary file\n data, rate = Load(dataFolder)\n\n np.allclose(x, data['100']['0']['0'])\n\n\n\n\n\n\n \n"
] | [
[
"numpy.random.randn",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
FlowerForAlgernon/ai_tetris | [
"7ac0d3875ad9b31fb260f7567a218e0de340c4e4"
] | [
"QLearning.py"
] | [
"\"\"\"\n这份代码使用 Q learning 算法训练并运行俄罗斯方块游戏 ai。其中简化状态空间的方法可参考论文 Adapting Reinforcement Learning to Tetris\n\"\"\"\n\nimport numpy as np\nfrom game import *\n\n\n\nsub_well = 4\nbase = 7\n\n\ndef getStateIndex(field_width, field_height, field_map):\n \"\"\"\n 因为每一列有 7 种不同的情况,所以采用七进制数来作为状态索引\n \"\"\"\n temp = [0 for _ in range(field_width)]\n convert = {}\n for i in range(-(base - 1)//2, (base - 1)//2 + 1):\n convert[i] = i + (base - 1)//2\n for x in range(field_width):\n while temp[x] < field_height and field_map[temp[x]][x] == 0:\n temp[x] += 1\n index = 0\n for i in range(field_width-1):\n if temp[i+1] - temp[i] > (base - 1)//2:\n index += base**i * convert[(base - 1)//2]\n elif temp[i+1] - temp[i] < -(base - 1)//2:\n index += base**i * convert[-(base - 1)//2]\n else:\n index += base**i * convert[temp[i+1] - temp[i]]\n return index\n\n\ndef getAllPossibleLocation(field_width, field_map, block, layout):\n all_possible_position = []\n for x in range(field_width):\n if block.isLegal(layout, (x, -4), field_map) is not State.Middle:\n all_possible_position.append(x)\n return all_possible_position\n\n\ndef findBottomPosition(field_map, block, x, layout):\n y = -4\n while block.isLegal(layout, (x, y), field_map) is not State.Bottom:\n y += 1\n return y - 1\n\n\ndef dropBlock(field_height, field_map, x0, y0, layout):\n for (x, y) in layout:\n if 0 <= y0 + y < field_height:\n field_map[y0 + y][x0 + x] = 1\n if y0 + y < 0:\n return False\n return True\n\n\ndef resetMap(field_width, field_height, field_map):\n count = 0\n for y in range(field_height):\n for x in range(field_width):\n if field_map[y][x] == 1:\n field_map[y][x] = 0\n count += 1\n if count == 4:\n return\n\n\ndef getNewMap(block, position, direction, field_map):\n while block.direction is not direction:\n block.rotate(field_map)\n while block.position[0] > position[0]:\n block.left(field_map)\n while block.position[0] < position[0]:\n block.right(field_map)\n while not block.is_stop:\n block.down(field_map)\n\n\nclass QLearning(Game):\n def __init__(self):\n super(QLearning, self).__init__(sub_well, 1000)\n self.repeat_num = 200\n self.alpha = 0.2\n self.gamma = 0.8\n self.lambda_ = 0.3\n self.epsilon = 0.01\n self.key = [((s, b), (p, d)) for s in range(base**(self.field_width-1)) for b in range(7) for p in range(self.field_width) for d in range(4)]\n self.V = [0 for _ in range(len(self.key))]\n self.Q = dict(zip(self.key, self.V))\n #self.Q = np.load('QL.npy').item()\n\n def checkEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n\n def getBlock(self, block):\n for x in range(len(Blocks_color)):\n if block.color == Blocks_color[x]:\n return x\n\n def getReward(self):\n temp = [0 for _ in range(self.field_width)]\n for x in range(self.field_width):\n while temp[x] < self.field_height and self.field_map[temp[x]][x] == 0:\n temp[x] += 1\n buried_holes = 0\n block = self.block_factory.cur_block\n for (x, y) in block.layout:\n i = 1\n while block.position[1]+y+i < self.field_height and self.field_map[block.position[1]+y+i][x] == 0:\n buried_holes += 1\n i += 1\n return np.var(temp)*(-2) + buried_holes*(-1)\n\n def getAllActions(self, block):\n actions = []\n for direction in range(len(block.layouts)):\n for x in getAllPossibleLocation(self.field_width, self.field_map, block, block.layouts[direction]):\n y = findBottomPosition(self.field_map, block, x, block.layouts[direction])\n if dropBlock(self.field_height, self.field_map, x, y, block.layouts[direction]):\n actions.append((x, 
direction))\n resetMap(self.field_width, self.field_height, self.field_map)\n return actions\n\n def getBestActionWithGreedy(self, block):\n block_type = self.getBlock(block)\n state = getStateIndex(self.field_width, self.field_height, self.field_map)\n actions = self.getAllActions(block)\n actions_value = {}\n for action in actions:\n actions_value[action] = self.Q[((state, block_type), action)]\n if actions_value == {}:\n return None\n elif random.random() > self.epsilon:\n return max(actions_value, key=actions_value.get)\n else:\n return list(actions_value.keys())[random.randint(0, len(actions_value)-1)]\n\n def getBestAction(self, block):\n block_type = self.getBlock(block)\n state = getStateIndex(self.field_width, self.field_height, self.field_map)\n actions = self.getAllActions(block)\n actions_value = {}\n for action in actions:\n actions_value[action] = self.Q[((state, block_type), action)]\n if actions_value == {}:\n return None\n return max(actions_value, key=actions_value.get)\n\n def train(self):\n record = []\n for i in range(1, self.repeat_num+1):\n self.initialize()\n while not self.block_factory.is_failed:\n cur_state = getStateIndex(self.field_width, self.field_height, self.field_map)\n cur_block = self.getBlock(self.block_factory.cur_block)\n cur_action = self.getBestActionWithGreedy(self.block_factory.cur_block)\n cur_index = ((cur_state, cur_block), cur_action)\n if cur_action == None: break\n getNewMap(self.block_factory.cur_block, cur_action, cur_action[1], self.field_map)\n next_state = getStateIndex(self.field_width, self.field_height, self.field_map)\n next_block = self.getBlock(self.block_factory.next_block)\n next_action = self.getBestAction(self.block_factory.next_block)\n next_index = ((next_state, next_block), next_action)\n if next_action == None: break\n self.Q[cur_index] += self.alpha*(self.getReward()+self.gamma*self.Q[next_index] - self.Q[cur_index])\n self.update()\n print(\"Epoch:\"+str(i)+\"/\"+str(self.repeat_num)+\" Lines:\"+ str(self.lines_num)+\" Alpha:\"+str(self.alpha))\n record.append(self.lines_num)\n if i % 100 == 0:\n self.alpha *= 0.5\n np.save('QL.npy', {\"V\": self.V})\n np.save('record_QL.npy', {\"record\": record})\n np.save('QL.npy', self.Q)\n np.save('record_QL.npy', {\"record\": record})\n\n\nclass QLGame(Game):\n def __init__(self):\n super(QLGame, self).__init__(10, 20)\n self.Q = np.load('QL.npy', allow_pickle=True).item()\n self.col = 0\n\n def checkEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n\n def getBlock(self, block):\n for x in range(len(Blocks_color)):\n if block.color == Blocks_color[x]:\n return x\n\n def cutFieldMap(self, position):\n new_field_map = [[0]*sub_well for _ in range(self.field_height)]\n for y in range(self.field_height):\n for x in range(sub_well):\n new_field_map[y][x] = self.field_map[y][position+x]\n return new_field_map\n\n def getAllActions(self, field_width, field_height, block, field_map, init_pos):\n actions = {}\n for direction in range(len(block.layouts)):\n for x in getAllPossibleLocation(field_width, field_map, block, block.layouts[direction]):\n y = findBottomPosition(field_map, block, x, block.layouts[direction])\n if dropBlock(field_height, field_map, x, y, block.layouts[direction]):\n block_type = self.getBlock(block)\n state = getStateIndex(field_width, field_height, field_map)\n actions[(x + init_pos, direction)] = self.Q[((state, block_type), (x, direction))]\n resetMap(field_width, field_height, field_map)\n return 
actions\n\n def getBestAction(self):\n actions = {}\n cur_block = Block(self.block_factory.cur_block.screen, sub_well, self.field_height, self.block_factory.cur_block.layouts, self.block_factory.cur_block.direction, self.block_factory.cur_block.color, (0, -4))\n for x in range(self.field_width - sub_well + 1):\n loc_actions = self.getAllActions(sub_well, self.field_height, cur_block, self.cutFieldMap(x), x)\n for k, v in loc_actions.items():\n if k in actions:\n actions[k].append(v)\n else:\n actions[k] = [v]\n for k, v in actions.items():\n actions[k] = max(v)\n return max(actions, key=actions.get) if actions != {} else None\n\n def start(self):\n self.initialize()\n self.initializePygame()\n while not self.block_factory.is_failed:\n self.checkEvents()\n action = self.getBestAction()\n if action == None:\n break\n getNewMap(self.block_factory.cur_block, action, action[1], self.field_map)\n self.update()\n self.draw()\n return self.lines_num\n\n\n\nif __name__ == '__main__':\n train = QLearning()\n train.train()\n \n game = QLGame()\n game.start()\n"
] | [
[
"numpy.var",
"numpy.load",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
starkworld/Python-Course-work | [
"28715f079939129b442aedcd7edb2e0838886ba0"
] | [
"source code/Data Visualization.py"
] | [
"\"\"\"\nAuthor : nkalyan🤠\nimplementing Python Scripts on reading and returning the name no of mails that sent each day in week\n and plot/display them in bar graph\n\n I wrote code In counting to count the number of emails sent by each distinct user. That code may be helpful for this assignment.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nfrom os import getcwd\n\n\ndef file_path():\n \"\"\"Method that ask the users file name and returns it\"\"\"\n file_name = input(\"Enter the file name:\")\n return file_name\n\n\ndef pop_values(filename):\n \"\"\"Method the reads file and returning value\"\"\"\n file_name = filename\n try: # look for exception\n fp = open(file_name, \"r\")\n except FileNotFoundError: # if found exception display error\n print(\"File Does not exist, please check your file name\")\n exit()\n else: # if no exceptions thrown then performs this block\n with fp:\n for line in fp:\n line = line.strip(\"\\n\")\n offset = line.find(\"From\")\n offset1 = line.find(\"@\")\n line = line[-24:]\n offset3 = line.find(\"@\")\n if offset == 0 and offset1 > 0 and offset3 == -1:\n line = line[:-21]\n yield line\n\n\ndef main():\n \"\"\"Calls the all functions that necessary to get the output\"\"\"\n name = file_path() # calls the file path method\n dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0} # store the day val in dict\n value = pop_values(name)\n count = 0\n for i in value:\n if i in dictionary:\n dictionary[i] += 1\n count += len(i)\n val = dictionary.values()\n keys = dictionary.keys()\n zp = zip(dictionary.keys(), dictionary.values())\n for item in val:\n i = val\n j = keys\n plt.bar(j, i, align='center', alpha=0.5)\n\n plt.ylabel('Number of messages') \n plt.title('Emails per day')\n plt.show() # method that shows the bar graph of our code result\n\n\nif __name__ == '__main__':\n \"\"\"calls the main method\"\"\"\n main()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jasonrute/puzzle_cube | [
"7e05a21acd26cb30e729ba6a95e14e16c76c1780"
] | [
"analysis/stats.py"
] | [
"\"\"\"\nTraining Statics Tools\n\nA class for loading statistics related to a particular rutraiining session.\n\"\"\"\n\nimport numpy as np\n#from scipy import stats\nimport pandas as pd\nimport os\n\ndef str_between(s, start, end):\n return (s.split(start))[1].split(end)[0]\n\ndef is_stat_file_version(file_name, version):\n return file_name.startswith(\"stats_{}_gen\".format(version)) and file_name.endswith(\".h5\")\n\nclass TrainingStates:\n def __init__(self, versions, directory, verbose=True):\n self.stats_files = self.get_stat_files(versions, directory)\n \n if verbose:\n print(\"Loading files:\")\n for f in self.stats_files:\n print(directory + f)\n\n self.generation_stats = self.load_stats('generation_stats')\n self.game_stats = self.load_stats('game_stats')\n self.move_stats = self.load_stats('self_play_stats')\n\n def get_stat_files(self, versions, directory):\n stat_files = []\n for version in reversed(versions):\n files = [directory + f for f in os.listdir(directory) if is_stat_file_version(f, version)]\n stat_files += list(sorted(files))\n\n return stat_files\n\n def load_stats(self, key_name):\n df_list = []\n for f in self.stats_files:\n path = f\n generation = str_between(f, \"_gen\", \".h5\")\n df = pd.read_hdf(path, key=key_name)\n df['_generation'] = int(generation)\n df_list.append(df)\n\n if df_list:\n stats = pd.concat(df_list, ignore_index=True)\n else:\n return pd.DataFrame()\n \n return stats\n\n def first_move_stats(self):\n \"\"\"\n Note: There is an indexing issue (the index of first_play_stats is the orginal index\n while the index of game_stats is the game number). The easiest fix is to just use\n the values (an array) of the series and not the series itself.\n \"\"\"\n return self.move_stats[self.move_stats['_step_id'] == 0]\n\n def found_target_on_first_move(self):\n return (self.first_move_stats()['shortest_path'] >= 0).values\n\n def lost_but_found_target_on_first_move(self):\n return self.found_target_on_first_move() & ~self.game_stats['win']\n\n def win_but_did_not_find_target_on_first_move(self):\n return ~self.found_target_on_first_move() & self.game_stats['win']\n\nif __name__ == '__main__':\n from pprint import pprint\n versions = ['v0.9.3']\n save_dir = '../save/stats_v0.9.3/'\n #VERSIONS = ['v0.9.2.1', 'v0.9.2']\n #SAVE_DIR = '../save/stats_archive/'\n\n cube_stats = TrainingStates(versions, save_dir)\n\n pprint(cube_stats.generation_stats)\n\n pprint(np.mean(cube_stats.lost_but_found_target_on_first_move()))\n pprint(np.mean(cube_stats.win_but_did_not_find_target_on_first_move()))\n\n\n\n"
] | [
[
"pandas.read_hdf",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
kufusha/cabot | [
"52a40a39a29f0bd79b6fdd8f961708e09fda9a51",
"52a40a39a29f0bd79b6fdd8f961708e09fda9a51"
] | [
"cabot_ui/src/cabot_ui/geojson.py",
"mf_localization/src/altitude_manager.py"
] | [
"# Copyright (c) 2020 Carnegie Mellon University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nMapService GeoJson mapper\n\nMapService: https://github.com/hulop/MapService\n\nAuthor: Daisuke Sato<[email protected]>\n\"\"\"\n# -*- coding: utf-8 -*-\nimport sys\nimport traceback\nimport copy\nimport math\nimport json\nimport scipy\nimport scipy.spatial\nimport numpy\nimport numpy.linalg\nimport rospy\nimport tf\nimport angles\nimport geometry_msgs.msg\nfrom cabot_ui import geoutil, i18n\n\nclass Geometry(object):\n \"\"\"Geometry class\"\"\"\n \n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Geometry subclasses object\"\"\"\n if 'type' in dic:\n if dic['type'] == \"Point\":\n cls = Point\n elif dic['type'] == \"LineString\":\n cls = LineString\n if cls == Geometry:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n s = super(Geometry, self)\n if self.__class__.mro()[-2] == s.__thisclass__:\n s.__init__()\n else:\n s.__init__(**dic)\n\n if 'coordinates' in dic:\n self.coordinates = dic['coordinates']\n if 'type' in dic:\n self.geometry_type = dic['type']\n\nclass Point(Geometry, geoutil.Latlng):\n \"\"\"Point class representing global point\"\"\"\n \n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Point object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n c = dic['coordinates']\n super(Point, self).__init__(lat=c[1], lng=c[0], **dic)\n\nclass LineString(Geometry):\n \"\"\"Point class representing global line (start to end)\"\"\"\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal LineString object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(LineString, self).__init__(**dic)\n self.start = geoutil.Latlng(lat=self.coordinates[0][1], lng=self.coordinates[0][0])\n self.end = geoutil.Latlng(lat=self.coordinates[1][1], lng=self.coordinates[1][0])\n\n def distance_to(self, point):\n if isinstance(point, Point):\n return self.nearest_point_on_line(point).distance_to(point)\n raise RuntimeError(\"Need to pass a Point object (%s)\"%(type(point))) \n\n def nearest_point_on_line(self, point):\n A = geoutil.latlng2mercator(self.start)\n B = geoutil.latlng2mercator(self.end)\n C = geoutil.latlng2mercator(point)\n \n # Distance between A and B\n distAB = math.sqrt(math.pow(A.x - B.x, 2) + math.pow(A.y - B.y, 2));\n \n # Direction vector from A to B\n vecABx = (B.x - A.x) / distAB;\n vecABy = (B.y - A.y) / distAB;\n \n # Time from A to C\n timeAC = max(0, min(distAB, vecABx * (C.x - A.x) + vecABy * (C.y - 
A.y)));\n \n # LatLng of the point\n x = timeAC * vecABx + A.x;\n y = timeAC * vecABy + A.y;\n \n return geoutil.mercator2latlng(geoutil.Point(x=x, y=y))\n\n\nclass Properties(object):\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Properties object\"\"\"\n return cls(**dic)\n\n DEFAULT_VALUES = {\n \"hulop_building\": None,\n \"hulop_major_category\": None,\n \"hulop_sub_category\": None,\n \"hulop_minor_category\": None,\n \"hulop_heading\": 0,\n \"hulop_angle\": 180,\n \"hulop_height\": 0,\n \"hulop_long_description\": None,\n \"hulop_short_description\": None,\n \"hulop_description\": None,\n \"hulop_location_description\": None,\n \"hulop_content\": None,\n \"hulop_tags\": None,\n \"hulop_poi_external_category\": None,\n \"hulop_show_labels_zoomlevel\": None\n }\n \n def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n if name in Properties.DEFAULT_VALUES:\n return Properties.DEFAULT_VALUES[name]\n\n raise AttributeError(\"%s.%s is invalid\"%(self.__class__.__name__, name))\n return value\n\n def __init__(self, **dic):\n for key in dic:\n try:\n setattr(self, key, dic[key])\n except:\n print(\"Cannot use unicode string for a property name: \\\"{}\\\"\".format(key.encode('utf8')))\n\n def __str__(self):\n return json.dumps(self.__dict__, sort_keys=True, indent=2)\n\n\nclass Object(object):\n \"\"\"Object class\"\"\"\n\n @classmethod\n def marshal_list(cls, objects):\n \"\"\"marshal list of Object subclasses objects\"\"\"\n temp = []\n for obj in objects:\n temp.append(cls.marshal(obj))\n return temp\n\n @classmethod\n def marshal_dict(cls, objects):\n \"\"\"marshal dict of Object subclasses objects\"\"\"\n temp = {}\n for key in objects.keys():\n temp[key] = cls.marshal(objects[key])\n return temp\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Object subclasses object\"\"\"\n if 'node' in dic:\n cls = Landmark\n else:\n prop = dic['properties'] if 'properties' in dic else None\n if prop is not None:\n if 'node_id' in prop:\n cls = Node\n if 'link_id' in prop:\n cls = Link\n if 'facil_id' in prop:\n cls = Facility\n\n if cls == Object:\n return cls(**dic)\n return cls.marshal(dic)\n\n _id_map = {}\n _all_objects = []\n @staticmethod\n def get_object_by_id(_id, func=None):\n \"\"\"get object having id by callback function, it can be defered\"\"\"\n if _id in Object._id_map:\n if isinstance(Object._id_map[_id], list):\n Object._id_map[_id].append(func)\n else:\n if func is not None and callable(func):\n func(Object._id_map[_id])\n return None\n return Object._id_map[_id]\n else:\n Object._id_map[_id] = [func]\n return None\n\n @staticmethod\n def get_objects_by_type(_type):\n \"\"\"get objects of specified type\"\"\"\n temp = []\n for obj in Object._all_objects:\n if isinstance(obj, _type):\n temp.append(obj)\n return temp\n\n @staticmethod\n def get_all_objects():\n return Object._all_objects\n\n @staticmethod\n def _register(obj):\n \"\"\"store object with id and type\"\"\"\n # register with id\n _id = obj._id\n if _id in Object._id_map:\n if isinstance(Object._id_map[_id], list):\n for func in Object._id_map[_id]:\n if callable(func):\n func(obj)\n Object._id_map[_id] = obj\n Object._all_objects.append(obj)\n else:\n #raise RuntimeError(\"duplicate id\")\n pass\n else:\n Object._id_map[_id] = obj\n Object._all_objects.append(obj)\n\n @staticmethod\n def reset_all_objects():\n \"\"\"reset all state in the objects\"\"\"\n for obj in Object._all_objects:\n obj.reset()\n\n @staticmethod\n def _reset_link_index():\n Object._link_index = []\n 
Object._link_points = []\n Object._link_kdtree = None\n \n _link_index = []\n _link_points = []\n _link_kdtree = None\n @staticmethod\n def _build_link_index():\n for obj in Object.get_objects_by_type(Link):\n if obj.start_node and obj.end_node:\n sp = numpy.array([obj.start_node.local_geometry.x, obj.start_node.local_geometry.y])\n ep = numpy.array([obj.end_node.local_geometry.x, obj.end_node.local_geometry.y])\n Object._add_link_index(sp, ep, obj)\n if Object._link_points:\n Object._link_kdtree = scipy.spatial.KDTree(Object._link_points)\n\n @staticmethod\n def _add_link_index(sp, ep, obj):\n mp = (sp+ep)/2.0\n Object._link_points.append(mp)\n Object._link_index.append(obj)\n if numpy.linalg.norm(sp-ep) > 1:\n Object._add_link_index(sp, mp, obj)\n Object._add_link_index(mp, ep, obj)\n\n @staticmethod\n def get_nearest_link(node, exclude=None):\n point = node.local_geometry\n latlng = node.geometry\n _, index = Object._link_kdtree.query([point.x, point.y], 50)\n\n min_index = None\n min_dist = 1000\n for i in index:\n link = Object._link_index[i]\n if exclude is not None and exclude(link):\n continue\n \n dist = link.geometry.distance_to(latlng)\n if node.floor is not None:\n if link.start_node.floor != node.floor and \\\n link.end_node.floor != node.floor:\n dist += 1000\n if dist < min_dist:\n min_dist = dist\n min_index = i\n \n if min_index is None:\n return None\n return Object._link_index[min_index]\n\n @staticmethod\n def update_anchor_all(anchor):\n \"\"\"update anchor of all object\"\"\"\n Object._reset_link_index()\n for obj in Object._all_objects:\n obj.update_anchor(anchor)\n Object._build_link_index()\n\n\n def __init__(self, **dic):\n s = super(Object, self)\n if self.__class__.mro()[-2] == s.__thisclass__:\n s.__init__()\n else:\n s.__init__(**dic)\n \n if 'geometry' in dic:\n self.geometry = Geometry.marshal(dic['geometry'])\n if 'properties' in dic:\n self.properties = Properties.marshal(dic['properties'])\n if '_id' in dic:\n self._id = dic['_id']\n if 'no_registration' not in dic or not dic['no_registration']:\n Object._register(self)\n self.anchor = None\n self.local_geometry = None \n\n def __str__(self):\n ret = \"%s, (%s)\\n\" % (type(self), hex(id(self)))\n for key in self.__dict__:\n value = getattr(self, key)\n if isinstance(value, Object):\n ret += \"%s: %s<%s>\\n\"%(key, type(value), value._id)\n else:\n ret += \"%s: %s\\n\"%(key, str(value))\n \n import inspect\n for method in inspect.getmembers(type(self), predicate=lambda o: isinstance(o, property)):\n ret += \"%s: %s\\n\"%(method[0], method[1].__get__(self, type(self)))\n\n return ret\n\n def __repr__(self):\n return \"%s<%s>\"%(type(self), self._id)\n\n def update_anchor(self, anchor):\n self.anchor = anchor\n if anchor is not None:\n try:\n self.local_geometry = geoutil.global2local(self.geometry, anchor)\n except:\n print(\"Could not convert geometry: {}\".format(self.local_geometry))\n\n def distance_to(self, point):\n if isinstance(point, geoutil.Point):\n return self.local_geometry.distance_to(point)\n if isinstance(point, geoutil.Latlng):\n return self.geometry.distance_to(point)\n\n def reset(self):\n pass\n\nclass Link(Object):\n \"\"\"Link class\"\"\"\n ROUTE_TYPE_WALKWAY = 1\n ROUTE_TYPE_MOVING_WALKWAY = 2\n ROUTE_TYPE_RAILROAD_CROSSING = 3\n ROUTE_TYPE_ELEVATOR = 4\n ROUTE_TYPE_ESCALATOR = 5\n ROUTE_TYPE_STAIRS = 6\n ROUTE_TYPE_SLOPE = 7\n ROUTE_TYPE_UNKNOWN = 99\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Link subclasses object\"\"\"\n if 'properties' in dic:\n prop = 
dic['properties']\n if 'sourceNode' in prop:\n cls = RouteLink\n if cls == Link:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n super(Link, self).__init__(**dic)\n self.start_node = None\n self.end_node = None\n self.pois = []\n self.floor = 0\n Object.get_object_by_id(self.properties.start_id, self._set_start_node)\n Object.get_object_by_id(self.properties.end_id, self._set_end_node)\n\n def _set_start_node(self, node):\n self.start_node = node\n self._update()\n\n def _set_end_node(self, node):\n self.end_node = node\n self._update()\n\n def _update(self):\n if self.start_node is not None and \\\n self.end_node is not None:\n self.floor = (self.start_node.floor + self.end_node.floor)/2.0\n\n @property\n def is_elevator(self):\n \"\"\"wheather this links is an elevator or not\"\"\"\n return self.properties.route_type == Link.ROUTE_TYPE_ELEVATOR\n\n @property\n def is_escalator(self):\n \"\"\"wheather this links is an escalator or not\"\"\"\n return self.properties.route_type == Link.ROUTE_TYPE_ESCALATOR\n\n @property\n def is_leaf(self):\n \"\"\"wheather this links is a leaf or not\"\"\"\n if self.start_node is None or self.end_node is None:\n return False\n return self.start_node.is_leaf or self.end_node.is_leaf\n\n @property\n def length(self):\n \"\"\"distance from start to end\"\"\"\n if self.start_node is None or self.end_node is None:\n return float('nan')\n return self.start_node.geometry.distance_to(self.end_node.geometry)\n\n def register_poi(self, poi):\n self.pois.append(poi)\n\n def update_anchor(self, anchor):\n self.anchor = anchor\n #TODO\n\nclass RouteLink(Link):\n \"\"\"Route Link class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Directed Link object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(RouteLink, self).__init__(no_registration=True, **dic)\n self.source_node = None\n self.target_node = None\n Object.get_object_by_id(self.properties.sourceNode, self._set_source_node)\n Object.get_object_by_id(self.properties.targetNode, self._set_target_node)\n Object.get_object_by_id(self._id, self._found_link)\n\n def _set_source_node(self, node):\n self.source_node = node\n\n def _set_target_node(self, node):\n self.target_node = node\n\n def _found_link(self, link):\n self.pois = link.pois\n\n @property\n def is_temp(self):\n return self._id.startswith(\"_TEMP_LINK\")\n\n\nclass Node(Object):\n \"\"\"Node class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Node object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(Node, self).__init__(**dic)\n self.links = []\n for i in range(1, 100):\n attr = \"link%d_id\"%(i)\n if hasattr(self.properties, attr):\n Object.get_object_by_id(getattr(self.properties, attr), self._add_link)\n\n if hasattr(self.properties, 'floor'):\n self.floor = self.properties.floor\n else:\n self.floor = 0\n\n self.facility = None\n Facility.get_facility_by_id(self._id, self._set_facility)\n\n def _add_link(self, link):\n self.links.append(link)\n\n def _set_facility(self, facility):\n self.facility = facility\n\n @property\n def is_leaf(self):\n \"\"\"wheather this node is the end of leaf link\"\"\"\n return len(self.links) == 1\n\n @property\n def is_elevator(self):\n \"\"\"wheather this node is connected to elevator link\"\"\"\n res = False\n for link in self.links:\n res = res or link.is_elevator\n return res\n\n\nclass Facility(Object):\n \"\"\"Facility class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Facility subclasses 
object\"\"\"\n if 'properties' in dic:\n prop = dic['properties']\n if 'hulop_major_category' in prop:\n category = prop['hulop_major_category']\n if category == '_nav_poi_':\n cls = POI\n if cls == Facility:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n super(Facility, self).__init__(**dic)\n self.entrances = []\n for i in range(1, 100):\n attr = \"ent%d_node\"%(i)\n if hasattr(self.properties, attr):\n Facility._id_map[getattr(self.properties, attr)] = self\n Object.get_object_by_id(getattr(self.properties, attr), self._add_facility)\n\n self.name = i18n.localized_attr(self.properties, \"name\")\n self.name_pron = i18n.localized_attr(self.properties, \"name_hira\", only_if=\"ja\") ## special case\n self.long_description = i18n.localized_attr(self.properties, \"hulop_long_description\")\n\n def _add_facility(self, node):\n self.entrances.append(node)\n\n _id_map = {}\n @staticmethod\n def get_facility_by_id(_id, func=None):\n \"\"\"get facility having id by callback function, it can be defered\"\"\"\n if _id in Facility._id_map:\n if isinstance(Facility._id_map[_id], list):\n Facility._id_map[_id].append(func)\n else:\n if func is not None and callable(func):\n func(Facility._id_map[_id])\n return None\n return Facility._id_map[_id]\n else:\n Facility._id_map[_id] = [func]\n return None\n\nclass POI(Facility, geoutil.TargetPlace):\n \"\"\"POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal POI object\"\"\"\n if 'properties' in dic:\n prop = dic['properties']\n if 'hulop_sub_category' in prop:\n category = prop['hulop_sub_category']\n if category == '_nav_door_':\n cls = DoorPOI\n if category == '_nav_info_':\n cls = InfoPOI\n if category == '_cabot_speed_':\n cls = SpeedPOI\n if category == '_nav_elevator_cab_':\n cls = ElevatorCabPOI\n if category == '_nav_queue_wait_':\n cls = QueueWaitPOI\n if category == '_nav_queue_target_':\n cls = QueueTargetPOI\n\n if cls == POI:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n if 'properties' in dic:\n prop = dic['properties']\n get_prop = lambda prop, key: prop[key] if key in prop else Properties.DEFAULT_VALUES[key]\n r = (-get_prop(prop, 'hulop_heading') + 90) / 180.0 * math.pi\n angle = get_prop(prop, 'hulop_angle')\n self.floor = get_prop(prop, 'hulop_height')\n\n super(POI, self).__init__(r=r, x=0, y=0, angle=angle, floor=self.floor, **dic)\n\n self.sub_category = self.properties.hulop_sub_category \\\n if hasattr(self.properties, 'hulop_sub_category') else \"\"\n self.minor_category = self.properties.hulop_minor_category \\\n if hasattr(self.properties, 'hulop_minor_category') else \"\"\n\n #backward compatibility\n self.local_pose = self\n\n def approaching_statement(self):\n return None\n\n def approached_statement(self):\n return None\n\n def passed_statement(self):\n return None\n\n def update_anchor(self, anchor):\n super(POI, self).update_anchor(anchor) \n if anchor is not None:\n rad = (-self.properties.hulop_heading + 90 + anchor.rotate) / 180.0 * math.pi\n self.update_pose(self.local_geometry, rad)\n\n def reset(self):\n self.reset_target()\n\nclass DoorPOI(POI):\n \"\"\"POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Door POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(DoorPOI, self).__init__(**dic)\n\n @property\n def title(self):\n if self.is_auto:\n return i18n.localized_string(\"AUTO_DOOR\")\n else:\n return i18n.localized_string(\"DOOR\")\n\n @property\n def is_auto(self):\n \"\"\"wheather 
this is auto door or not\"\"\"\n        return self.minor_category is not None and \\\n            '_flag_auto_' in self.minor_category\n\n    def approaching_statement(self):\n        return i18n.localized_string(\"DOOR_POI_APPROACHING\", self.title) \n\nclass InfoPOI(POI):\n    \"\"\"Nav Info POI class\"\"\"\n\n    @classmethod\n    def marshal(cls, dic):\n        \"\"\"marshal Info POI object\"\"\"\n        return cls(**dic)\n\n    def __init__(self, **dic):\n        super(InfoPOI, self).__init__(**dic)\n\n    def approached_statement(self):\n        return self.name\n\nclass SpeedPOI(POI):\n    \"\"\"Cabot Speed POI class\"\"\"\n\n    @classmethod\n    def marshal(cls, dic):\n        \"\"\"marshal Speed POI object\"\"\"\n        return cls(**dic)\n\n    def __init__(self, **dic):\n        super(SpeedPOI, self).__init__(**dic)\n        self.limit = float(self.properties.hulop_content)\n\nclass ElevatorCabPOI(POI):\n    \"\"\"Elevator Cab POI class\"\"\"\n\n    @classmethod\n    def marshal(cls, dic):\n        \"\"\"marshal Elevator Cab POI object\"\"\"\n        return cls(**dic)\n\n    def __init__(self, **dic):\n        super(ElevatorCabPOI, self).__init__(**dic)\n        self.set_back = (3.0, 0.0)\n        self.set_forward = (3.0, 0.0)\n        self.door = (1.0, 0.0)\n        if self.properties.hulop_content:\n            try:\n                hulop_content_json = json.loads(self.properties.hulop_content)\n                if \"set_back\" in hulop_content_json:\n                    self.set_back = hulop_content_json[\"set_back\"]\n                if \"set_forward\" in hulop_content_json:\n                    self.set_forward = hulop_content_json[\"set_forward\"]\n                if \"door\" in hulop_content_json:\n                    self.door = hulop_content_json[\"door\"]\n                if \"buttons\" in hulop_content_json:\n                    self.buttons = hulop_content_json[\"buttons\"]\n            except:\n                traceback.print_exc(file=sys.stdout)\n\n    @property\n    def door_geometry(self):\n        x = self.x + math.cos(self.r) * self.door[0] - math.sin(self.r) * self.door[1]\n        y = self.y + math.sin(self.r) * self.door[0] + math.cos(self.r) * self.door[1]\n        return geoutil.Point(x=x, y=y)\n\n    def where_is_buttons(self, pose):\n        x = self.x + math.cos(self.r) * self.buttons[0] - math.sin(self.r) * self.buttons[1]\n        y = self.y + math.sin(self.r) * self.buttons[0] + math.cos(self.r) * self.buttons[1]\n\n        b_pos = geoutil.Point(x=x,y=y)\n        b_pose = geoutil.Pose.pose_from_points(b_pos, pose)\n        dir = angles.shortest_angular_distance(pose.r, b_pose.r)\n\n        print(pose, b_pos, b_pose, dir)\n\n        if abs(dir) > math.pi / 3 * 2:\n            return \"BACK\"\n        elif abs(dir) > math.pi / 3:\n            if dir > 0:\n                return \"LEFT\"\n            elif dir < 0:\n                return \"RIGHT\"\n        elif abs(dir) < math.pi / 10:\n            return \"FRONT\"\n        elif dir > 0:\n            return \"FRONT_LEFT\"\n        elif dir < 0:\n            return \"FRONT_RIGHT\"\n\n        rospy.logerr(\"should not happen\")\n        return None\n\nclass QueueWaitPOI(POI):\n    \"\"\"Queue Wait POI class\"\"\"\n\n    @classmethod\n    def marshal(cls, dic):\n        \"\"\"marshal Queue Wait POI object\"\"\"\n        return cls(**dic)\n\n    def __init__(self, **dic):\n        super(QueueWaitPOI, self).__init__(**dic)\n        self.interval = 1.0\n        hulop_content_json = json.loads(self.properties.hulop_content)\n        if \"interval\" in hulop_content_json:\n            self.interval = float(hulop_content_json[\"interval\"])\n        self.is_copied = False\n        self.link_orientation = None\n\n#    def approached_statement(self):\n#        return \"queue wait point\"\n\n    def register_link(self, link):\n        end_pose = geoutil.Pose.pose_from_points(link.end_node.local_geometry, link.start_node.local_geometry)\n        quat = tf.transformations.quaternion_from_euler(0, 0, end_pose.r)\n\n        self.link_orientation = geometry_msgs.msg.Quaternion()\n        self.link_orientation.x = quat[0]\n        self.link_orientation.y = quat[1]\n        self.link_orientation.z = 
quat[2]\n self.link_orientation.w = quat[3]\n\n def copy_to_link(self, link, local_geometry_x, local_geometry_y):\n copied_poi = copy.deepcopy(self)\n copied_poi.x = local_geometry_x\n copied_poi.y = local_geometry_y\n copied_poi.local_geometry.x = local_geometry_x\n copied_poi.local_geometry.y = local_geometry_y\n copied_poi.geometry = geoutil.local2global(copied_poi.local_geometry, copied_poi.anchor)\n \n link.register_poi(copied_poi)\n copied_poi.register_link(link)\n self.is_copied = True\n return copied_poi\n\nclass QueueTargetPOI(POI):\n \"\"\"Queue Target POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Queue Target POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(QueueTargetPOI, self).__init__(**dic)\n self.enter_node = None\n self.exit_node = None\n hulop_content_json = json.loads(self.properties.hulop_content)\n Object.get_object_by_id(hulop_content_json[\"enter\"], self._set_enter_node)\n Object.get_object_by_id(hulop_content_json[\"exit\"], self._set_exit_node)\n\n def _set_enter_node(self, node):\n self.enter_node = node\n\n def _set_exit_node(self, node):\n self.exit_node = node\n\nclass Landmark(Facility):\n \"\"\"Landmark class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Landmark object\"\"\"\n return cls(**dic)\n\n\n def __init__(self, **dic):\n self._id = dic['node']+\"_landmark\"\n super(Landmark, self).__init__(**dic)\n",
"import numpy\nimport rospy\nfrom std_msgs.msg import Float64\n\nclass AltitudeManager():\n def __init__(self, verbose=False):\n self.queue = []\n self.verbose = verbose\n self.queue_limit = 60\n self.timestamp_interval_limit = 3.0\n self.window_size = 20\n self.threshold = 0.3 * 12\n\n self.initial_pressure = None\n self.pressure_std_pub = rospy.Publisher(\"pressure_std\", Float64, latch=True, queue_size=10)\n\n def put_pressure(self, pressure):\n if not self.initial_pressure:\n self.initial_pressure = pressure\n \n if self.queue_limit <= len(self.queue):\n self.queue.pop(0)\n\n if len(self.queue) == 0:\n self.queue.append(pressure)\n return\n\n if (self.queue[-1].header.stamp - pressure.header.stamp).to_sec() > self.timestamp_interval_limit:\n if self.verbose:\n rospy.logerr(\"timestamp interval between two altimters is too large ({} sec).\"+\n \"AltitudeManager was reset.\".format(self.timestamp_interval_limit))\n self.queue.clear()\n\n self.queue.append(pressure)\n\n def is_height_changed(self):\n if len(self.queue) < self.window_size:\n return False\n\n relative = []\n for i in range(1, self.window_size+1):\n relative.append(self.queue[-i].fluid_pressure - self.initial_pressure.fluid_pressure)\n\n stdev = numpy.std(relative)\n\n msg = Float64()\n msg.data = stdev\n self.pressure_std_pub.publish(msg)\n\n if self.verbose:\n rospy.loginfo(\"Altimeter changed: {}\".format(stdev))\n\n if self.threshold < stdev:\n return True\n\n return False\n"
] | [
[
"numpy.array",
"scipy.spatial.KDTree",
"numpy.linalg.norm"
],
[
"numpy.std"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Corentin-LF/pyGPs | [
"b9d36777584cd53756bd4311c3c20ea52e945451"
] | [
"pyGPs/Core/gp.py"
] | [
"from __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\n#================================================================================\n# Marion Neumann [marion dot neumann at uni-bonn dot de]\n# Daniel Marthaler [dan dot marthaler at gmail dot com]\n# Shan Huang [shan dot huang at iais dot fraunhofer dot de]\n# Kristian Kersting [kristian dot kersting at cs dot tu-dortmund dot de]\n#\n# This file is part of pyGPs.\n# The software package is released under the BSD 2-Clause (FreeBSD) License.\n#\n# Copyright (c) by\n# Marion Neumann, Daniel Marthaler, Shan Huang & Kristian Kersting, 18/02/2014\n#================================================================================\n\n# MEANING OF NOTATION:\n#\n# inffunc function specifying the inference method\n# covfunc prior covariance function (see below)\n# meanfunc prior mean function\n# likfunc likelihood function\n# x n by D matrix of training inputs\n# y column vector of length n of training targets\n# xs n by D matrix of test inputs\n# ys column vector of length nn of true test targets (optional)\n# nlZ returned value of the negative log marginal likelihood\n# dnlZ column vector of partial derivatives of the negative\n# log marginal likelihood w.r.t. each hyperparameter\n# ym column vector (of length ns) of predictive output means\n# ys2 column vector (of length ns) of predictive output variances\n# fm column vector (of length ns) of predictive latent means\n# fs2 column vector (of length ns) of predictive latent variances\n# lp column vector (of length ns) of log predictive probabilities\n# post struct representation of the (approximate) posterior\n# post consists of post.alpha, post.L, post.sW\n#\n# This is a object-oriented python implementation of gpml functionality\n# (Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2011-02-18).\n# based on the functional-version of python implementation\n# (Copyright (c) by Marion Neumann and Daniel Marthaler, 20/05/2013)\n#\n# Copyright (c) by Marion Neumann and Shan Huang, 30/09/2013\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom . import inf, mean, lik, cov, opt\nfrom .tools import unique, jitchol, solve_chol\nfrom copy import deepcopy\nimport pyGPs\nfrom pyGPs.Core.cov import FITCOfKernel\nimport logging\n\nSHADEDCOLOR = [0.7539, 0.89453125, 0.62890625, 1.0]\nMEANCOLOR = [ 0.2109375, 0.63385, 0.1796875, 1.0]\nDATACOLOR = [0.12109375, 0.46875, 1., 1.0]\n\nclass GP(object):\n '''\n Base class for GP model.\n '''\n def __init__(self):\n super(GP, self).__init__()\n self.usingDefaultMean = True # was using default mean function now?\n self.meanfunc = None # mean function\n self.covfunc = None # covariance function\n self.likfunc = None # likelihood function\n self.inffunc = None # inference function\n self.optimizer = None # optimizer object\n self.nlZ = None # negative log marginal likelihood\n self.dnlZ = None # column vector of partial derivatives of the negative\n # log marginal likelihood w.r.t. 
each hyperparameter\n self.posterior = None # struct representation of the (approximate) posterior\n self.x = None # n by D matrix of training inputs\n self.y = None # column vector of length n of training targets\n self.xs = None # n by D matrix of test inputs\n self.ys = None # column vector of length nn of true test targets (optional)\n self.ym = None # column vector (of length ns) of predictive output means\n self.ys2 = None # column vector (of length ns) of predictive output variances\n self.fm = None # column vector (of length ns) of predictive latent means\n self.fs2 = None # column vector (of length ns) of predictive latent variances\n self.lp = None # column vector (of length ns) of log predictive probabilities\n\n self.logger = logging.getLogger(__name__)\n\n\n\n def __str__(self):\n strvalue = 'To get the properties of the model use:\\n'+\\\n 'model.nlZ # negative log marginal likelihood\\n'+\\\n 'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\\n'+\\\n 'model.posterior # posterior structure\\n'+\\\n 'model.covfunc.hyp # hyperparameters of cov func\\n'+\\\n 'model.meanfunc.hyp # hyperparameters of mean func\\n'+\\\n 'model.likfunc.hyp # hyperparameters of lik func\\n'+\\\n 'model.fm # latent mean\\n'+\\\n 'model.fs2 # latent variance\\n'+\\\n 'model.ym # predictive mean\\n'+\\\n 'model.ys2 # predictive variance\\n'+\\\n 'model.lp # log predictive probability'\n return strvalue\n\n\n\n def __repr__(self):\n strvalue = str(type(self))+': '+\\\n 'to get the properties of the model use:\\n'+\\\n 'model.nlZ # negative log marginal likelihood\\n'+\\\n 'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\\n'+\\\n 'model.posterior # posterior structure\\n'+\\\n 'model.covfunc.hyp # hyperparameters of cov func\\n'+\\\n 'model.meanfunc.hyp # hyperparameters of mean func\\n'+\\\n 'model.likfunc.hyp # hyperparameters of lik func\\n'+\\\n 'model.fm # latent mean\\n'+\\\n 'model.fs2 # latent variance\\n'+\\\n 'model.ym # predictive mean\\n'+\\\n 'model.ys2 # predictive variance\\n'+\\\n 'model.lp # log predictive probability'\n return strvalue\n\n\n\n\n def setData(self, x, y):\n '''\n Set training inputs and traning labels to model.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n\n self.x = x\n self.y = y\n if self.usingDefaultMean:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. 
training labels\n\n\n\n def plotData_1d(self, axisvals=None):\n '''\n Toy Method for ploting 1d data of the model.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n plt.figure()\n plt.plot(self.x, self.y, ls='None', marker='+', color=DATACOLOR, ms=12, mew=2)\n if axisvals:\n plt.axis(axisvals)\n plt.grid()\n plt.xlabel('input x')\n plt.ylabel('target y')\n plt.show()\n\n\n\n def plotData_2d(self,x1,x2,t1,t2,p1,p2,axisvals=None):\n '''\n Toy Method for ploting 2d data of the model. \\n\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param p1,p2: contour lines contains p2/(p1+p2)\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n That is to say, the contour is ploted by plt.contour(t1, t2, p2/(p1+p2) )\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n pc = plt.contour(t1, t2, np.reshape(old_div(p2,(p1+p2)), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if axisvals:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def setPrior(self, mean=None, kernel=None):\n '''\n Set prior mean and covariance other than the default setting of current model.\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n '''\n # check the type of inputs\n # ensure they are the right class before setting prior\n if not mean is None:\n assert isinstance(mean, pyGPs.mean.Mean), \"mean function is not an instance of pyGPs.mean.Mean\"\n self.meanfunc = mean\n self.usingDefaultMean = False\n if not kernel is None:\n assert isinstance(kernel, pyGPs.cov.Kernel), \"cov function is not an instance of pyGPs.cov.Kernel\"\n self.covfunc = kernel\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n This method is used to sepecify optimization configuration. By default, gp uses a single run \"minimize\".\n\n :param method: Optimization methods. Possible values are:\\n\n \"Minimize\" -> minimize by Carl Rasmussen (python implementation of \"minimize\" in GPML)\\n\n \"CG\" -> conjugent gradient\\n\n \"BFGS\" -> quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)\\n\n \"SCG\" -> scaled conjugent gradient (faster than CG)\\n\n :param num_restarts: Set if you want to run mulitiple times of optimization with different initial guess.\n It specifys the maximum number of runs/restarts/trials.\n :param min_threshold: Set if you want to run mulitiple times of optimization with different initial guess.\n It specifys the threshold of objective function value. Stop optimization when this value is reached.\n :param meanRange: The range of initial guess for mean hyperparameters.\n e.g. 
meanRange = [(-2,2), (-5,5), (0,1)].\n Each tuple specifys the range (low, high) of this hyperparameter,\n This is only the range of initial guess, during optimization process, optimal hyperparameters may go out of this range.\n (-5,5) for each hyperparameter by default.\n :param covRange: The range of initial guess for kernel hyperparameters. Usage see meanRange\n :param likRange: The range of initial guess for likelihood hyperparameters. Usage see meanRange\n '''\n pass\n\n\n\n def optimize40(self, x=None, y=None, numIterations=40):\n '''\n Train optimal hyperparameters based on training data,\n adjust new hyperparameters to all mean/cov/lik functions.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n '''\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # optimize\n optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)\n self.nlZ = optimalNlZ\n\n # apply optimal hyp to all mean/cov/lik functions here\n self.optimizer._apply_in_objects(optimalHyp)\n self.getPosterior()\n\n \n \n def optimize(self, x=None, y=None, numIterations=1000):\n '''\n Train optimal hyperparameters based on training data,\n adjust new hyperparameters to all mean/cov/lik functions.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n '''\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # optimize\n optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)\n self.nlZ = optimalNlZ\n\n # apply optimal hyp to all mean/cov/lik functions here\n self.optimizer._apply_in_objects(optimalHyp)\n self.getPosterior()\n\n\n def getPosterior(self, x=None, y=None, der=True):\n '''\n Fit the training data. Update negative log marginal likelihood(nlZ),\n partial derivatives of nlZ w.r.t. 
each hyperparameter(dnlZ),\n and struct representation of the (approximate) posterior(post),\n which consists of post.alpha, post.L, post.sW.\n\n nlZ, dnlZ, post = getPosterior(x, y, der=True)\\n\n nlZ, post = getPosterior(x, y, der=False )\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n :param boolean der: flag for whether to compute derivatives\n\n :return: negative log marginal likelihood (nlZ), derivatives of nlZ (dnlZ), posterior structure(post)\n\n You can print post to see descriptions of posterior.\n or see pyGPs.Core.inf for details.\n '''\n\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # call inference method\n if isinstance(self.likfunc, lik.Erf): #or is instance(self.likfunc, lik.Logistic):\n uy = unique(self.y)\n ind = ( uy != 1 )\n if any( uy[ind] != -1):\n raise Exception('You attempt classification using labels different from {+1,-1}')\n if not der:\n post, nlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 2)\n self.nlZ = nlZ\n self.posterior = deepcopy(post)\n return nlZ, post\n else:\n post, nlZ, dnlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 3)\n self.nlZ = nlZ\n self.dnlZ = deepcopy(dnlZ)\n self.posterior = deepcopy(post)\n return nlZ, dnlZ, post\n\n\n\n def predict(self, xs, ys=None):\n '''\n Prediction of test points (given by xs) based on training data of the current model.\n This method will output the following value:\\n\n predictive output means(ym),\\n\n predictive output variances(ys2),\\n\n predictive latent means(fm),\\n\n predictive latent variances(fs2),\\n\n log predictive probabilities(lp).\\n\n Theses values can also be achieved from model's property. (e.g. 
model.ym)\n\n :param xs: test input in shape of nn by D\n :param ys: test target(optional) in shape of nn by 1 if given\n\n :return: ym, ys2, fm, fs2, lp\n '''\n # check the shape of inputs\n # transform to correct shape if neccessary\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n self.xs = xs\n if not ys is None:\n if ys.ndim == 1:\n ys = np.reshape(ys, (ys.shape[0],1))\n self.ys = ys\n\n meanfunc = self.meanfunc\n covfunc = self.covfunc\n likfunc = self.likfunc\n inffunc = self.inffunc\n x = self.x\n y = self.y\n\n if self.posterior is None:\n self.getPosterior()\n alpha = self.posterior.alpha\n L = self.posterior.L\n sW = self.posterior.sW\n\n nz = list(range(len(alpha[:,0]))) # non-sparse representation\n if len(L) == 0: # in case L is not provided, we compute it\n K = covfunc.getCovMatrix(x=x[nz,:], mode='train')\n #L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )\n L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )\n Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?\n ns = xs.shape[0] # number of data points\n nperbatch = 1000 # number of data points per mini batch\n nact = 0 # number of already processed test data points\n ymu = np.zeros((ns,1))\n ys2 = np.zeros((ns,1))\n fmu = np.zeros((ns,1))\n fs2 = np.zeros((ns,1))\n lp = np.zeros((ns,1))\n while nact<=ns-1: # process minibatches of test cases to save memory\n ids = list(range(nact,min(nact+nperbatch,ns))) # data points to process\n kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test') # self-variances\n if isinstance(covfunc, FITCOfKernel):\n Ks = covfunc.getCovMatrix(x=x, z=xs[ids,:], mode='cross') # cross-covariances\n Ks = Ks[nz,:]\n else:\n Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross') # cross-covariances\n ms = meanfunc.getMean(xs[ids,:])\n N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)\n Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f\n fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1)) # predictive means\n if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)\n V = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)\n fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances\n else: # L is not triangular => use alternative parametrization\n fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances\n fs2[ids] = np.maximum(fs2[ids],0) # remove numerical noise i.e. negative variances\n Fs2 = np.tile(fs2[ids],(1,N)) # we have multiple values in case of sampling\n if ys is None:\n Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)\n else:\n Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)\n lp[ids] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) ) # log probability; sample averaging\n ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) ) # predictive mean ys|y and ... \n ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) ) # .. 
variance\n nact = ids[-1]+1 # set counter to index of next data point\n self.ym = ymu\n self.ys2 = ys2\n self.lp = lp\n self.fm = fmu\n self.fs2 = fs2\n if ys is None:\n return ymu, ys2, fmu, fs2, None\n else:\n return ymu, ys2, fmu, fs2, lp\n\n\n\n def predict_with_posterior(self, post, xs, ys=None):\n '''\n Prediction of test points (given by xs) based on training data\n of the current model with posterior already provided.\n (i.e. you already have the posterior and thus don't need the fitting phase.)\n This method will output the following value:\\n\n predictive output means(ym),\\n\n predictive output variances(ys2),\\n\n predictive latent means(fm),\\n\n predictive latent variances(fs2),\\n\n log predictive probabilities(lp).\\n\n Theses values can also be achieved from model's property. (e.g. model.ym)\n\n :param post: struct representation of posterior\n :param xs: test input\n :param ys: test target(optional)\n\n :return: ym, ys2, fm, fs2, lp\n '''\n # check the shape of inputs\n # transform to correct shape if neccessary\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n self.xs = xs\n if not ys is None:\n if ys.ndim == 1:\n ys = np.reshape(ys, (ys.shape[0],1))\n self.ys = ys\n\n meanfunc = self.meanfunc\n covfunc = self.covfunc\n likfunc = self.likfunc\n inffunc = self.inffunc\n x = self.x\n y = self.y\n\n self.posterior = deepcopy(post)\n alpha = post.alpha\n L = post.L\n sW = post.sW\n\n nz = list(range(len(alpha[:,0]))) # non-sparse representation\n if len(L) == 0: # in case L is not provided, we compute it\n K = covfunc.getCovMatrix(x=x[nz,:], mode='train')\n #L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )\n L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )\n Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?\n ns = xs.shape[0] # number of data points\n nperbatch = 1000 # number of data points per mini batch\n nact = 0 # number of already processed test data points\n ymu = np.zeros((ns,1))\n ys2 = np.zeros((ns,1))\n fmu = np.zeros((ns,1))\n fs2 = np.zeros((ns,1))\n lp = np.zeros((ns,1))\n while nact<=ns-1: # process minibatches of test cases to save memory\n id = list(range(nact,min(nact+nperbatch,ns))) # data points to process\n kss = covfunc.getCovMatrix(z=xs[id,:], mode='self_test') # self-variances\n Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[id,:], mode='cross') # cross-covariances\n ms = meanfunc.getMean(xs[id,:])\n N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)\n Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f\n fmu[id] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(id),1)) # predictive means\n if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)\n V = np.linalg.solve(L.T,np.tile(sW,(1,len(id)))*Ks)\n fs2[id] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances\n else: # L is not triangular => use alternative parametrization\n fs2[id] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances\n fs2[id] = np.maximum(fs2[id],0) # remove numerical noise i.e. 
negative variances\n Fs2 = np.tile(fs2[id],(1,N)) # we have multiple values in case of sampling\n if ys is None:\n [Lp, Ymu, Ys2] = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)\n else:\n [Lp, Ymu, Ys2] = likfunc.evaluate(np.tile(ys[id],(1,N)), Fmu[:], Fs2[:],None,None,3)\n lp[id] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(id),1) ) # log probability; sample averaging\n ymu[id] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(id),1) ) # predictive mean ys|y and ...\n ys2[id] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(id),1) ) # .. variance\n nact = id[-1]+1 # set counter to index of next data point\n self.ym = ymu\n self.ys2 = ys2\n self.lp = lp\n self.fm = fmu\n self.fs2 = fs2\n if ys is None:\n return ymu, ys2, fmu, fs2, None\n else:\n return ymu, ys2, fmu, fs2, lp\n\n\n\n\n\nclass GPR(GP):\n '''\n Model for Gaussian Process Regression\n '''\n def __init__(self):\n super(GPR, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Gauss() # likihood with default noise variance 0.1\n self.inffunc = inf.Exact() # inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n\n\n\n def setNoise(self,log_sigma):\n '''\n Set noise other than default noise value\n\n :param log_sigma: logarithm of the noise sigma\n '''\n self.likfunc = lik.Gauss(log_sigma)\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n elif method == \"Nelder-Mead\":\n self.optimizer = opt.Simplex(self, conf)\n else:\n raise Exception('Optimization method is not set correctly in setOptimizer')\n\n\n def plot(self,axisvals=None):\n '''\n Plot 1d GP regression result.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n xs = self.xs # test point\n x = self.x\n y = self.y\n ym = self.ym # predictive test mean\n ys2 = self.ys2 # predictive test variance\n plt.figure()\n xss = np.reshape(xs,(xs.shape[0],))\n ymm = np.reshape(ym,(ym.shape[0],))\n ys22 = np.reshape(ys2,(ys2.shape[0],))\n plt.plot(x, y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)\n plt.plot(xs, ym, color=MEANCOLOR, ls='-', lw=3.)\n plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.xlabel('input x')\n plt.ylabel('target y')\n plt.show()\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n elif newInf == \"EP\":\n self.inffunc = inf.EP()\n 
else:\n raise Exception('Possible inf values are \"Laplace\", \"EP\".')\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default Gaussian likelihood.\n\n :param str newLik: 'Laplace'\n '''\n if newLik == \"Laplace\":\n self.likfunc = lik.Laplace()\n self.inffunc = inf.EP()\n else:\n raise Exception('Possible lik values are \"Laplace\".')\n\n\n\n\n\nclass GPC(GP):\n '''\n Model for Gaussian Process Classification.\n '''\n def __init__(self):\n super(GPC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Erf() # erf likihood\n self.inffunc = inf.EP() # default inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,x1,x2,t1,t2,axisvals=None):\n '''\n Plot 2d GP Classification result.\n\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default EP inference.\n\n :param str newInf: 'Laplace'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default error function.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n #self.likfunc = lik.Logistic()\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n\n\n\n\n\nclass GPMC(object):\n '''\n This is a one vs. 
one classification wrapper for GP Classification\n '''\n def __init__(self, n_class):\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.n_class = n_class # number of different classes\n self.x_all = None\n self.y_all = None\n self.newInf = None # new inference? -> call useInference\n self.newLik = None # new likelihood? -> call useLikelihood\n self.newPrior = False\n\n\n\n def setPrior(self, mean=None, kernel=None):\n '''\n Set prior mean and covariance other than the default setting of current model.\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n '''\n # check the type of inputs\n # ensure they are the right class before setting prior\n if not mean is None:\n assert isinstance(mean, pyGPs.mean.Mean), \"mean function is not an instance of pyGPs.mean.Mean\"\n self.meanfunc = mean\n self.usingDefaultMean = False\n if not kernel is None:\n assert isinstance(kernel, pyGPs.cov.Kernel), \"cov function is not an instance of pyGPs.cov.Kernel\"\n self.covfunc = kernel\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n self.newPrior = True\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default EP inference.\n\n :param str newInf: 'Laplace'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default error function.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n #self.likfunc = lik.Logistic()\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n\n\n\n def setData(self,x,y):\n '''\n Set training inputs and traning labels to model.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n\n self.x_all = x\n self.y_all = y\n\n\n\n def fitAndPredict(self, xs):\n '''\n Fit the model with given training data and predict for test points (given by xs).\n predictive_vote is a matrix where row i is each test point i,\n and column j is the probability for being class j\n\n :param xs: test inputs in shape of nn by D\n :return: predictive_vote\n '''\n # check the shape of inputs\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n\n predictive_vote = np.zeros((xs.shape[0],self.n_class))\n for i in range(self.n_class): # classifier for class i...\n for j in range(i+1,self.n_class): # ...and class j\n x,y = self.createBinaryClass(i,j)\n model = GPC()\n if self.newPrior:\n model.setPrior(mean=self.meanfunc, kernel=self.covfunc)\n if self.newInf:\n model.useInference(self.newInf)\n if self.newLik:\n model.useLikelihood(self.newLik)\n model.getPosterior(x,y) # fitting\n ym = model.predict(xs)[0]\n ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i\n vote_i = np.zeros((xs.shape[0],self.n_class))\n vote_j = np.zeros((xs.shape[0],self.n_class))\n vote_i[:,i:i+1] = 
ym\n vote_j[:,j:j+1] = 2-ym\n predictive_vote += vote_i\n predictive_vote += vote_j\n predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]\n return predictive_vote\n\n\n\n def optimizeAndPredict(self, xs):\n '''\n Optimize the model with given training data and predict for test points (given by xs).\n predictive_vote is a matrix where row i is each test point i,\n and column j is the probability for being class j\n\n :param xs: test inputs in shape of nn by D\n :return: predictive_vote\n '''\n # check the shape of inputs\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n\n predictive_vote = np.zeros((xs.shape[0],self.n_class))\n for i in range(self.n_class): # classifier for class i...\n for j in range(i+1,self.n_class): # ...and class j\n x,y = self.createBinaryClass(i,j)\n model = GPC()\n if self.newPrior:\n model.setPrior(mean=self.meanfunc, kernel=self.covfunc)\n if self.newInf:\n model.useInference(self.newInf)\n if self.newLik:\n model.useLikelihood(self.newLik)\n model.optimize(x,y) # training\n ym = model.predict(xs)[0]\n ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i\n vote_i = np.zeros((xs.shape[0],self.n_class))\n vote_j = np.zeros((xs.shape[0],self.n_class))\n vote_i[:,i:i+1] = ym\n vote_j[:,j:j+1] = 2-ym\n predictive_vote += vote_i\n predictive_vote += vote_j\n predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]\n return predictive_vote\n\n\n\n def createBinaryClass(self, i,j):\n '''\n Create dataset x(data) and y(label) which only contains class i and j.\n Relabel class i to +1 and class j to -1\n\n :param int i: the i_th class\n :param int j: the j_th class\n :return: x(data) and y(label) which only contains class i and j\n\n '''\n class_i = []\n class_j = []\n for index in range(len(self.y_all)): # check all classes\n target = self.y_all[index]\n if target == i:\n class_i.append(index)\n elif target == j:\n class_j.append(index)\n n1 = len(class_i)\n n2 = len(class_j)\n class_i.extend(class_j)\n x = self.x_all[class_i,:]\n y = np.concatenate((np.ones((1,n1)),-np.ones((1,n2))),axis=1).T\n return x,y\n\n\n\n\n\nclass GP_FITC(GP):\n '''\n Model for FITC GP base class\n '''\n def __init__(self):\n super(GP_FITC, self).__init__()\n self.u = None # inducing points\n\n\n\n def setData(self, x, y, value_per_axis=5):\n '''\n Set training inputs and traning labels to model and derive deault inducing_points..\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n :param int value_per_axis: number of value in each dimension\n when using a uni-distant default inducing points\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check dimension of inputs\n # transform to correct shape if neccessary\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.x = x\n self.y = y\n if self.usingDefaultMean:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. 
training labels\n\n # get range of x in each dimension\n # 5 uniformally selected value for each dimension\n gridAxis=[]\n for d in range(x.shape[1]):\n column = x[:,d]\n mini = np.min(column)\n maxi = np.max(column)\n axis = np.linspace(mini,maxi,value_per_axis)\n gridAxis.append(axis)\n # default inducing points-> a grid\n if self.u is None:\n self.u = np.array(list(itertools.product(*gridAxis)))\n self.covfunc = self.covfunc.fitc(self.u)\n\n\n\n def setPrior(self, mean=None, kernel=None, inducing_points=None):\n '''\n Set prior mean and covariance other than the default setting of current model,\n as well as the inducing points\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n :inducing_points: matrix of inducing points in shape of (nu,D)\n '''\n if not kernel is None:\n if not inducing_points is None:\n self.covfunc = kernel.fitc(inducing_points)\n self.u = inducing_points\n else:\n if not self.u is None:\n self.covfunc = kernel.fitc(self.u)\n else:\n raise Exception(\"To use default inducing points, please call setData() first!\")\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n if not mean is None:\n self.meanfunc = mean\n self.usingDefaultMean = False\n\n\n\n\n\nclass GPR_FITC(GP_FITC):\n '''\n Model for Gaussian Process Regression FITC\n '''\n def __init__(self):\n super(GPR_FITC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Gauss() # likihood with default noise variance 0.1\n self.inffunc = inf.FITC_Exact() # inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n self.u = None # no default inducing points\n\n\n\n def setNoise(self,log_sigma):\n '''\n Set noise other than default noise value\n\n :param log_sigma: logarithm of the noise sigma\n '''\n self.likfunc = lik.Gauss(log_sigma)\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. 
Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,axisvals=None):\n '''\n Plot 1d GP FITC Regression result.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n plt.figure()\n xss = np.reshape(self.xs,(self.xs.shape[0],))\n ymm = np.reshape(self.ym,(self.ym.shape[0],))\n ys22 = np.reshape(self.ys2,(self.ys2.shape[0],))\n plt.plot(self.x, self.y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)\n plt.plot(self.xs, self.ym, color=MEANCOLOR, ls='-', lw=3.)\n plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.xlabel('input x')\n plt.ylabel('output y')\n plt.plot(self.u,np.ones_like(self.u), ls='None', color='k',marker='x',markersize=12,mew=2)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.FITC_Laplace()\n elif newInf == \"EP\":\n self.inffunc = inf.FITC_EP()\n else:\n raise Exception('Possible inf values are \"Laplace\", \"EP\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another inference techinique other than default Gaussian likelihood.\n\n :param str newLik: 'Laplace'\n '''\n if newLik == \"Laplace\":\n self.likfunc = lik.Laplace()\n self.inffunc = inf.FITC_EP()\n else:\n raise Exception('Possible lik values are \"Laplace\".')\n\n\n\n\n\nclass GPC_FITC(GP_FITC):\n '''\n Model for Gaussian Process Classification FITC\n '''\n def __init__(self):\n super(GPC_FITC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Erf() # erf liklihood\n self.inffunc = inf.FITC_EP() # default inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n self.u = None # no default inducing points\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. 
Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,x1,x2,t1,t2,axisvals=None):\n '''Plot 2d GP FITC classification.\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n plt.plot(self.u[:,0],self.u[:,1],'ko', markersize=12)\n pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.FITC_Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another inference techinique other than default Erf likelihood.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"numpy.exp",
"numpy.tril",
"numpy.ones_like",
"numpy.reshape",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.maximum",
"numpy.tile",
"numpy.ones",
"matplotlib.pyplot.grid",
"numpy.prod",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Algomorph/NeuralTracking | [
"6312be8e18828344c65e25a423c239efcd3428dd",
"6312be8e18828344c65e25a423c239efcd3428dd",
"6312be8e18828344c65e25a423c239efcd3428dd"
] | [
"tests/data_generation/animate_berlin_y_stretch.py",
"tests/data_generation/animate_berlin_x_offset.py",
"tests/test_alignment_holistic.py"
] | [
"import sys\nimport os\nimport shutil\n\nimport cv2\nimport open3d as o3d\nimport open3d.core as o3c\nimport numpy as np\n\nfrom rendering.pytorch3d_renderer import PyTorch3DRenderer\nfrom data import StandaloneFrameDataset\nimport data.presets as presets\nimport tsdf.default_voxel_grid\nimport data.camera\nfrom settings import process_arguments, PathParameters, DeformNetParameters\n\nPROGRAM_EXIT_SUCCESS = 0\n\n\ndef main():\n process_arguments()\n frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value\n\n device = o3c.Device(\"cuda:0\")\n volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)\n\n depth_image = frame_dataset.load_depth_image_open3d(device)\n color_image = frame_dataset.load_color_image_open3d(device)\n intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),\n frame_dataset.get_depth_image_path())\n intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)\n extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)\n\n volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)\n original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()\n renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)\n\n frame_count = 6\n scale_factor_increment = 0.1\n\n scale_center = np.array([0.0855289, -0.03289237, 2.79831315], dtype=np.float32)\n\n def scale_mesh_y(mesh: o3d.geometry.TriangleMesh, factor: float) -> o3d.geometry.TriangleMesh:\n vertices = np.array(mesh.vertices)\n stretched_vertices = vertices - scale_center\n stretched_vertices[:, 1] *= factor\n stretched_vertices += scale_center\n\n _scaled_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(stretched_vertices), mesh.triangles)\n _scaled_mesh.vertex_colors = mesh.vertex_colors\n return _scaled_mesh\n\n # prepare folders\n root_output_directory = os.path.join(PathParameters.output_directory.value, \"berlin_y_stretch_sequence\")\n depth_output_directory = os.path.join(root_output_directory, \"depth\")\n if not os.path.exists(depth_output_directory):\n os.makedirs(depth_output_directory)\n color_output_directory = os.path.join(root_output_directory, \"color\")\n if not os.path.exists(color_output_directory):\n os.makedirs(color_output_directory)\n\n # record animation rendering output\n for i_frame in range(0, frame_count):\n scaled_mesh = scale_mesh_y(original_mesh, 1.0 + scale_factor_increment * i_frame)\n depth, color = renderer.render_mesh_legacy(scaled_mesh, depth_scale=1000.0)\n color_path = os.path.join(color_output_directory, f\"{i_frame:06d}.jpg\")\n depth_path = os.path.join(depth_output_directory, f\"{i_frame:06d}.png\")\n cv2.imwrite(color_path, color)\n cv2.imwrite(depth_path, depth.astype(np.uint16))\n\n shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, \"intrinsics.txt\"))\n\n return PROGRAM_EXIT_SUCCESS\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"import sys\nimport os\nimport shutil\n\nimport cv2\nimport open3d as o3d\nimport open3d.core as o3c\nimport numpy as np\n\nfrom rendering.pytorch3d_renderer import PyTorch3DRenderer\nfrom data import StandaloneFrameDataset\nimport data.presets as presets\nimport tsdf.default_voxel_grid\nimport data.camera\nfrom settings import process_arguments, PathParameters, DeformNetParameters\n\nPROGRAM_EXIT_SUCCESS = 0\n\n\ndef main():\n process_arguments()\n frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value\n\n device = o3c.Device(\"cuda:0\")\n volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)\n\n depth_image = frame_dataset.load_depth_image_open3d(device)\n color_image = frame_dataset.load_color_image_open3d(device)\n intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),\n frame_dataset.get_depth_image_path())\n intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)\n extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)\n\n volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)\n original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()\n renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)\n\n frame_count = 6\n offset_increment = 0.01\n\n def offset_mesh_plus_x(mesh: o3d.geometry.TriangleMesh, offset: float) -> o3d.geometry.TriangleMesh:\n vertices = np.array(mesh.vertices)\n vertices[:, 0] += offset\n _offset_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(vertices), mesh.triangles)\n _offset_mesh.vertex_colors = mesh.vertex_colors\n return _offset_mesh\n\n # prepare folders\n root_output_directory = os.path.join(PathParameters.output_directory.value, \"berlin_x_offset_sequence\")\n depth_output_directory = os.path.join(root_output_directory, \"depth\")\n if not os.path.exists(depth_output_directory):\n os.makedirs(depth_output_directory)\n color_output_directory = os.path.join(root_output_directory, \"color\")\n if not os.path.exists(color_output_directory):\n os.makedirs(color_output_directory)\n\n # record animation rendering output\n for i_frame in range(0, frame_count):\n offset_mesh = offset_mesh_plus_x(original_mesh, offset_increment * i_frame)\n depth, color = renderer.render_mesh_legacy(offset_mesh, depth_scale=1000.0)\n color_path = os.path.join(color_output_directory, f\"{i_frame:06d}.jpg\")\n depth_path = os.path.join(depth_output_directory, f\"{i_frame:06d}.png\")\n cv2.imwrite(color_path, color)\n cv2.imwrite(depth_path, depth.astype(np.uint16))\n\n shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, \"intrinsics.txt\"))\n\n return PROGRAM_EXIT_SUCCESS\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"import os\nfrom collections import namedtuple\n\nimport pytest\nimport sys\nimport random\nfrom pathlib import Path\n\nsys.path.append(str(Path(__file__).parent.parent))\n\n# SWAP THESE LINES IF YOU WANNA USE THE ORIGINAL AUTHORS' DEFORM-NET CODE (e.g. to save test data)\n# from alignment.deform_net_legacy import DeformNet\nfrom alignment import DeformNet\nfrom alignment.default import load_default_nnrt_network\n\nimport torch\nimport open3d.core as o3c\n\n# This test is simply used to ensure (as much as possible) that things are not messed up while we overhaul\n# the forward method of DeformNet\nfrom settings import Parameters\n\nAlignmentTestParameters = namedtuple('AlignmentTestParameters', ['config_file_name', 'input_data_folder', 'gt_data_folder'])\n\n\[email protected](\"parameters\", [AlignmentTestParameters(\"nnrt_fusion_parameters_flow.yaml\", \"inputs\", \"gt_flow\"),\n AlignmentTestParameters(\"nnrt_fusion_parameters_test1.yaml\", \"inputs\", \"gt_test1\")])\ndef test_alignment_holistic(parameters: AlignmentTestParameters):\n import ext_argparse\n configuration_path = os.path.join(Path(__file__).parent.parent.resolve(), f\"configuration_files/{parameters.config_file_name}\")\n ext_argparse.process_settings_file(Parameters, configuration_path, generate_default_settings_if_missing=True)\n\n # make output deterministic\n seed = 1234\n torch.cuda.manual_seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n deform_net: DeformNet = load_default_nnrt_network(o3c.Device.CUDA)\n\n test_path = Path(__file__).parent.resolve()\n test_data_path = os.path.join(test_path, \"test_data\")\n\n # load inputs\n alignment_test_data_path = os.path.join(test_data_path, \"alignment_holistic_tests\")\n inputs_path = os.path.join(alignment_test_data_path, parameters.input_data_folder)\n gt_path = os.path.join(alignment_test_data_path, parameters.gt_data_folder)\n\n source_cuda = torch.load(os.path.join(inputs_path, \"source_cuda.pt\"))\n target_cuda = torch.load(os.path.join(inputs_path, \"target_cuda.pt\"))\n graph_nodes_cuda = torch.load(os.path.join(inputs_path, \"graph_nodes_cuda.pt\"))\n graph_edges_cuda = torch.load(os.path.join(inputs_path, \"graph_edges_cuda.pt\"))\n graph_edges_weights_cuda = torch.load(os.path.join(inputs_path, \"graph_edges_weights_cuda.pt\"))\n graph_clusters_cuda = torch.load(os.path.join(inputs_path, \"graph_clusters_cuda.pt\"))\n pixel_anchors_cuda = torch.load(os.path.join(inputs_path, \"pixel_anchors_cuda.pt\"))\n pixel_weights_cuda = torch.load(os.path.join(inputs_path, \"pixel_weights_cuda.pt\"))\n node_count_cuda = torch.load(os.path.join(inputs_path, \"node_count_cuda.pt\"))\n intrinsics_cuda = torch.load(os.path.join(inputs_path, \"intrinsics_cuda.pt\"))\n\n with torch.no_grad():\n deform_net_data = deform_net(\n source_cuda, target_cuda,\n graph_nodes_cuda, graph_edges_cuda, graph_edges_weights_cuda, graph_clusters_cuda,\n pixel_anchors_cuda, pixel_weights_cuda,\n node_count_cuda, intrinsics_cuda,\n evaluate=True, split=\"test\"\n )\n\n flow2, flow3, flow4, flow5, flow6 = tuple(deform_net_data[\"flow_data\"])\n node_rotations = deform_net_data[\"node_rotations\"]\n node_translations = deform_net_data[\"node_translations\"]\n deformations_validity = deform_net_data[\"deformations_validity\"]\n deformed_points_pred = deform_net_data[\"deformed_points_pred\"]\n valid_solve = deform_net_data[\"valid_solve\"]\n mask_pred = deform_net_data[\"mask_pred\"]\n # @formatter:off\n xy_coords_warped, source_points, 
valid_source_points, target_matches, \\\n valid_target_matches, valid_correspondences, deformed_points_idxs, deformed_points_subsampled = \\\n tuple(deform_net_data[\"correspondence_info\"])\n # @formatter:on\n\n save_ground_truth = False\n if save_ground_truth:\n torch.save(flow2, os.path.join(gt_path, \"flow2.pt\"))\n torch.save(flow3, os.path.join(gt_path, \"flow3.pt\"))\n torch.save(flow4, os.path.join(gt_path, \"flow4.pt\"))\n torch.save(flow5, os.path.join(gt_path, \"flow5.pt\"))\n torch.save(flow6, os.path.join(gt_path, \"flow6.pt\"))\n torch.save(node_rotations, os.path.join(gt_path, \"node_rotations.pt\"))\n torch.save(node_translations, os.path.join(gt_path, \"node_translations.pt\"))\n torch.save(deformations_validity, os.path.join(gt_path, \"deformations_validity.pt\"))\n torch.save(deformed_points_pred, os.path.join(gt_path, \"deformed_points_pred.pt\"))\n torch.save(valid_solve, os.path.join(gt_path, \"valid_solve.pt\"))\n torch.save(mask_pred, os.path.join(gt_path, \"mask_pred.pt\"))\n torch.save(xy_coords_warped, os.path.join(gt_path, \"xy_coords_warped.pt\"))\n torch.save(source_points, os.path.join(gt_path, \"source_points.pt\"))\n torch.save(valid_source_points, os.path.join(gt_path, \"valid_source_points.pt\"))\n torch.save(target_matches, os.path.join(gt_path, \"target_matches.pt\"))\n torch.save(valid_target_matches, os.path.join(gt_path, \"valid_target_matches.pt\"))\n torch.save(valid_correspondences, os.path.join(gt_path, \"valid_correspondences.pt\"))\n torch.save(deformed_points_idxs, os.path.join(gt_path, \"deformed_points_idxs.pt\"))\n torch.save(deformed_points_subsampled, os.path.join(gt_path, \"deformed_points_subsampled.pt\"))\n else:\n # load ground truth\n gt_flow2 = torch.load(os.path.join(gt_path, \"flow2.pt\"))\n gt_flow3 = torch.load(os.path.join(gt_path, \"flow3.pt\"))\n gt_flow4 = torch.load(os.path.join(gt_path, \"flow4.pt\"))\n gt_flow5 = torch.load(os.path.join(gt_path, \"flow5.pt\"))\n gt_flow6 = torch.load(os.path.join(gt_path, \"flow6.pt\"))\n gt_node_rotations = torch.load(os.path.join(gt_path, \"node_rotations.pt\"))\n gt_node_translations = torch.load(os.path.join(gt_path, \"node_translations.pt\"))\n gt_deformations_validity = torch.load(os.path.join(gt_path, \"deformations_validity.pt\"))\n gt_deformed_points_pred = torch.load(os.path.join(gt_path, \"deformed_points_pred.pt\"))\n gt_valid_solve = torch.load(os.path.join(gt_path, \"valid_solve.pt\"))\n gt_mask_pred = torch.load(os.path.join(gt_path, \"mask_pred.pt\"))\n gt_xy_coords_warped = torch.load(os.path.join(gt_path, \"xy_coords_warped.pt\"))\n gt_source_points = torch.load(os.path.join(gt_path, \"source_points.pt\"))\n gt_valid_source_points = torch.load(os.path.join(gt_path, \"valid_source_points.pt\"))\n gt_target_matches = torch.load(os.path.join(gt_path, \"target_matches.pt\"))\n gt_valid_target_matches = torch.load(os.path.join(gt_path, \"valid_target_matches.pt\"))\n gt_valid_correspondences = torch.load(os.path.join(gt_path, \"valid_correspondences.pt\"))\n gt_deformed_points_idxs = torch.load(os.path.join(gt_path, \"deformed_points_idxs.pt\"))\n gt_deformed_points_subsampled = torch.load(os.path.join(gt_path, \"deformed_points_subsampled.pt\"))\n\n # compare output to ground truth\n assert torch.equal(flow2, gt_flow2)\n assert torch.equal(flow3, gt_flow3)\n assert torch.equal(flow4, gt_flow4)\n assert torch.equal(flow5, gt_flow5)\n assert torch.equal(flow6, gt_flow6)\n\n assert torch.equal(node_rotations, gt_node_rotations)\n assert torch.equal(node_translations, 
gt_node_translations)\n assert torch.equal(deformations_validity, gt_deformations_validity)\n assert torch.equal(deformed_points_pred, gt_deformed_points_pred)\n assert torch.equal(valid_solve, gt_valid_solve)\n assert torch.equal(mask_pred, gt_mask_pred)\n assert torch.equal(xy_coords_warped, gt_xy_coords_warped)\n assert torch.equal(source_points, gt_source_points)\n assert torch.equal(valid_source_points, gt_valid_source_points)\n assert torch.equal(target_matches, gt_target_matches)\n assert torch.equal(valid_target_matches, gt_valid_target_matches)\n assert torch.equal(valid_correspondences, gt_valid_correspondences)\n assert torch.equal(deformed_points_idxs, gt_deformed_points_idxs)\n assert torch.equal(deformed_points_subsampled, gt_deformed_points_subsampled)\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
],
[
"torch.equal",
"torch.manual_seed",
"torch.no_grad",
"torch.cuda.manual_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aliabid2243/deepgaze | [
"8c602db89a1d1d8a644b44a381ddb8a693375e08"
] | [
"new_model/test_big.py"
] | [
"import os\nfrom load_data import load_batch, load_data_names, load_batch_from_names, load_batch_from_names_random\nfrom my_model import get_eye_tracker_model\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.optimizers import SGD, adam\n\ndef generator(data, batch_size, img_cols, img_rows, img_ch):\n\n while True:\n for it in list(range(0, data[0].shape[0], batch_size)):\n x, y = load_batch([l[it:it + batch_size] for l in data], img_cols, img_rows, img_ch)\n yield x, y\n\n\ndef test_big(args):\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.dev\n names_path = r\"C:\\Users\\Aliab\\PycharmProjects\\data\\test\"\n print(\"Names to test: {}\".format(names_path))\n\n dataset_path = r\"D:\\GazeCapture\"\n print(\"Dataset: {}\".format(names_path))\n\n weights_path = \"weight_vgg.hdf5\"\n print(\"Weights: {}\".format(weights_path))\n\n # image parameter\n img_cols = 128\n img_rows = 128\n img_ch = 3\n\n # test parameter\n batch_size = 64\n chunk_size = 500\n\n # model\n model = get_eye_tracker_model(img_cols, img_rows, img_ch)\n\n # model summary\n model.summary()\n\n # weights\n print(\"Loading weights...\")\n model = load_model(weights_path)\n\n model.load_weights(weights_path)\n # data\n test_names = load_data_names(names_path)\n\n # limit amount of testing data\n # test_names = test_names[:1000]\n\n # results\n err_x = []\n err_y = []\n\n print(\"Loading testing data...\")\n for it in list(range(0, len(test_names), chunk_size)):\n\n x, y = load_batch_from_names_random(test_names[it:it + chunk_size], dataset_path, batch_size, img_cols, img_rows, img_ch)\n # x, y = load_batch_from_names(test_names[it:it + chunk_size], dataset_path, img_ch, img_cols, img_rows)\n predictions = model.predict(x=x, batch_size=batch_size, verbose=1)\n\n # print and analyze predictions\n for i, prediction in enumerate(predictions):\n print(\"PR: {} {}\".format(prediction[0], prediction[1]))\n print(\"GT: {} {} \\n\".format(y[i][0], y[i][1]))\n\n err_x.append(abs(prediction[0] - y[i][0]))\n err_y.append(abs(prediction[1] - y[i][1]))\n\n # mean absolute error\n mae_x = np.mean(err_x)\n mae_y = np.mean(err_y)\n\n # standard deviation\n std_x = np.std(err_x)\n std_y = np.std(err_y)\n\n # final results\n print(\"MAE: {} {} ( samples)\".format(mae_x, mae_y))\n print(\"STD: {} {} ( samples)\".format(std_x, std_y))\n\n\nif __name__ == '__main__':\n test_big()\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
edpolanco/air_cargo | [
"20ddf6c72dafed85b87486ca46a9c09656f31d90"
] | [
"analysis.py"
] | [
"\"\"\"Module for summarizing cargo planning testing results.\n\n Ed Polanco\n [email protected]\n\"\"\"\nimport pandas as pd\nfrom collections import OrderedDict\nimport datetime\nimport time \nfrom aimacode.search import Problem, Node\nfrom timeit import default_timer as timer\nfrom run_search import PrintableProblem, PROBLEMS\nfrom aimacode.search import (breadth_first_search, astar_search,\n breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,\n greedy_best_first_graph_search, depth_limited_search,\n recursive_best_first_search)\n\n#Names of the various search algorithms\nSEARCHES_SHORT_NAME = [[\"Breadth First\", breadth_first_search, \"\"], #1\n ['Breadth First Tree', breadth_first_tree_search, \"\"], #2\n ['Depth First Graph', depth_first_graph_search, \"\"], #3\n ['Depth Limited', depth_limited_search, \"\"], #4\n ['Uniform Cost', uniform_cost_search, \"\"], #5\n ['Recursive Best First w/ h1', recursive_best_first_search, 'h_1'], #6\n ['Greedy Best First Graph w/ h1', greedy_best_first_graph_search, 'h_1'], #7\n ['Astar w/ h1', astar_search, 'h_1'], #8\n ['Astar w/ ignore pre-cond.', astar_search, 'h_ignore_preconditions'], #9\n ['Astar w/ level-sum', astar_search, 'h_pg_levelsum'], #10\n ]\n\ndef show_path(node:Node):\n \"\"\"\n Print solution set to screen\n\n Paremeter\n ----------\n node: Node\n Search tree object that has 'solution()' method \n \"\"\"\n if node is None:\n print(\"The selected planner did not find a solution for this problem. \" +\n \"Make sure you have completed the AirCargoProblem implementation \" +\n \"and pass all unit tests first.\")\n else:\n msg = \"Search function {} plan length: {} \".format(node[0],len(node[1].solution()) )\n print(msg)\n for action in node[1].solution():\n print(\"{}{}\".format(action.name, action.args))\n\ndef run_search_table(problem: Problem, search_function, parameter=None):\n \"\"\"Perform a test to find a solution to one of cargo problems.\n\n Paremeters:\n ----------\n problem: Problem\n Cargo planning problem\n \n search_function: str\n Search algorithm function name\n \n parameter: parameter value if any [None]\n Parameter value for the search algorithms that require it.\n\n Returns:\n ----------\n Returns tuple of 5 values:\n 1 = Node expansions count\n 2 = number of times we tested for goal state\n 3 = Number of new nodes\n 4 = Number of steps\n 5 = Search tree Node object\n \"\"\" \n start = timer()\n ip = PrintableProblem(problem)\n if parameter is not None:\n node = search_function(ip, parameter)\n else:\n node = search_function(ip)\n end = timer()\n\n return (ip.succs, ip.goal_tests, ip.states, end - start, node )\n\ndef search_data(problem_id: int, s_choices: list):\n \"\"\" Perform test to solve cargo planning problem with\n the given search algorithms.\n\n Paremeters:\n ----------\n problem_id: int\n Cargo planning problem id\n \n s_choices: list\n List of the search algorithm to try.\n\n Returns:\n ----------\n Returns tuple of two items:\n 1 = DataFrame that summarizes test result\n 2 = A list of tuples, where the first item in the \n tuple is the search algorithm name and the second\n is its corresponding search Node object.\n \"\"\"\n #lets get a list of problems and search algorithms\n problem_name,problem = PROBLEMS[problem_id - 1][0],PROBLEMS[problem_id- 1][1]\n searches = [SEARCHES_SHORT_NAME[i-1] for i in map(int, s_choices)]\n\n # helper variables to create DataFrame\n steps = []\n fun_name = []\n expansions = []\n goal_test =[]\n new_nodes = []\n elapsed_time = []\n nodes = []\n\n 
for sname, s, h in searches:\n start_time = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %I:%M:%S%p')\n print(\"\\nSolving {} using {} start time {}...\".format(problem_name, sname, start_time))\n\n _p = problem()\n _h = None if not h else getattr(_p, h)\n \n #perform test get result\n result = run_search_table(_p, s, _h)\n\n #update helper list variables\n fun_name.append(sname)\n expansions.append(result[0])\n goal_test.append(result[1])\n new_nodes.append(result[2])\n elapsed_time.append(result[3])\n steps.append(len(result[4].solution()) )\n nodes.append([sname,result[4]])\n \n #create dictionary for DataFrame input\n table_dict = OrderedDict()\n table_dict[\"Function Name\"] = fun_name\n table_dict[\"Solution Steps\"] = steps\n table_dict[\"Expansions\"] = expansions\n table_dict[\"Goal Tests\"] = goal_test\n table_dict[\"New_Nodes\"] = new_nodes\n table_dict[\"Elapsed Seconds\"] = elapsed_time\n \n dataframe = pd.DataFrame(table_dict)\n dataframe.index +=1\n return dataframe, nodes"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |