repo_name (stringlengths 8-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
zoeyuchao/onpolicy-release | [
"c2cb64e59c5b1f21cce022db76c378b396fd480e"
] | [
"onpolicy/envs/mpe/scenarios/simple_push.py"
] | [
"import numpy as np\nfrom onpolicy.envs.mpe.core import World, Agent, Landmark\nfrom onpolicy.envs.mpe.scenario import BaseScenario\nimport random\n\n#\n# # the non-ensemble version of <ensemble_push>\n#\n#\n\nclass Scenario(BaseScenario):\n def make_world(self, args):\n world = World()\n world.world_length = args.episode_length\n # set any world properties first\n world.dim_c = 2\n num_agents = args.num_agents#2\n num_adversaries = 1\n num_landmarks = args.num_landmarks#2\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n if i < num_adversaries:\n agent.adversary = True\n else:\n agent.adversary = False\n # agent.u_noise = 1e-1\n # agent.c_noise = 1e-1\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = 'landmark %d' % i\n landmark.collide = False\n landmark.movable = False\n # make initial conditions\n self.reset_world(world)\n return world\n\n def reset_world(self, world):\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.1, 0.1, 0.1])\n landmark.color[i + 1] += 0.8\n landmark.index = i\n # set goal landmark\n goal = np.random.choice(world.landmarks)\n for i, agent in enumerate(world.agents):\n agent.goal_a = goal\n agent.color = np.array([0.25, 0.25, 0.25])\n if agent.adversary:\n agent.color = np.array([0.75, 0.25, 0.25])\n else:\n j = goal.index\n agent.color[j + 1] += 0.5\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = 0.8 * np.random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)\n\n def agent_reward(self, agent, world):\n # the distance to the goal\n return -np.sqrt(np.sum(np.square(agent.state.p_pos - agent.goal_a.state.p_pos)))\n\n def adversary_reward(self, agent, world):\n # keep the nearest good agents away from the goal\n agent_dist = [np.sqrt(np.sum(np.square(a.state.p_pos - a.goal_a.state.p_pos))) for a in world.agents if not a.adversary]\n pos_rew = min(agent_dist)\n #nearest_agent = world.good_agents[np.argmin(agent_dist)]\n #neg_rew = np.sqrt(np.sum(np.square(nearest_agent.state.p_pos - agent.state.p_pos)))\n neg_rew = np.sqrt(np.sum(np.square(agent.goal_a.state.p_pos - agent.state.p_pos)))\n #neg_rew = sum([np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos))) for a in world.good_agents])\n return pos_rew - neg_rew\n \n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks: # world.entities:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent: continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not agent.adversary:\n 
return np.concatenate([agent.state.p_vel] + [agent.goal_a.state.p_pos - agent.state.p_pos] + [agent.color] + entity_pos + entity_color + other_pos)\n else:\n #other_pos = list(reversed(other_pos)) if random.uniform(0,1) > 0.5 else other_pos # randomize position of other agents in adversary network\n return np.concatenate([agent.state.p_vel] + entity_pos + other_pos)\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.choice",
"numpy.array",
"numpy.concatenate",
"numpy.square"
]
] |
ael-noblegas/pychron | [
"1a81e05d9fba43b797f335ceff6837c016633bcf",
"1a81e05d9fba43b797f335ceff6837c016633bcf"
] | [
"pychron/core/ui/qt/color_map_bar_editor.py",
"pychron/mv/focus/autofocus_manager.py"
] | [
"# ===============================================================================\n# Copyright 2012 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# ============= enthought library imports =======================\nfrom __future__ import absolute_import\nfrom chaco.data_range_1d import DataRange1D\nfrom chaco.default_colormaps import color_map_dict, color_map_name_dict\nfrom pyface.qt.QtGui import QPainter, QColor, QFrame\nfrom traits.api import Float, Int, Str\nfrom traitsui.basic_editor_factory import BasicEditorFactory\nfrom traitsui.qt4.editor import Editor\nfrom numpy import array\n\n# ============= local library imports ==========================\n# from matplotlib.cm import get_cmap\n\n\nclass Bar(QFrame):\n value = None\n low = 0\n high = 1\n color_scalar = 1\n colormap = 'jet'\n bar_width = 100\n scale = 'power'\n\n # def __init__(self, parent, ident=-1):\n # super(Bar, self).__init__()\n # self._cmap = get_cmap(self.colormap)\n\n def paintEvent(self, e):\n qp = QPainter()\n qp.begin(self)\n qp.setBrush(QColor(*self.value))\n qp.drawRect(0, 0, self.bar_width, 20)\n qp.end()\n\n def set_value(self, v):\n \"\"\"\n map v to users color scale\n use power law v=A*x**(1/cs)\n increase cs increases the rate of change at low values\n increase cs will make it easier to see small pertubations (more color change) at\n the low end.\n\n \"\"\"\n if self.scale == 'power':\n N = 1 / float(self.color_scalar)\n A = 1 / self.high ** N\n nv = A * v ** N\n else:\n nv = min(1, max(0, (v - self.low) / (self.high - self.low)))\n\n vs = self.cmap.map_screen(array([nv,]))[0][:3]\n self.value = [x * 255 for x in vs]\n self.update()\n\n\nclass _BarGaugeEditor(Editor):\n def init(self, parent):\n self.control = Bar()\n self.control.low = low = self.factory.low\n self.control.high = high = self.factory.high\n self.control.color_scalar = self.factory.color_scalar\n self.control.bar_width = self.factory.width\n self.control.scale = self.factory.scale\n\n # if self.factory.scale == 'power':\n # high = N = 1 / float(self.color_scalar)\n # A = 1 / self.high ** N\n self.control.cmap = color_map_name_dict[self.factory.colormap](DataRange1D(low_setting=0, high_setting=1))\n\n def update_editor(self):\n if self.control:\n self.control.set_value(self.value)\n\n\nclass BarGaugeEditor(BasicEditorFactory):\n klass = _BarGaugeEditor\n low = Float\n high = Float\n color_scalar = Int(1)\n scale = Str('power')\n colormap = Str('jet')\n width = Int(100)\n\n# ============= EOF =============================================\n",
"# ===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============================================================================\n\n# =============enthought library imports=======================\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport six.moves.cPickle as pickle\n\nfrom traits.api import Bool, Any, Instance, Button, Property, Event, on_trait_change\nfrom traitsui.api import View, Item, Handler, HGroup\n\n# ============= standard library imports ========================\n# from threading import Thread\nfrom threading import Event as TEvent\nfrom numpy import linspace, argmin, argmax, random, asarray\nimport time\nimport os\n# ============= local library imports ==========================\nfrom pychron.core.time_series.time_series import smooth\nfrom pychron.image.cv_wrapper import grayspace, crop, get_focus_measure\n# from pychron.image.cvwrapper import grayspace, get_focus_measure, crop, resize\nfrom scipy.ndimage.measurements import variance\nfrom scipy.ndimage.filters import generic_gradient_magnitude, sobel\nfrom scipy.ndimage import sum as ndsum\nfrom pychron.paths import paths\nfrom pychron.managers.manager import Manager\nfrom pychron.image.image import Image\n# from pychron.machine_vision.focus_parameters import FocusParameters\n# from pychron.image.image_editor import ImageEditor\nfrom pychron.graph.graph import Graph\nfrom pychron.mv.focus.focus_parameters import FocusParameters\nfrom pychron.core.ui.image_editor import ImageEditor\nfrom pychron.core.ui.gui import invoke_in_main_thread\nfrom pychron.core.ui.thread import Thread\n\n\nclass ConfigureHandler(Handler):\n def closed(self, info, isok):\n if isok:\n info.object.dump_parameters()\n\n\nclass AutoFocusManager(Manager):\n \"\"\"\n currently uses passive focus techniques\n see\n\n http://en.wikipedia.org/wiki/Autofocus\n\n \"\"\"\n\n video = Any\n laser_manager = Any\n stage_controller = Any\n canvas = Any\n parameters = Instance(FocusParameters)\n configure_button = Button('configure')\n\n autofocus_button = Event\n autofocus_label = Property(depends_on='autofocusing')\n autofocusing = Bool\n\n # threading event for cancel signal\n _evt_autofocusing = None\n\n image = Instance(Image, ())\n\n graph = None\n\n def dump_parameters(self):\n p = os.path.join(paths.hidden_dir, 'autofocus_configure')\n self.info('dumping parameters to {}'.format(p))\n with open(p, 'wb') as f:\n pickle.dump(self.parameters, f)\n\n def load_parameter(self):\n p = os.path.join(paths.hidden_dir, 'autofocus_configure')\n if os.path.isfile(p):\n with open(p, 'rb') as f:\n try:\n params = pickle.load(f)\n self.info('loading parameters from {}'.format(p))\n\n if not isinstance(params, FocusParameters):\n self.info('out of date parameters file. 
using default')\n params = FocusParameters()\n return params\n\n except Exception as e:\n print('autofocus load parameter', e)\n return FocusParameters()\n else:\n return FocusParameters()\n\n def passive_focus(self, block=False, **kw):\n\n self._evt_autofocusing = TEvent()\n self._evt_autofocusing.clear()\n# manager = self.laser_manager\n oper = self.parameters.operator\n self.info('passive focus. operator = {}'.format(oper))\n\n g = self.graph\n if not g:\n g = Graph(plotcontainer_dict=dict(padding=10),\n window_x=0.70,\n window_y=20,\n window_width=325,\n window_height=325,\n window_title='Autofocus'\n )\n self.graph = g\n\n g.clear()\n\n g.new_plot(padding=[40, 10, 10, 40],\n xtitle='Z (mm)',\n ytitle='Focus Measure ({})'.format(oper)\n )\n g.new_series()\n g.new_series()\n\n invoke_in_main_thread(self._open_graph)\n\n target = self._passive_focus\n self._passive_focus_thread = Thread(name='autofocus', target=target,\n args=(self._evt_autofocusing,\n\n ),\n kwargs=kw\n )\n self._passive_focus_thread.start()\n if block:\n# while 1:\n# if not self._passive_focus_thread.isRunning():\n# break\n# time.sleep(0.25)\n self._passive_focus_thread.join()\n\n def _open_graph(self):\n ui = self.graph.edit_traits()\n self.add_window(ui)\n\n def stop_focus(self):\n\n if self.stage_controller:\n self.stage_controller.stop()\n\n self.info('autofocusing stopped by user')\n\n def _passive_focus(self, stop_signal, set_zoom=True):\n '''\n sweep z looking for max focus measure\n FMgrad= roberts or sobel (sobel removes noise)\n FMvar = intensity variance\n '''\n\n self.autofocusing = True\n\n manager = self.laser_manager\n fstart = self.parameters.fstart\n fend = self.parameters.fend\n step_scalar = self.parameters.step_scalar\n zoom = self.parameters.zoom\n operator = self.parameters.operator\n\n steps = step_scalar * (max(fend, fstart) - min(fend, fstart)) + 1\n\n prev_zoom = None\n if set_zoom and \\\n manager is not None and \\\n zoom:\n motor = manager.get_motor('zoom')\n if motor:\n prev_zoom = motor.data_position\n self.info('setting zoom: {}'.format(zoom))\n manager.set_motor('zoom', zoom, block=True)\n time.sleep(1.5)\n\n args = self._do_focusing(fstart, fend, steps, operator)\n\n if manager is not None:\n if prev_zoom is not None:\n self.info('returning to previous zoom: {}'.format(prev_zoom))\n manager.set_motor('zoom', prev_zoom, block=True)\n\n if args:\n mi, fmi, ma, fma = args\n\n self.info('''passive focus results:Operator={}\nImageGradmin={} (z={})\nImageGradmax={}, (z={})'''.format(operator, mi, fmi, ma, fma))\n\n focus_pos = fma\n self.graph.add_vertical_rule(focus_pos)\n self.graph.redraw()\n# self.graph.add_vertical_rule(fma)\n\n self.info('calculated focus z= {}'.format(focus_pos))\n\n# if set_z:\n controller = self.stage_controller\n if controller is not None:\n if not stop_signal.isSet():\n controller.single_axis_move('z', focus_pos, block=True)\n controller._z_position = focus_pos\n controller.z_progress = focus_pos\n\n self.autofocusing = False\n\n def _cancel_sweep(self, vo):\n if self._evt_autofocusing.isSet():\n # return to original velocity\n self.autofocusing = False\n self._reset_velocity(vo)\n return True\n\n def _reset_velocity(self, vo):\n if self.stage_controller:\n pdict = dict(velocity=vo, key='z')\n self.stage_controller.set_single_axis_motion_parameters(pdict=pdict)\n\n def _do_focusing(self, start, end, steps, operator):\n screen_roi = self._get_roi()\n self._add_focus_area_rect(*screen_roi)\n\n src = self._load_source()\n src = asarray(src)\n h, w, _d = src.shape\n\n 
cx = w / 2.\n cy = h / 2.\n\n cw = self.parameters.crop_width\n ch = self.parameters.crop_height\n\n roi = cx, cy, cw, ch\n\n '''\n start the z in motion and take pictures as you go\n query stage_controller to get current z\n '''\n\n self.info('focus sweep start={} end={}'.format(start, end))\n # move to start position\n controller = self.stage_controller\n if controller:\n vo = controller.axes['z'].velocity\n if self._cancel_sweep(vo):\n return\n self.graph.set_x_limits(min(start, end), max(start, end), pad=2)\n # sweep 1 and velocity 1\n self._do_sweep(start, end, velocity=self.parameters.velocity_scalar1)\n fms, focussteps = self._collect_focus_measures(operator, roi)\n if not (fms and focussteps):\n return\n\n # reached end of sweep\n # calculate a nominal focal point\n args = self._calculate_nominal_focal_point(fms, focussteps)\n if not args:\n return\n nfocal = args[3]\n\n nwin = self.parameters.negative_window\n pwin = self.parameters.positive_window\n\n if self._cancel_sweep(vo):\n return\n nstart, nend = max(0, nfocal - nwin), nfocal + pwin\n# mi = min(min(nstart, nend), min(start, end))\n# ma = max(max(nstart, nend), max(start, end))\n# self.graph.set_x_limits(mi, ma, pad=2)\n time.sleep(1)\n # do a slow tight sweep around the nominal focal point\n self._do_sweep(nstart, nend, velocity=self.parameters.velocity_scalar2)\n fms, focussteps = self._collect_focus_measures(operator, roi, series=1)\n\n self._reset_velocity(vo)\n\n else:\n focussteps = linspace(0, 10, 11)\n fms = -(focussteps - 5) ** 2 + 10 + random.random(11)\n\n self.info('frames analyzed {}'.format(len(fms)))\n\n# self.canvas.markupcontainer.pop('croprect')\n return self._calculate_nominal_focal_point(fms, focussteps)\n\n def _do_sweep(self, start, end, velocity=None):\n controller = self.stage_controller\n controller.single_axis_move('z', start, block=True)\n# time.sleep(0.1)\n # explicitly check for motion\n# controller.block(axis='z')\n\n if velocity:\n vo = controller.axes['z'].velocity\n\n controller.set_single_axis_motion_parameters(pdict=dict(velocity=vo * velocity,\n key='z'))\n\n self.info('starting sweep from {}'.format(controller.z_progress))\n # pause before moving to end\n time.sleep(0.25)\n controller.single_axis_move('z', end, update=100, immediate=True)\n\n def _collect_focus_measures(self, operator, roi, series=0):\n controller = self.stage_controller\n focussteps = []\n fms = []\n if controller.timer:\n p = controller.timer.get_interval()\n self.debug('controller timer period {}'.format(p))\n pz = controller.z_progress\n\n while 1:\n src = self._load_source()\n x = controller.z_progress\n if x != pz:\n y = self._calculate_focus_measure(src, operator, roi)\n self.graph.add_datum((x, y), series=series)\n\n focussteps.append(x)\n fms.append(y)\n\n pz = x\n\n if not (controller.timer.isActive() and \\\n not self._evt_autofocusing.isSet()):\n break\n time.sleep(p)\n\n self.debug('sweep finished')\n\n\n return fms, focussteps\n\n def _calculate_nominal_focal_point(self, fms, focussteps):\n if fms:\n sfms = smooth(fms)\n if sfms is not None:\n\n self.graph.new_series(focussteps, sfms)\n self.graph.redraw()\n\n fmi = focussteps[argmin(sfms)]\n fma = focussteps[argmax(sfms)]\n\n mi = min(sfms)\n ma = max(sfms)\n\n return mi, fmi, ma, fma\n\n def _calculate_focus_measure(self, src, operator, roi):\n '''\n see\n IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM\n FOR DIGITAL STILL CAMERA\n DOI 10.1109/30.468047\n and\n http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus\n '''\n\n # need to 
resize to 640,480. this is the space the roi is in\n# s = resize(grayspace(pychron), 640, 480)\n src = grayspace(src)\n v = crop(src, *roi)\n\n di = dict(var=lambda x:variance(x),\n laplace=lambda x: get_focus_measure(x, 'laplace'),\n sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest'))\n )\n\n func = di[operator]\n return func(v)\n\n def image_view(self):\n v = View(Item('image', show_label=False, editor=ImageEditor(),\n width=640,\n height=480,\n style='custom'))\n return v\n\n def traits_view(self):\n v = View(\n HGroup(self._button_factory('autofocus_button', 'autofocus_label'),\n Item('configure_button', show_label=False),\n show_border=True,\n label='Autofocus'\n )\n )\n return v\n\n def configure_view(self):\n v = View(Item('parameters', style='custom', show_label=False),\n handler=ConfigureHandler,\n buttons=['OK', 'Cancel'],\n kind='livemodal',\n title='Configure Autofocus',\n x=0.80,\n y=0.05\n )\n return v\n\n def _load_source(self):\n src = self.video.get_frame()\n return src\n# if pychron:\n# return Image.new_frame(pychron)\n# self.image.load(pychron)\n\n# return self.image.source_frame\n\n def _get_roi(self):\n w = self.parameters.crop_width\n h = self.parameters.crop_height\n\n cx, cy = self.canvas.get_center_rect_position(w, h)\n\n\n# cw, ch = self.canvas.outer_bounds\n# print w, h, cw, ch\n# cx = cw / 2. - w / 2.\n# cy = ch / 2. - h / 2.\n# cx = (cw - w) / 2.\n# cy = (ch - h) / 2.\n# cx = (640 * self.canvas.scaling - w) / 2\n# cy = (480 * self.canvas.scaling - h) / 2\n roi = cx, cy, w, h\n\n return roi\n\n def _add_focus_area_rect(self, cx, cy, w, h):\n# pl = self.canvas.padding_left\n# pb = self.canvas.padding_bottom\n\n self.canvas.remove_item('croprect')\n self.canvas.add_markup_rect(cx, cy, w, h, identifier='croprect')\n\n def _autofocus_button_fired(self):\n if not self.autofocusing:\n self.autofocusing = True\n\n self.passive_focus()\n else:\n self.autofocusing = False\n self._evt_autofocusing.set()\n self.stop_focus()\n\n def _configure_button_fired(self):\n self._crop_rect_update()\n self.edit_traits(view='configure_view', kind='livemodal')\n\n self.canvas.remove_item('croprect')\n# try:\n# self.canvas.markupcontainer.pop('croprect')\n# except KeyError:\n# pass\n\n @on_trait_change('parameters:[_crop_width,_crop_height]')\n def _crop_rect_update(self):\n roi = self._get_roi()\n self._add_focus_area_rect(*roi)\n\n def _get_autofocus_label(self):\n return 'Autofocus' if not self.autofocusing else 'Stop'\n\n\n def _parameters_default(self):\n return self.load_parameter()\n\n def _autofocusing_changed(self, new):\n if not new:\n self.canvas.remove_item('croprect')\n# ===============================================================================\n# Deprecated\n# ===============================================================================\n# ============= EOF =====================================\n\n"
] | [
[
"numpy.array"
],
[
"numpy.argmin",
"numpy.asarray",
"numpy.argmax",
"numpy.random.random",
"scipy.ndimage.measurements.variance",
"scipy.ndimage.filters.generic_gradient_magnitude",
"numpy.linspace"
]
] |
awinawin1/prediksi | [
"b3d552555f775d7b6a1b22077146443fe09bbf5d"
] | [
"public/code/simpleCropPredictSpektogram.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 15 00:21:05 2021\n\n@author: marina\n\"\"\"\nimport os\nimport shutil\nimport pyedflib\nimport numpy as np\nimport pandas as pd\nimport sys\nimport mne \nfrom pywt import wavedec\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom keras.models import Sequential\n #importing layers\nfrom keras.layers import Conv2D,Flatten,Dense,MaxPooling2D \nfrom tensorflow.keras.optimizers import SGD\n# pathDataSet = \"D:\\\\Kuliah\\Tugas Akhir\\chb-mit-scalp-eeg-database-1.0.0\\\\chb07\\\\\"\npathDataSet = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/uploadedSpektogram/\"\npathSaveData = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/uploadedSpektogram/spektogram/\"\n\n\ndef data_load(FILE, selected_channels=[]): \n fullNm = pathDataSet + FILE\n # fullNm = FILE\n f = pyedflib.EdfReader(fullNm )\n n = f.signals_in_file\n signal_labels = f.getSignalLabels()\n channel_freq = f.getSampleFrequencies()\n\n sigbufs = np.zeros((n, f.getNSamples()[0]))\n for i in np.arange(n):\n sigbufs[i, :] = f.readSignal(i)\n f.close()\n \n # and load the data into a DataFrame\n df_signals = pd.DataFrame(sigbufs)\n df_signals = df_signals.transpose()\n df_signals.columns = signal_labels\n df_signals = df_signals.loc[:,~df_signals.columns.duplicated()]\n df_signals = df_signals[selected_channels].astype('float32') \n return df_signals,channel_freq[0]\n\ndef mne_object(data, freq, events = None):\n info = mne.create_info(ch_names=list(data.columns), \n sfreq=freq, \n ch_types=['eeg']*data.shape[-1])\n data_T = data.transpose()\n raw = mne.io.RawArray(data_T, info,verbose=False)\n if events:\n start_times = np.array(events[::2])\n end_times = np.array(events[1::2])\n anno_length = end_times-start_times\n event_name = np.array(['Ictal']*len(anno_length))\n raw.set_annotations(mne.Annotations(start_times,\n anno_length,\n event_name))\n return raw\n\ndef loadAndFiltering(FILE,channel_keeps):\n raw_data, freq = data_load(FILE, channel_keeps)\n if len(raw_data) ==0:\n print(\"no data \")\n return raw_data\n mne_data = mne_object(raw_data, freq)\n raw=mne_data.copy()\n return raw\n\ndef extract_windows(array, start, max_time, sub_window_size,\n stride_size): \n sub_windows = (\n start + \n np.expand_dims(np.arange(sub_window_size), 0) +\n np.expand_dims(np.arange(max_time + 1- sub_window_size-start, step=stride_size), 0).T\n ) \n return array[:,sub_windows]\n\n\ndef Crop(raw): \n cropS = 3\n strides = 1\n \n tMin=0\n tMax=raw.get_data().shape[1]#18*256*cropS \n\n\n sub_window_size,stride_size = 256*cropS,256*strides\n cropData = extract_windows(raw.get_data(), tMin, tMax , sub_window_size,stride_size)\n cropData = cropData.reshape(cropData.shape[1],cropData.shape[0],cropData.shape[2])\n \n return cropData\n\n# def create_modelCNN(input_shape, num_class,flatten=False):\n# from tensorflow.keras.models import Sequential\n# from tensorflow.keras.layers import Dense\n# from tensorflow.keras.backend import clear_session\n# from tensorflow.keras.optimizers import Adam\n \n# from tensorflow.keras.layers import Conv1D#, Input\n# from tensorflow.keras.layers import MaxPooling1D\n# from tensorflow.keras.layers import GlobalAveragePooling1D#, GlobalMaxPooling1D\n# from keras.layers import Activation,Flatten, Dropout\n \n# clear_session()\n# model = Sequential()\n# def add_conv_block(model, num_filters, input_shape=None):\n# if input_shape:\n# model.add(Conv1D(num_filters, kernel_size=3, 
activation='relu', padding='same', input_shape=input_shape))\n# else:\n# model.add(Conv1D(num_filters, kernel_size=3, activation='relu', padding='same'))\n# return model\n# model = add_conv_block(model, 128, input_shape=input_shape[1:])\n# model = add_conv_block(model, 128)\n# model.add(Dropout(0.3)) \n# model.add(MaxPooling1D(pool_size=3, # size of the window\n# strides=2, # factor to downsample\n# padding='same'))\n# model.add(Dropout(0.1))\n# for i in range(2):\n# model.add(Conv1D(filters=256,kernel_size=3,padding=\"same\",activation='relu'))\n# model.add(Dropout(0.1))\n# if flatten:\n# model.add(Flatten())\n# else:\n# model.add(GlobalAveragePooling1D())\n# model.add(Dense(units=128,activation='relu'))\n# model.add(Dropout(0.1))\n# model.add(Dense(num_class))\n# model.add(Activation('softmax'))\n# model.compile(optimizer=Adam(0.0001), \n# loss='categorical_crossentropy', \n# metrics=['accuracy'])\n# return model\n\ndef modelCNN2(input_shape,nb_classes):\n model = Sequential()\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=input_shape))\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D((2, 2)))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dense(nb_classes, activation='softmax'))\n\t# compile model\n opt = SGD(lr=0.001, momentum=0.9)\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\n return model\n\ndef plotSpektogram(x,fs,nmFile=''):\n f, t, Sxx = signal.spectrogram(x, fs)\n cut=10\n imgAll=[]\n for i,sinyal in enumerate(Sxx):\n img = plt.pcolormesh(t, f[:cut], sinyal[:cut], shading='gouraud')\n imgAll.append([(r, g, b) for r, g, b, a in img.to_rgba(img.get_array())])\n # print(nmFile)\n # if nmFile !='':\n #(18, 30, 3)\n # print(\"masuk sini\")\n # plt.savefig(nmFile)\n # plt.show()\n # plt.imsave(nmFile, imgAll)\n \n # imgAll = np.array(imgAll)# .reshape(-1,3)\n imgAll = np.array(imgAll).ravel()\n #(18, 30, 3)\n return imgAll \n \nif __name__ == '__main__':\n FILE=sys.argv[1]\n # FILE = 'D:\\\\Kuliah\\Tugas Akhir\\chb-mit-scalp-eeg-database-1.0.0\\\\chb24\\\\chb24_22.edf'\n # FILE = 'chb07_12.edf'\n FILE = FILE.replace(\"'\",\"\")\n dir_path = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/\"\n # if(os.path.isdir(dir_path+FILE)):\n # shutil.rmtree(dir_path+FILE)\n # os.mkdir(\"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/\"+FILE,0o777)\n loaded = np.load(\"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/spektogram/channel_keeps.npz\")\n selected_channels =loaded['channel_keeps'] \n segmen=[]\n raw = loadAndFiltering(FILE,selected_channels)\n \n cropData = Crop(raw) \n numCH = cropData[0].shape[0]\n oneData = cropData[0]\n oneData = plotSpektogram(oneData,256)\n \n oneData = oneData.reshape(1,numCH,-1, 3)\n KELAS = 3\n bntk_input = (18, 30, 3)\n model = modelCNN2(bntk_input,KELAS)\n # model = modelCNN2(oneData.shape,KELAS)#,False) \n nmModel = '/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/modelCNNSpektrogram_3.h5'\n\n model.load_weights(nmModel) \n cnt=0 \n \n for idx in 
range(cropData.shape[0]): \n numCH = cropData[idx].shape[0]\n oneData = cropData[idx]\n nmFile = \"/Applications/XAMPP/xamppfiles/htdocs/prediksi/storage/app/public/fitur3Kelas30DetikImg/%s/%s_%d.png\"%(FILE,FILE,idx)\n # nmFile = dir+\"%s_%s.png\"%(FILE,idx)\n oneData = plotSpektogram(oneData,256,nmFile)\n oneData = oneData.reshape(1,numCH,-1, 3)\n yPred = model.predict(oneData)\n yPred = np.argmax(yPred,axis=1)\n if yPred[0] == 0:\n hasil = \"Normal\"\n elif yPred[0] == 1:\n hasil = \"Inter\" \n else:\n hasil = \"Ictal\"\n # break\n segmen.append(hasil) \n # print(\"segment=%d prediksi=%s <br>\"%(idx,hasil))\n cnt+=1\n if cnt>1000:\n break\n saveHistory = open(pathSaveData+FILE+\".txt\",\"w\")\n saveHistory.write(str(segmen))\n saveHistory.close()\n print(segmen)\n \n \n \n \n\n"
] | [
[
"numpy.load",
"matplotlib.pyplot.pcolormesh",
"pandas.DataFrame",
"numpy.argmax",
"numpy.arange",
"scipy.signal.spectrogram",
"tensorflow.keras.optimizers.SGD",
"numpy.array"
]
] |
Aympab/BigDataHadoopSparkDaskCourse | [
"42f9e0475cbd7c5db240ccc6dc00c19b9006012a"
] | [
"TPs/TP4/test_flower.py"
] | [
"import pyspark\nfrom pyspark import SparkContext\nfrom pyspark.sql import Row\nfrom pyspark.sql import SQLContext\nfrom pyspark import SparkFiles\nimport os\nimport pandas as pd\n\nsc =SparkContext()\nsqlContext = SQLContext(sc)\n\n\ndata_dir=\"/work/irlin355_1/gratienj/ParallelProgrammingCourse/BigDataHadoopSpark/data\"\nfile = os.path.join(data_dir,\"iris.csv\")\npanda_df = pd.read_csv(file)\n\nsqlContext = SQLContext(sc)\n#df = sqlContext.read.csv(SparkFiles.get(\"iris.csv\"), header=True, inferSchema= True)\t\ndf=sqlContext.createDataFrame(panda_df)\ndf.printSchema()\ndf.show(5, truncate = False)\ndf.select('petal_width','variety').show(5)\n\ndf.groupBy(\"variety\").count().sort(\"count\",ascending=True).show()\n\ndf.describe().show()\n"
] | [
[
"pandas.read_csv"
]
] |
owenshen24/acme | [
"71434dffd3449236f9b8aaf7a53ceab515e75a2a"
] | [
"acme/agents/actors_tf2_test.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for actors_tf2.\"\"\"\n\nfrom absl.testing import absltest\n\nfrom acme import environment_loop\nfrom acme import specs\nfrom acme.agents import actors_tf2\nfrom acme.testing import fakes\n\nimport dm_env\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\n\ndef _make_fake_env() -> dm_env.Environment:\n env_spec = specs.EnvironmentSpec(\n observations=specs.Array(shape=(10, 5), dtype=np.float32),\n actions=specs.DiscreteArray(num_values=3),\n rewards=specs.Array(shape=(), dtype=np.float32),\n discounts=specs.BoundedArray(\n shape=(), dtype=np.float32, minimum=0., maximum=1.),\n )\n return fakes.Environment(env_spec, episode_length=10)\n\n\nclass ActorTest(absltest.TestCase):\n\n def test_feedforward(self):\n environment = _make_fake_env()\n env_spec = specs.make_environment_spec(environment)\n\n network = snt.Sequential([\n snt.Flatten(),\n snt.Linear(env_spec.actions.num_values),\n lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),\n ])\n\n actor = actors_tf2.FeedForwardActor(network)\n loop = environment_loop.EnvironmentLoop(environment, actor)\n loop.run(20)\n\n def test_recurrent(self):\n environment = _make_fake_env()\n env_spec = specs.make_environment_spec(environment)\n\n network = snt.DeepRNN([\n snt.Flatten(),\n snt.Linear(env_spec.actions.num_values),\n lambda x: tf.argmax(x, axis=-1, output_type=env_spec.actions.dtype),\n ])\n\n actor = actors_tf2.RecurrentActor(network)\n loop = environment_loop.EnvironmentLoop(environment, actor)\n loop.run(20)\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] | [
[
"tensorflow.argmax"
]
] |
prasadph/ga-learner-dsmp-repo | [
"ac1cc9d96250718f2842592e643c885d54ab2903"
] | [
"NLP/code.py"
] | [
"# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nimport re\nfrom nltk.corpus import stopwords\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score ,confusion_matrix\n\n\n# Code starts here\n\n# load data\nnews = pd.read_csv(path)\n\n# subset data\nnews = news[[\"TITLE\",\"CATEGORY\"]]\n# distribution of classes\ndist = news.CATEGORY.value_counts()\n\n# display class distribution\nprint(dist)\n\n# display data\nprint(news.head())\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# stopwords \n\nstop = set(stopwords.words('english'))\n# retain only alphabets\nnews.TITLE = news.TITLE.apply(lambda x:re.sub(\"[^a-zA-Z]\", \" \",x))\n\n# convert to lowercase and tokenize\nnews.TITLE = news.TITLE.apply(lambda row:row.lower().split())\n\n# remove stopwords\nnews.TITLE = news.TITLE.apply(lambda row:[i for i in row if i not in stop] )\n\n# join list elements\nnews.TITLE = news.TITLE.apply(lambda x: ' '.join(x))\n\n# split into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(news['TITLE'], news['CATEGORY'], test_size=0.2, random_state=3)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize count vectorizer\ncount_vectorizer = CountVectorizer()\n# initialize tfidf vectorizer\ntfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))\n# fit and transform with count vectorizer\nX_train_count = count_vectorizer.fit_transform(X_train)\nX_test_count = count_vectorizer.transform(X_test)\n\n# fit and transform with tfidf vectorizer\nX_train_tfidf = tfidf_vectorizer.fit_transform(X_train)\nX_test_tfidf = tfidf_vectorizer.transform(X_test)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\n# initialize multinomial naive bayes\nnb_1 = MultinomialNB()\nnb_2 = MultinomialNB() \n# fit on count vectorizer training data\nnb_1.fit(X_train_count, y_train)\n# fit on tfidf vectorizer training data\nnb_2.fit(X_train_tfidf, y_train)\n\n# accuracy with count vectorizer\nacc_count_nb = accuracy_score(nb_1.predict(X_test_count), y_test)\n\n# accuracy with tfidf vectorizer\nacc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), y_test)\n\n# display accuracies\nprint(acc_count_nb)\nprint(acc_tfidf_nb)\n\n# Code ends here\n\n\n# --------------\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# initialize logistic regression\nlogreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))\nlogreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))\n# fit on count vectorizer training data\nlogreg_1.fit(X_train_count, y_train)\n\n# fit on tfidf vectorizer training data\nlogreg_2.fit(X_train_tfidf, y_train)\n\n# accuracy with count vectorizer\nacc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)\n# accuracy with tfidf vectorizer\nacc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)\n\n# display accuracies\nprint(acc_count_logreg)\nprint(acc_tfidf_logreg)\n# Code ends here\n\n\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.train_test_split"
]
] |
okdshin/onnx | [
"27b40225ea98f6412ae2879ed67211d49564af2a"
] | [
"onnx/backend/test/case/node/xor.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Xor(Base):\n\n @staticmethod\n def export():\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n )\n\n # 2d\n x = (np.random.randn(3, 4) > 0).astype(np.bool)\n y = (np.random.randn(3, 4) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor2d')\n\n # 3d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor3d')\n\n # 4d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor4d')\n\n @staticmethod\n def export_xor_broadcast():\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n )\n\n #3d vs 1d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v1d')\n\n #3d vs 2d\n x = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\n y = (np.random.randn(4, 5) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v2d')\n\n #4d vs 2d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v2d')\n\n #4d vs 3d\n x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\n y = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v3d')\n\n @staticmethod\n def export_xor_axis():\n x = (np.random.randn(5, 5, 5, 5) > 0).astype(np.bool)\n y = (np.random.randn(5) > 0).astype(np.bool)\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=0,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis, np.newaxis, np.newaxis])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis0')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=1,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis, np.newaxis,])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis1')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=2,\n )\n\n z = np.logical_xor(x, y[:, np.newaxis,])\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis2')\n\n node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n broadcast=1,\n axis=3,\n )\n\n z = np.logical_xor(x, y)\n expect(node, inputs=[x, y], outputs=[z],\n name='test_xor_axis3')"
] | [
[
"numpy.logical_xor",
"numpy.random.randn"
]
] |
GAA-UAM/scikit-fda | [
"a9953a3104195ce9796397d094b17b1b90fd090f"
] | [
"skfda/_utils/_utils.py"
] | [
"\"\"\"Module with generic methods.\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nimport numbers\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Iterable,\n List,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport scipy.integrate\nfrom numpy import ndarray\nfrom pandas.api.indexers import check_array_indexer\nfrom sklearn.base import clone\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom typing_extensions import Literal, Protocol\n\nfrom ..representation._typing import (\n ArrayLike,\n DomainRange,\n DomainRangeLike,\n GridPoints,\n GridPointsLike,\n)\nfrom ..representation.extrapolation import ExtrapolationLike\n\nRandomStateLike = Optional[Union[int, np.random.RandomState]]\n\nif TYPE_CHECKING:\n from ..exploratory.depth import Depth\n from ..representation import FData, FDataGrid\n from ..representation.basis import Basis\n T = TypeVar(\"T\", bound=FData)\n\n\ndef check_is_univariate(fd: FData) -> None:\n \"\"\"Check if an FData is univariate and raises an error.\n\n Args:\n fd: Functional object to check if is univariate.\n\n Raises:\n ValueError: If it is not univariate, i.e., `fd.dim_domain != 1` or\n `fd.dim_codomain != 1`.\n\n \"\"\"\n if fd.dim_domain != 1 or fd.dim_codomain != 1:\n domain_str = (\n \"\" if fd.dim_domain == 1\n else f\"(currently is {fd.dim_domain}) \"\n )\n\n codomain_str = (\n \"\" if fd.dim_codomain == 1\n else f\"(currently is {fd.dim_codomain})\"\n )\n\n raise ValueError(\n f\"The functional data must be univariate, i.e., \"\n f\"with dim_domain=1 {domain_str}\"\n f\"and dim_codomain=1 {codomain_str}\",\n )\n\n\ndef _check_compatible_fdata(fdata1: FData, fdata2: FData) -> None:\n \"\"\"Check that fdata is compatible.\"\"\"\n if (fdata1.dim_domain != fdata2.dim_domain):\n raise ValueError(\n f\"Functional data has incompatible domain dimensions: \"\n f\"{fdata1.dim_domain} != {fdata2.dim_domain}\",\n )\n\n if (fdata1.dim_codomain != fdata2.dim_codomain):\n raise ValueError(\n f\"Functional data has incompatible codomain dimensions: \"\n f\"{fdata1.dim_codomain} != {fdata2.dim_codomain}\",\n )\n\n\ndef _to_grid(\n X: FData,\n y: FData,\n eval_points: Optional[np.ndarray] = None,\n) -> Tuple[FDataGrid, FDataGrid]:\n \"\"\"Transform a pair of FDatas in grids to perform calculations.\"\"\"\n from .. import FDataGrid\n x_is_grid = isinstance(X, FDataGrid)\n y_is_grid = isinstance(y, FDataGrid)\n\n if eval_points is not None:\n X = X.to_grid(eval_points)\n y = y.to_grid(eval_points)\n elif x_is_grid and not y_is_grid:\n y = y.to_grid(X.grid_points[0])\n elif not x_is_grid and y_is_grid:\n X = X.to_grid(y.grid_points[0])\n elif not x_is_grid and not y_is_grid:\n X = X.to_grid()\n y = y.to_grid()\n\n return X, y\n\n\ndef _to_grid_points(grid_points_like: GridPointsLike) -> GridPoints:\n \"\"\"Convert to grid points.\n\n If the original list is one-dimensional (e.g. [1, 2, 3]), return list to\n array (in this case [array([1, 2, 3])]).\n\n If the original list is two-dimensional (e.g. 
[[1, 2, 3], [4, 5]]), return\n a list containing other one-dimensional arrays (in this case\n [array([1, 2, 3]), array([4, 5])]).\n\n In any other case the behaviour is unespecified.\n\n \"\"\"\n unidimensional = False\n\n if not isinstance(grid_points_like, Iterable):\n grid_points_like = [grid_points_like]\n\n if not isinstance(grid_points_like[0], Iterable):\n unidimensional = True\n\n if unidimensional:\n return (_int_to_real(np.asarray(grid_points_like)),)\n\n return tuple(_int_to_real(np.asarray(i)) for i in grid_points_like)\n\n\ndef _to_domain_range(sequence: DomainRangeLike) -> DomainRange:\n \"\"\"Convert sequence to a proper domain range.\"\"\"\n seq_aux = cast(\n Sequence[Sequence[float]],\n (sequence,) if isinstance(sequence[0], numbers.Real) else sequence,\n )\n\n tuple_aux = tuple(tuple(s) for s in seq_aux)\n\n if not all(len(s) == 2 and s[0] <= s[1] for s in tuple_aux):\n raise ValueError(\n \"Domain intervals should have 2 bounds for \"\n \"dimension: (lower, upper).\",\n )\n\n return cast(DomainRange, tuple_aux)\n\n\ndef _to_array_maybe_ragged(\n array: Iterable[ArrayLike],\n *,\n row_shape: Optional[Sequence[int]] = None,\n) -> np.ndarray:\n \"\"\"\n Convert to an array where each element may or may not be of equal length.\n\n If each element is of equal length the array is multidimensional.\n Otherwise it is a ragged array.\n\n \"\"\"\n def convert_row(row: ArrayLike) -> np.ndarray:\n r = np.array(row)\n\n if row_shape is not None:\n r = r.reshape(row_shape)\n\n return r\n\n array_list = [convert_row(a) for a in array]\n shapes = [a.shape for a in array_list]\n\n if all(s == shapes[0] for s in shapes):\n return np.array(array_list)\n\n res = np.empty(len(array_list), dtype=np.object_)\n\n for i, a in enumerate(array_list):\n res[i] = a\n\n return res\n\n\n@overload\ndef _cartesian_product(\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: Literal[False] = False,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _cartesian_product(\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: Literal[True],\n) -> Tuple[np.ndarray, Tuple[int, ...]]:\n pass\n\n\ndef _cartesian_product( # noqa: WPS234\n axes: Sequence[np.ndarray],\n *,\n flatten: bool = True,\n return_shape: bool = False,\n) -> Union[np.ndarray, Tuple[np.ndarray, Tuple[int, ...]]]:\n \"\"\"\n Compute the cartesian product of the axes.\n\n Computes the cartesian product of the axes and returns a numpy array of\n 1 dimension with all the possible combinations, for an arbitrary number of\n dimensions.\n\n Args:\n axes: List with axes.\n flatten: Whether to return the flatten array or keep one dimension per\n axis.\n return_shape: If ``True`` return the shape of the array before\n flattening.\n\n Returns:\n Numpy 2-D array with all the possible combinations.\n The entry (i,j) represent the j-th coordinate of the i-th point.\n If ``return_shape`` is ``True`` returns also the shape of the array\n before flattening.\n\n Examples:\n >>> from skfda._utils import _cartesian_product\n >>> axes = [[0,1],[2,3]]\n >>> _cartesian_product(axes)\n array([[0, 2],\n [0, 3],\n [1, 2],\n [1, 3]])\n\n >>> axes = [[0,1],[2,3],[4]]\n >>> _cartesian_product(axes)\n array([[0, 2, 4],\n [0, 3, 4],\n [1, 2, 4],\n [1, 3, 4]])\n\n >>> axes = [[0,1]]\n >>> _cartesian_product(axes)\n array([[0],\n [1]])\n \"\"\"\n cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)\n\n shape = cartesian.shape\n\n if flatten:\n cartesian = cartesian.reshape(-1, len(axes))\n\n if return_shape:\n return cartesian, 
shape\n\n return cartesian\n\n\ndef _same_domain(fd: Union[Basis, FData], fd2: Union[Basis, FData]) -> bool:\n \"\"\"Check if the domain range of two objects is the same.\"\"\"\n return np.array_equal(fd.domain_range, fd2.domain_range)\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: ArrayLike,\n *,\n aligned: Literal[True],\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: Sequence[ArrayLike],\n *,\n aligned: Literal[True],\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _reshape_eval_points(\n eval_points: Union[ArrayLike, Sequence[ArrayLike]],\n *,\n aligned: bool,\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n pass\n\n\ndef _reshape_eval_points(\n eval_points: Union[ArrayLike, Iterable[ArrayLike]],\n *,\n aligned: bool,\n n_samples: int,\n dim_domain: int,\n) -> np.ndarray:\n \"\"\"Convert and reshape the eval_points to ndarray.\n\n Args:\n eval_points: Evaluation points to be reshaped.\n aligned: Boolean flag. True if all the samples\n will be evaluated at the same evaluation_points.\n n_samples: Number of observations.\n dim_domain: Dimension of the domain.\n\n Returns:\n Numpy array with the eval_points, if\n evaluation_aligned is True with shape `number of evaluation points`\n x `dim_domain`. If the points are not aligned the shape of the\n points will be `n_samples` x `number of evaluation points`\n x `dim_domain`.\n\n \"\"\"\n if aligned:\n eval_points = np.asarray(eval_points)\n else:\n eval_points = cast(Iterable[ArrayLike], eval_points)\n\n eval_points = _to_array_maybe_ragged(\n eval_points,\n row_shape=(-1, dim_domain),\n )\n\n # Case evaluation of a single value, i.e., f(0)\n # Only allowed for aligned evaluation\n if aligned and (\n eval_points.shape == (dim_domain,)\n or (eval_points.ndim == 0 and dim_domain == 1)\n ):\n eval_points = np.array([eval_points])\n\n if aligned: # Samples evaluated at same eval points\n eval_points = eval_points.reshape(\n (eval_points.shape[0], dim_domain),\n )\n\n else: # Different eval_points for each sample\n\n if eval_points.shape[0] != n_samples:\n raise ValueError(\n f\"eval_points should be a list \"\n f\"of length {n_samples} with the \"\n f\"evaluation points for each sample.\",\n )\n\n return eval_points\n\n\ndef _one_grid_to_points(\n axes: GridPointsLike,\n *,\n dim_domain: int,\n) -> Tuple[np.ndarray, Tuple[int, ...]]:\n \"\"\"\n Convert a list of ndarrays, one per domain dimension, in the points.\n\n Returns also the shape containing the information of how each point\n is formed.\n \"\"\"\n axes = _to_grid_points(axes)\n\n if len(axes) != dim_domain:\n raise ValueError(\n f\"Length of axes should be {dim_domain}\",\n )\n\n cartesian, shape = _cartesian_product(axes, return_shape=True)\n\n # Drop domain size dimension, as it is not needed to reshape the output\n shape = shape[:-1]\n\n return cartesian, shape\n\n\nclass EvaluateMethod(Protocol):\n \"\"\"Evaluation method.\"\"\"\n\n def __call__(\n self,\n __eval_points: np.ndarray, # noqa: WPS112\n extrapolation: Optional[ExtrapolationLike],\n aligned: bool,\n ) -> np.ndarray:\n \"\"\"Evaluate a function.\"\"\"\n pass\n\n\n@overload\ndef _evaluate_grid(\n axes: GridPointsLike,\n *,\n evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: Literal[True] = True,\n) -> np.ndarray:\n pass\n\n\n@overload\ndef _evaluate_grid(\n axes: Iterable[GridPointsLike],\n *,\n 
evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: Literal[False],\n) -> np.ndarray:\n pass\n\n\ndef _evaluate_grid( # noqa: WPS234\n axes: Union[GridPointsLike, Iterable[GridPointsLike]],\n *,\n evaluate_method: EvaluateMethod,\n n_samples: int,\n dim_domain: int,\n dim_codomain: int,\n extrapolation: Optional[ExtrapolationLike] = None,\n aligned: bool = True,\n) -> np.ndarray:\n \"\"\"\n Evaluate the functional object in the cartesian grid.\n\n This method is called internally by :meth:`evaluate` when the argument\n `grid` is True.\n\n Evaluates the functional object in the grid generated by the cartesian\n product of the axes. The length of the list of axes should be equal\n than the domain dimension of the object.\n\n If the list of axes has lengths :math:`n_1, n_2, ..., n_m`, where\n :math:`m` is equal than the dimension of the domain, the result of the\n evaluation in the grid will be a matrix with :math:`m+1` dimensions and\n shape :math:`n_{samples} x n_1 x n_2 x ... x n_m`.\n\n If `aligned` is false each sample is evaluated in a\n different grid, and the list of axes should contain a list of axes for\n each sample.\n\n If the domain dimension is 1, the result of the behaviour of the\n evaluation will be the same than :meth:`evaluate` without the grid\n option, but with worst performance.\n\n Args:\n axes: List of axes to generated the grid where the\n object will be evaluated.\n evaluate_method: Function used to evaluate the functional object.\n n_samples: Number of samples.\n dim_domain: Domain dimension.\n dim_codomain: Codomain dimension.\n extrapolation: Controls the\n extrapolation mode for elements outside the domain range. By\n default it is used the mode defined during the instance of the\n object.\n aligned: If False evaluates each sample\n in a different grid.\n evaluate_method: method to use to evaluate the points\n n_samples: number of samples\n dim_domain: dimension of the domain\n dim_codomain: dimensions of the codomain\n\n Returns:\n Numpy array with dim_domain + 1 dimensions with\n the result of the evaluation.\n\n Raises:\n ValueError: If there are a different number of axes than the domain\n dimension.\n\n \"\"\"\n # Compute intersection points and resulting shapes\n if aligned:\n\n axes = cast(GridPointsLike, axes)\n\n eval_points, shape = _one_grid_to_points(axes, dim_domain=dim_domain)\n\n else:\n\n axes_per_sample = cast(Iterable[GridPointsLike], axes)\n\n axes_per_sample = list(axes_per_sample)\n\n eval_points_tuple, shape_tuple = zip(\n *[\n _one_grid_to_points(a, dim_domain=dim_domain)\n for a in axes_per_sample\n ],\n )\n\n if len(eval_points_tuple) != n_samples:\n raise ValueError(\n \"Should be provided a list of axis per sample\",\n )\n\n eval_points = _to_array_maybe_ragged(eval_points_tuple)\n\n # Evaluate the points\n evaluated = evaluate_method(\n eval_points,\n extrapolation=extrapolation,\n aligned=aligned,\n )\n\n # Reshape the result\n if aligned:\n\n res = evaluated.reshape(\n [n_samples] + list(shape) + [dim_codomain],\n )\n\n else:\n\n res = _to_array_maybe_ragged([\n r.reshape(list(s) + [dim_codomain])\n for r, s in zip(evaluated, shape_tuple)\n ])\n\n return res\n\n\ndef nquad_vec(\n func: Callable[[np.ndarray], np.ndarray],\n ranges: Sequence[Tuple[float, float]],\n) -> np.ndarray:\n \"\"\"Perform multiple integration of vector valued functions.\"\"\"\n initial_depth = len(ranges) - 1\n\n def integrate(*args: Any, depth: int) -> 
np.ndarray: # noqa: WPS430\n\n if depth == 0:\n f = functools.partial(func, *args)\n else:\n f = functools.partial(integrate, *args, depth=depth - 1)\n\n return scipy.integrate.quad_vec(f, *ranges[initial_depth - depth])[0]\n\n return integrate(depth=initial_depth)\n\n\ndef _map_in_batches(\n function: Callable[..., np.ndarray],\n arguments: Tuple[Union[FData, np.ndarray], ...],\n indexes: Tuple[np.ndarray, ...],\n memory_per_batch: Optional[int] = None,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"\n Map a function over samples of FData or ndarray tuples efficiently.\n\n This function prevents a large set of indexes to use all available\n memory and hang the PC.\n\n \"\"\"\n if memory_per_batch is None:\n # 256MB is not too big\n memory_per_batch = 256 * 1024 * 1024 # noqa: WPS432\n\n memory_per_element = sum(a.nbytes // len(a) for a in arguments)\n n_elements_per_batch_allowed = memory_per_batch // memory_per_element\n if n_elements_per_batch_allowed < 1:\n raise ValueError(\"Too few memory allowed for the operation\")\n\n n_indexes = len(indexes[0])\n\n assert all(n_indexes == len(i) for i in indexes)\n\n batches: List[np.ndarray] = []\n\n for pos in range(0, n_indexes, n_elements_per_batch_allowed):\n batch_args = tuple(\n a[i[pos:pos + n_elements_per_batch_allowed]]\n for a, i in zip(arguments, indexes)\n )\n\n batches.append(function(*batch_args, **kwargs))\n\n return np.concatenate(batches, axis=0)\n\n\ndef _pairwise_symmetric(\n function: Callable[..., np.ndarray],\n arg1: Union[FData, np.ndarray],\n arg2: Optional[Union[FData, np.ndarray]] = None,\n memory_per_batch: Optional[int] = None,\n **kwargs: Any,\n) -> np.ndarray:\n \"\"\"Compute pairwise a commutative function.\"\"\"\n dim1 = len(arg1)\n if arg2 is None or arg2 is arg1:\n indices = np.triu_indices(dim1)\n\n matrix = np.empty((dim1, dim1))\n\n triang_vec = _map_in_batches(\n function,\n (arg1, arg1),\n indices,\n memory_per_batch=memory_per_batch,\n **kwargs,\n )\n\n # Set upper matrix\n matrix[indices] = triang_vec\n\n # Set lower matrix\n matrix[(indices[1], indices[0])] = triang_vec\n\n return matrix\n\n dim2 = len(arg2)\n indices = np.indices((dim1, dim2))\n\n vec = _map_in_batches(\n function,\n (arg1, arg2),\n (indices[0].ravel(), indices[1].ravel()),\n memory_per_batch=memory_per_batch,\n **kwargs,\n )\n\n return vec.reshape((dim1, dim2))\n\n\ndef _int_to_real(array: np.ndarray) -> np.ndarray:\n \"\"\"Convert integer arrays to floating point.\"\"\"\n return array + 0.0\n\n\ndef _check_array_key(array: np.ndarray, key: Any) -> Any:\n \"\"\"Check a getitem key.\"\"\"\n key = check_array_indexer(array, key)\n if isinstance(key, tuple):\n non_ellipsis = [i for i in key if i is not Ellipsis]\n if len(non_ellipsis) > 1:\n raise KeyError(key)\n key = non_ellipsis[0]\n\n if isinstance(key, numbers.Integral): # To accept also numpy ints\n key = int(key)\n key = range(len(array))[key]\n\n return slice(key, key + 1)\n\n return key\n\n\ndef _check_estimator(estimator):\n from sklearn.utils.estimator_checks import (\n check_get_params_invariance,\n check_set_params,\n )\n\n name = estimator.__name__\n instance = estimator()\n check_get_params_invariance(name, instance)\n check_set_params(name, instance)\n\n\ndef _classifier_get_classes(y: ndarray) -> Tuple[ndarray, ndarray]:\n\n check_classification_targets(y)\n\n le = LabelEncoder()\n y_ind = le.fit_transform(y)\n\n classes = le.classes_\n\n if classes.size < 2:\n raise ValueError(\n f'The number of classes has to be greater than'\n f'one; got {classes.size} class',\n )\n return 
classes, y_ind\n\n\ndef _classifier_get_depth_methods(\n classes: ndarray,\n X: T,\n y_ind: ndarray,\n depth_methods: Sequence[Depth[T]],\n) -> Sequence[Depth[T]]:\n return [\n clone(depth_method).fit(X[y_ind == cur_class])\n for cur_class in range(classes.size)\n for depth_method in depth_methods\n ]\n\n\ndef _classifier_fit_depth_methods(\n X: T,\n y: ndarray,\n depth_methods: Sequence[Depth[T]],\n) -> Tuple[ndarray, Sequence[Depth[T]]]:\n classes, y_ind = _classifier_get_classes(y)\n\n class_depth_methods_ = _classifier_get_depth_methods(\n classes, X, y_ind, depth_methods,\n )\n\n return classes, class_depth_methods_\n\n\n_DependenceMeasure = Callable[[np.ndarray, np.ndarray], np.ndarray]\n\n\ndef _compute_dependence(\n X: np.ndarray,\n y: np.ndarray,\n *,\n dependence_measure: _DependenceMeasure,\n) -> np.ndarray:\n \"\"\"\n Compute dependence between points and target.\n\n Computes the dependence of each point in each trajectory in X with the\n corresponding class label in Y.\n\n \"\"\"\n from dcor import rowwise\n\n # Move n_samples to the end\n # The shape is now input_shape + n_samples + n_output\n X = np.moveaxis(X, 0, -2)\n\n input_shape = X.shape[:-2]\n\n # Join input in a list for rowwise\n X = X.reshape(-1, X.shape[-2], X.shape[-1])\n\n if y.ndim == 1:\n y = np.atleast_2d(y).T\n Y = np.array([y] * len(X))\n\n dependence_results = rowwise(dependence_measure, X, Y)\n\n return dependence_results.reshape(input_shape)\n"
] | [
[
"sklearn.utils.multiclass.check_classification_targets",
"numpy.meshgrid",
"numpy.empty",
"numpy.atleast_2d",
"sklearn.base.clone",
"sklearn.utils.estimator_checks.check_set_params",
"numpy.moveaxis",
"numpy.asarray",
"numpy.triu_indices",
"pandas.api.indexers.check_array_indexer",
"numpy.indices",
"sklearn.preprocessing.LabelEncoder",
"numpy.array_equal",
"numpy.array",
"numpy.concatenate",
"sklearn.utils.estimator_checks.check_get_params_invariance"
]
] |
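The code field in the row above ends with helpers that evaluate a commutative function only on the upper triangle of a pairwise matrix and mirror the result into the lower triangle. A minimal, self-contained NumPy sketch of that idea follows; the helper name `pairwise_symmetric` and the distance function used to exercise it are illustrative placeholders, not part of the row's library.

```python
import numpy as np

def pairwise_symmetric(func, samples):
    """Evaluate func(a, b) for every pair, exploiting func(a, b) == func(b, a)."""
    n = len(samples)
    rows, cols = np.triu_indices(n)        # upper-triangle index pairs (diagonal included)
    values = np.array([func(samples[i], samples[j]) for i, j in zip(rows, cols)])

    matrix = np.empty((n, n))
    matrix[rows, cols] = values            # fill the upper triangle
    matrix[cols, rows] = values            # mirror into the lower triangle
    return matrix

if __name__ == "__main__":
    x = np.random.rand(5, 3)
    d = pairwise_symmetric(lambda a, b: np.linalg.norm(a - b), x)
    assert np.allclose(d, d.T)             # symmetric by construction
```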
tombackstrom/mdct | [
"f59e708f9a7f65ee672dbf44e6f164e79c82d83a"
] | [
"tests/test_windows.py"
] | [
"import pytest\nimport numpy\nimport mdct.windows\n\n\ndef test_kbd():\n M = 100\n w = mdct.windows.kaiser_derived(M, beta=4.)\n\n assert numpy.allclose(w[:M//2] ** 2 + w[-M//2:] ** 2, 1.)\n\n with pytest.raises(ValueError):\n mdct.windows.kaiser_derived(M + 1, beta=4.)\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(2, beta=numpy.pi/2)[:1],\n [numpy.sqrt(2)/2])\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(4, beta=numpy.pi/2)[:2],\n [0.518562710536, 0.855039598640])\n\n assert numpy.allclose(\n mdct.windows.kaiser_derived(6, beta=numpy.pi/2)[:3],\n [0.436168993154, 0.707106781187, 0.899864772847])\n"
] | [
[
"numpy.sqrt",
"numpy.allclose"
]
] |
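The test above asserts the Princen-Bradley condition `w[:M//2]**2 + w[-M//2:]**2 == 1` for `mdct.windows.kaiser_derived`. As a hedged sketch, one standard Kaiser-Bessel-derived construction built from SciPy's Kaiser window (not necessarily the package's exact implementation) satisfies the same property and can be checked without the `mdct` package:

```python
import numpy as np
from scipy.signal.windows import kaiser

def kbd(M, beta):
    """Kaiser-Bessel-derived window of even length M (one common construction)."""
    d = kaiser(M // 2 + 1, beta)               # symmetric Kaiser prototype
    csum = np.cumsum(d)
    half = np.sqrt(csum[:-1] / csum[-1])       # first half of the window
    return np.concatenate([half, half[::-1]])  # second half mirrors the first

M = 100
w = kbd(M, beta=4.0)
assert np.allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.0)  # Princen-Bradley condition
```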
dpetrini/nova | [
"00b7637901420f68c7d805c13ccd4c39d514efb1"
] | [
"trainer.py"
] | [
"from matplotlib.pyplot import show\nimport torch\nfrom torch.autograd import Variable\nfrom torch.cuda.amp import GradScaler, autocast\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\n\nfrom callbacks.cb_handler import CallbackHandler\nfrom callbacks.cb_base import BaseCB\nfrom callbacks.cb_lr_patch_clf import LR_SchedCB_patch\nfrom callbacks.cb_lr_full_clf import LR_SchedCB_full\nfrom callbacks.cb_lr_2views_clf import LR_SchedCB_2views\nfrom callbacks.cb_lr_w_cyc_cos import LR_SchedCB_W_Cyc_Cos\nfrom callbacks.cb_lr_w_cos import LR_SchedCB_W_Cos\nfrom callbacks.cb_auc import AUC_CB\n\n# from parallel import DataParallelModel, DataParallelCriterion\nfrom util.util import show_auc, calc_auc_desv\n\nparallel = False\n\n#APAGAR\nimport cv2\n\n# Accuracy\ndef acc(y_hat, labels):\n \"\"\" Default accuracy \"\"\"\n\n # para parallel\n if len(y_hat) > 1 and parallel:\n y_hat = torch.cat(y_hat)\n\n return (torch.argmax(y_hat, dim=1) == labels).float().sum()\n\n\nclass Trainer():\n \"\"\"\n Many possible configurations for Trainer\n config = {\n 'num_epochs': NUM_EPOCHS,\n 'batch_size': MINI_BATCH,\n 'name': 'example',\n 'title': 'Cats & Dogs Classifier',\n 'save_last': True, # optional: Save last model (default=False)\n 'save_best': True, # optional: Save best models (ACC, {AUC}) (default=True)\n 'stable_metric: N # optional: extend epochs number to wait N epochs with no metric change (ex.AUC)\n 'save_checkpoints': N, # Save checkpoint each N epochs\n 'features': ['auc'], # optional: features like auc stats or some scheduler (if none default:optim)\n 'save_path': folder, # if want to save artifacts in other place (eg.cloud)\n 'show_plots': False, # if want to show plots\n 'make_plots': False, # if want to disable plots\n 'cv_k': (number), # interactio number if using Cross Validation\n }\n \"\"\"\n\n def __init__(self, model, train_dataloader, val_dataloader,\n loss_criterion, optimizer, optimizer_args,\n device, config):\n self.model = model\n self.device = device\n self.loss_criterion = loss_criterion\n\n # parts of config are only retrieved in callbacks\n self.epochs = int(config['num_epochs']) if 'num_epochs' in config else 10\n self.mini_batch = int(config['batch_size']) if 'batch_size' in config else 1\n self.first_epoch = int(config['start_epoch']) if 'start_epoch' in config else 1\n self.stable_metric = int(config['stable_metric']) if 'stable_metric' in config else False\n self.name = config['name'] if 'name' in config else 'default'\n self.title = config['title'] if 'title' in config else 'Classifier'\n self.features = config['features'] if 'features' in config else []\n self.make_plots = config['make_plots'] if 'make_plots' in config else True\n\n if train_dataloader:\n self.train_dataloader = train_dataloader\n else:\n return\n\n self.train_dataloader = train_dataloader\n self.val_dataloader = val_dataloader\n self.optimizer = optimizer\n self.optimizer_args = optimizer_args\n\n print(self.title)\n\n # Load Callbacks for this session\n callbacks = [BaseCB(self.name, self.title, config)]\n for feat in self.features:\n if feat == 'auc':\n callbacks.append(AUC_CB(self.name, config))\n if feat == 'lr_step_full':\n callbacks.append(LR_SchedCB_full())\n if feat == 'lr_step_patch':\n callbacks.append(LR_SchedCB_patch())\n if feat == 'lr_step_2views':\n callbacks.append(LR_SchedCB_2views())\n if feat == 'lr_warmup_cos':\n callbacks.append(LR_SchedCB_W_Cos())\n if feat == 'lr_warmup_cyc_cos':\n callbacks.append(LR_SchedCB_W_Cyc_Cos())\n if feat == 'LR_SchedCB_W_Cos':\n 
callbacks.append(LR_SchedCB_W_Cos())\n self.cb = CallbackHandler(callbacks)\n\n\n def train_and_validate(self, **kwargs):\n \"\"\"\n Main train and validate function that runs main loop (fit).\n Receives all parameters and feed callback system.\n Loop through epochs and executes pytorch forward, loss,\n backpropagation and optimization (grads calc).\n Returns the model trained.\n \"\"\"\n\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n input_dict = kwargs.get('input_dict') if kwargs.get('input_dict') else []\n\n if not self.cb.begin_train_val(self.epochs, self.model, self.train_dataloader,\n self.val_dataloader, self.mini_batch, self.optimizer):\n return\n\n self.cb.update_loss(self.loss_criterion, calc_acc)\n\n device = self.device\n\n for epoch in range(self.first_epoch, self.epochs+1):\n self.model.train()\n train_loss, train_acc = 0.0, 0.0\n val_loss, val_acc = 0.0, 0.0\n\n if not self.cb.begin_epoch(epoch): return # noqa: E701\n\n optim = self.cb.update_LR(epoch, self.model, self.optimizer, self.optimizer_args)\n if optim: self.optimizer = optim\n\n # Train loop\n for _, (inputs, labels) in enumerate(self.train_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n # inserting MIXUP handling\n res = self.cb.begin_batch(inputs, labels)\n if res: inputs, labels, self.loss_criterion, calc_acc = res\n\n self.optimizer.zero_grad() # clean existing gradients\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean() # list in this case\n loss.backward() # backprop the gradients\n self.optimizer.step() # update parameters\n train_loss += loss.item() * labels.size(0) # inputs.size(0) == mini_batch size\n train_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step(labels.size(0), labels, outputs)\n\n # validation - no gradient tracking needed\n with torch.no_grad():\n self.model.eval()\n self.cb.begin_val()\n\n # validation loop\n for _, (inputs, labels) in enumerate(self.val_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n val_loss += loss.item() * labels.size(0) # inputs.size(0) == mini_batch size\n val_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step_val(labels.size(0), labels, outputs)\n\n self.cb.after_epoch(self.model, train_acc, train_loss, val_acc, val_loss)\n\n self.cb.after_train_val()\n\n return self.model\n\n def train_and_validate_amp(self, **kwargs):\n \"\"\"\n Mixed precision (automatic) version for train_and_validate.\n Uses FP16 and FP32 in main loop with pytorch Automatic Mixed Precision.\n In simple tests: use 75% of memory in 66% of time. 
Less memory and faster.\n Sometimes it just don't work and get worse, like for resnest...\n \"\"\"\n\n assert torch.__version__ >= '1.6.0', \"[Mixed precision] Please use PyTorch 1.6.0+\"\n\n print('Using AMP')\n\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n input_dict = kwargs.get('input_dict') if kwargs.get('input_dict') else []\n\n if not self.cb.begin_train_val(self.epochs, self.model, self.train_dataloader,\n self.val_dataloader, self.mini_batch, self.optimizer):\n return\n\n # Creates a GradScaler once at the beginning of training.\n scaler = GradScaler()\n\n device = self.device\n\n # for epoch in range(self.first_epoch, self.epochs+1):\n epoch = self.first_epoch # suport for \"wait N epochs after best metric\"\n last_epoch = self.epochs\n while epoch <= last_epoch:\n self.model.train()\n train_loss, train_acc = 0.0, 0.0\n val_loss, val_acc = 0.0, 0.0\n\n if not self.cb.begin_epoch(epoch): return # noqa: E701\n\n optim = self.cb.update_LR(epoch, self.model, self.optimizer, self.optimizer_args)\n if optim: self.optimizer = optim\n\n # Train loop\n for _, (inputs, labels) in enumerate(self.train_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n self.optimizer.zero_grad() # clean existing gradients\n # Runs the forward pass with autocasting.\n with autocast():\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean() # list in this case\n scaler.scale(loss).backward() # backward() on scaled loss for scaled gradients. \n scaler.step(self.optimizer) # update parameters\n scaler.update() # Updates the scale for next iteration.\n\n train_loss += loss.item() * labels.size(0) # == mini_batch size\n train_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step(labels.size(0), labels, outputs)\n\n # validation - no gradient tracking needed\n with torch.no_grad():\n self.model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(self.val_dataloader):\n\n if isinstance(inputs, dict):\n for key in input_dict:\n inputs[key] = inputs[key].to(device)\n else:\n inputs = Variable(inputs.to(device))\n\n labels = Variable(labels.to(device))\n\n outputs = self.model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n val_loss += loss.item() * labels.size(0) # == mini_batch size\n val_acc += calc_acc(outputs, labels).item()\n\n self.cb.after_step_val(labels.size(0), labels, outputs)\n\n self.cb.after_epoch(self.model, train_acc, train_loss, val_acc, val_loss)\n\n epoch += 1\n # print('-', self.cb.best_metric_epoch[self.cb.metric_name[-1]], last_epoch)\n # Is use stable metric - will stop training earlier, after \n # stable_metric epochs without validation metric (to be selected) improve\n # last_epoch = self.epochs if not self.stable_metric else max(self.epochs, self.cb.best_metric_epoch[self.cb.metric_name[-1]] + self.stable_metric)\n # for metric in self.cb.metric_name:\n # print(metric)\n last_epoch = self.epochs if not self.stable_metric else min(self.epochs, self.cb.best_metric_epoch[self.cb.metric_name[-1]] + self.stable_metric)\n\n self.cb.after_train_val()\n\n values = [self.cb.best_metric, self.cb.best_metric_epoch, self.cb.elapsed_mins, \n self.cb.metric_name, self.cb.loss_plot, self.cb.metric_plot, \n self.cb.best_model_file]\n\n return 
values\n\n\n def run_test(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader according to model_type.\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n quiet = kwargs.get('quiet') if kwargs.get('quiet') else False\n\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'bootstrap':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n\n if isinstance(inputs, dict):\n for key in ['CC', 'MLO']:\n inputs[key] = inputs[key].to(device)\n labels = Variable(labels.to(device))\n else:\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n if parallel:\n loss = loss.mean()\n test_loss += loss.item() * labels.size(0)\n test_acc += calc_acc(outputs, labels).item()\n\n batch_val_counter += labels.size(0)\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n if not quiet:\n print(f'Model: {model_type} - Test accuracy : {avg_test_acc:.5f}' +\n f' Test loss : {avg_test_loss:.5f}')\n\n return avg_test_acc \n\n\n def run_test_auc(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader, calculating AUC and ROC curve\n According to model_type:\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n If we are running test iunference only can pass model through kwargs.\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n model = kwargs.get('model') if kwargs.get('model') else None\n show_results = kwargs.get('show_results') if kwargs.get('show_results') else False\n m_positive = kwargs.get('m') if kwargs.get('m') else False\n n_negative = kwargs.get('n') if kwargs.get('n') else False\n\n if model is None:\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'test':\n model = self.model\n elif model_type == 'bootstrap':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n y_hat_auc, label_auc = [], []\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n if isinstance(inputs, dict):\n for key in ['CC', 'MLO']:\n inputs[key] = inputs[key].to(device)\n labels = Variable(labels.to(device))\n else:\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n test_loss += loss.item() * labels.size(0)\n\n # calculate acc\n test_acc += calc_acc(outputs, labels).item()\n batch_val_counter += labels.size(0)\n\n # Store auc for malignant\n label_auc = np.append(label_auc, labels.cpu().detach().numpy())\n y_hat_auc = np.append(y_hat_auc, torch.softmax(outputs, dim=1)[:, 1].cpu().detach().numpy())\n\n # enter show result mode\n if self.mini_batch == 1 and show_results:\n 
print(f'{labels.item()} {torch.softmax(outputs, dim=1)[:, 1].item():.3f}')\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n print(f\"Model: {model_type} - Test accuracy : {avg_test_acc:.3f}\" +\n f\" Test loss : {avg_test_loss:.4f}\", end='')\n\n # calculate AUC TEST\n auc_mal_val = roc_auc_score(label_auc.ravel(), y_hat_auc.ravel())\n # print(f' AUC Malignant: {auc_mal_val:.4f}', end='')\n if m_positive and n_negative:\n auc_final = f'{auc_mal_val:.4f}±{calc_auc_desv(m_positive, n_negative, auc_mal_val):.4f}'\n # print(f'±{calc_auc_desv(m_positive, n_negative, auc_mal_val):.4f}')\n print(f' AUC Malignant: {auc_final}')\n else:\n auc_final = f'{auc_mal_val:.4f}'\n print(f' AUC Malignant: {auc_final}')\n # print()\n\n if self.make_plots:\n show_auc(label_auc, y_hat_auc, self.title, show_plt=False)\n \n # return auc_mal_val\n return auc_final\n\n\n # Not fully tested yet (2021-05)\n # it seems to be working - maybe integrate in single function as above\n # and use kwargs to indicate that it is test-data- aug?\n def run_test_data_aug_auc(self, test_dataloader, model_type, **kwargs):\n \"\"\" Run test from test_dataloader, calculating AUC and ROC curve\n --> Using test-data augmentation: rotation 0°, 90°, 180°, 270°\n --> All rotated sample will be infered and AUC will consider all.\n According to model_type:\n if model_type = 'normal' : use last saved model\n if model_type = 'best' : use best model\n If we are running test iunference only can pass model through kwargs.\n Uses: loss function from Trainer\n Input: test_dataloader\n \"\"\"\n calc_acc = kwargs.get('accuracy') if kwargs.get('accuracy') else acc\n model = kwargs.get('model') if kwargs.get('model') else None\n\n if model is None:\n if model_type == 'normal':\n model = self.cb.last_model\n elif model_type == 'best':\n model = self.cb.best_model\n elif model_type == 'test':\n model = self.model\n\n test_acc, test_loss = 0., 0.\n batch_val_counter = 0\n y_hat_auc, label_auc = [], []\n device = self.device\n\n with torch.no_grad():\n model.eval()\n\n # validation loop\n for _, (inputs, labels) in enumerate(test_dataloader):\n for rot in range(0,4):\n \n # print(rot, inputs.shape)\n inputs = torch.rot90(inputs, rot, [2, 3])\n # inputs = Variable(inputs.to(device))\n # labels = Variable(labels.to(device))\n # print(counter, rot, inputs.shape)\n\n inputs = Variable(inputs.to(device))\n labels = Variable(labels.to(device))\n\n # img = inputs.cpu().detach().numpy()\n # img = img.transpose(0,2,3,1)\n # print(img[0, :, :, 0:3].shape)\n # cv2.imwrite('thrash/test-aug_'+str(rot)+'.png', img[0, :, :, 0:3]*65535)\n\n outputs = model(inputs) # forward pass\n loss = self.loss_criterion(outputs, labels) # compute loss\n test_loss += loss.item() * labels.size(0)\n\n # calculate acc\n test_acc += calc_acc(outputs, labels).item()\n batch_val_counter += labels.size(0)\n\n # Store auc for malignant\n label_auc = np.append(label_auc, labels.cpu().detach().numpy())\n y_hat_auc = np.append(y_hat_auc, torch.softmax(outputs, dim=1)[:, 1].cpu().detach().numpy())\n\n # enter show result mode\n if self.mini_batch == 1:\n print(f'{labels.item()} {torch.softmax(outputs, dim=1)[:, 1].item():.3f}')\n\n print('batch_val_counter ', batch_val_counter)\n\n # Find average test loss and test accuracy\n avg_test_loss = test_loss/batch_val_counter\n avg_test_acc = test_acc/batch_val_counter\n\n print(f\"Model: {model_type} - Test accuracy : {avg_test_acc:.3f}\" +\n f\" Test loss : 
{avg_test_loss:.4f}\", end='')\n\n # calculate AUC TEST\n auc_mal_val = roc_auc_score(label_auc.ravel(), y_hat_auc.ravel())\n print(f' AUC Malignant: {auc_mal_val:.4f}')\n\n if self.make_plots:\n show_auc(label_auc, y_hat_auc, self.title, show_plt=False)\n \n return auc_mal_val\n"
] | [
[
"torch.cuda.amp.GradScaler",
"torch.argmax",
"torch.no_grad",
"torch.cuda.amp.autocast",
"torch.rot90",
"torch.cat",
"torch.softmax"
]
] |
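`train_and_validate_amp` in the row above follows the standard PyTorch automatic mixed precision recipe: forward under `autocast`, backward on a scaled loss, then `scaler.step` and `scaler.update`. Below is a hedged, minimal sketch of that pattern in isolation; the tiny linear model and random batch are placeholders, and `enabled=torch.cuda.is_available()` keeps the sketch runnable on CPU, where the scaler and autocast simply become no-ops.

```python
import torch
from torch import nn
from torch.cuda.amp import GradScaler, autocast

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(10, 2).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
scaler = GradScaler(enabled=torch.cuda.is_available())

inputs = torch.randn(8, 10, device=device)
labels = torch.randint(0, 2, (8,), device=device)

optimizer.zero_grad()
with autocast(enabled=torch.cuda.is_available()):
    outputs = model(inputs)                # forward pass runs in mixed precision on GPU
    loss = criterion(outputs, labels)
scaler.scale(loss).backward()              # backward on the scaled loss
scaler.step(optimizer)                     # unscales gradients, then optimizer.step()
scaler.update()                            # adjusts the scale factor for the next step
```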
QDaria/pennylane | [
"5a28983fc7bd950cde8a4014e54261fef4b54293"
] | [
"tests/templates/test_subroutines/test_qmc.py"
] | [
"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pytest\nfrom scipy.stats import norm\n\nimport pennylane as qml\nfrom pennylane.templates.subroutines.qmc import (\n QuantumMonteCarlo,\n _make_V,\n _make_Z,\n func_to_unitary,\n make_Q,\n probs_to_unitary,\n)\nfrom pennylane.wires import Wires\n\n\nclass TestProbsToUnitary:\n \"\"\"Tests for the probs_to_unitary function\"\"\"\n\n def test_invalid_distribution_sum_to_not_one(self):\n \"\"\"Test if a ValueError is raised when a distribution that does not sum to one is input\"\"\"\n p = np.ones(4)\n with pytest.raises(ValueError, match=\"A valid probability distribution of non-negative\"):\n probs_to_unitary(p)\n\n def test_invalid_distribution_negative(self):\n \"\"\"Test if a ValueError is raised when a distribution with a negative value is input\"\"\"\n p = [2, 0, 0, -1]\n with pytest.raises(ValueError, match=\"A valid probability distribution of non-negative\"):\n probs_to_unitary(p)\n\n ps = [\n [0.46085261032920616, 0.5391473896707938],\n [0.2111821738452515, 0.4235979103670337, 0.36521991578771484],\n [0.3167916924190049, 0.2651843704361695, 0.1871934980886578, 0.23083043905616774],\n [0.8123242419241959, 0.07990911578859018, 0.07983919018902215, 0.027927452098191852],\n ]\n\n @pytest.mark.parametrize(\"p\", ps)\n def test_fixed_examples(self, p):\n \"\"\"Test if the correct unitary is returned for fixed input examples. 
A correct unitary has\n its first column equal to the square root of the distribution and satisfies\n U @ U.T = U.T @ U = I.\"\"\"\n unitary = probs_to_unitary(p)\n assert np.allclose(np.sqrt(p), unitary[:, 0])\n assert np.allclose(unitary @ unitary.T, np.eye(len(unitary)))\n assert np.allclose(unitary.T @ unitary, np.eye(len(unitary)))\n\n\nclass TestFuncToUnitary:\n \"\"\"Tests for the func_to_unitary function\"\"\"\n\n def test_not_bounded_func(self):\n \"\"\"Test if a ValueError is raised if a function that evaluates outside of the [0, 1]\n interval is provided\"\"\"\n func = lambda i: np.sin(i)\n\n with pytest.raises(ValueError, match=\"func must be bounded within the interval\"):\n func_to_unitary(func, 8)\n\n def test_example(self):\n \"\"\"Test for a fixed example if the returned unitary maps input states to the\n expected output state as well as if the unitary satisfies U @ U.T = U.T @ U = I.\"\"\"\n M = 8\n func = lambda i: np.sin(i) ** 2\n\n r = func_to_unitary(func, M)\n\n for i in range(M):\n # The control qubit is the last qubit, so we have to look at every other term\n # using [::2].\n output_state = r[::2][i]\n output_0 = output_state[::2]\n output_1 = output_state[1::2]\n assert np.allclose(output_0[i], np.sqrt(1 - func(i)))\n assert np.allclose(output_1[i], np.sqrt(func(i)))\n\n assert np.allclose(r @ r.T, np.eye(2 * M))\n assert np.allclose(r.T @ r, np.eye(2 * M))\n\n def test_example_with_pl(self):\n \"\"\"Test for a fixed example if the returned unitary behaves as expected\n when used within a PennyLane circuit, i.e., so that the probability of the final control\n wire encodes the function.\"\"\"\n wires = 3\n M = 2**wires\n func = lambda i: np.sin(i) ** 2\n\n r = func_to_unitary(func, M)\n\n dev = qml.device(\"default.qubit\", wires=(wires + 1))\n\n @qml.qnode(dev)\n def apply_r(input_state):\n qml.QubitStateVector(input_state, wires=range(wires))\n qml.QubitUnitary(r, wires=range(wires + 1))\n return qml.probs(wires)\n\n for i, state in enumerate(np.eye(M)):\n p = apply_r(state)[1]\n assert np.allclose(p, func(i))\n\n\ndef test_V():\n \"\"\"Test for the _make_V function\"\"\"\n dim = 4\n\n V_expected = -np.eye(dim)\n V_expected[1, 1] = V_expected[3, 3] = 1\n V = _make_V(dim)\n\n assert np.allclose(V, V_expected)\n\n\ndef test_Z():\n \"\"\"Test for the _make_Z function\"\"\"\n dim = 4\n\n Z_expected = -np.eye(dim)\n Z_expected[0, 0] = 1\n Z = _make_Z(dim)\n\n assert np.allclose(Z, Z_expected)\n\n\ndef test_Q():\n \"\"\"Test for the make_Q function using a fixed example\"\"\"\n\n A = np.array(\n [\n [0.85358423 - 0.32239299j, -0.12753659 + 0.38883306j],\n [0.39148136 - 0.11915985j, 0.34064316 - 0.84646648j],\n ]\n )\n R = np.array(\n [\n [\n 0.45885289 + 0.03972856j,\n 0.2798685 - 0.05981098j,\n 0.64514642 - 0.51555038j,\n 0.11015177 - 0.10877695j,\n ],\n [\n 0.19407005 - 0.35483005j,\n 0.29756077 + 0.80153453j,\n -0.19147104 + 0.0507968j,\n 0.15553799 - 0.20493631j,\n ],\n [\n 0.35083011 - 0.20807392j,\n -0.27602911 - 0.13934692j,\n 0.11874165 + 0.34532609j,\n -0.45945242 - 0.62734969j,\n ],\n [\n -0.11379919 - 0.66706921j,\n -0.21120956 - 0.2165113j,\n 0.30133006 + 0.23367271j,\n 0.54593491 + 0.08446372j,\n ],\n ]\n )\n\n Q_expected = np.array(\n [\n [\n -0.46513201 - 1.38777878e-17j,\n -0.13035515 - 2.23341802e-01j,\n -0.74047856 + 7.08652160e-02j,\n -0.0990036 - 3.91977176e-01j,\n ],\n [\n 0.13035515 - 2.23341802e-01j,\n 0.46494302 + 0.00000000e00j,\n 0.05507901 - 1.19182067e-01j,\n -0.80370146 - 2.31904873e-01j,\n ],\n [\n -0.74047856 - 7.08652160e-02j,\n 
-0.05507901 - 1.19182067e-01j,\n 0.62233412 - 2.77555756e-17j,\n -0.0310774 - 2.02894077e-01j,\n ],\n [\n 0.0990036 - 3.91977176e-01j,\n -0.80370146 + 2.31904873e-01j,\n 0.0310774 - 2.02894077e-01j,\n -0.30774091 + 2.77555756e-17j,\n ],\n ]\n )\n\n Q = make_Q(A, R)\n\n assert np.allclose(Q, Q_expected)\n\n\nclass TestQuantumMonteCarlo:\n \"\"\"Tests for the QuantumMonteCarlo template\"\"\"\n\n @staticmethod\n def func(i):\n return np.sin(i) ** 2\n\n def test_non_flat(self):\n \"\"\"Test if a ValueError is raised when a non-flat array is input\"\"\"\n p = np.ones((4, 1)) / 4\n with pytest.raises(ValueError, match=\"The probability distribution must be specified as a\"):\n QuantumMonteCarlo(p, self.func, range(3), range(3, 5))\n\n def test_wrong_size_p(self):\n \"\"\"Test if a ValueError is raised when a probability distribution is passed whose length\n cannot be mapped to qubits\"\"\"\n p = np.ones(5) / 5\n with pytest.raises(ValueError, match=\"The probability distribution must have a length\"):\n QuantumMonteCarlo(p, self.func, range(3), range(3, 5))\n\n def test_unexpected_target_wires_number(self):\n \"\"\"Test if a ValueError is raised when the number of target wires is incompatible with the\n expected number of target wires inferred from the length of the input probability\n distribution\"\"\"\n p = np.ones(4) / 4\n with pytest.raises(\n ValueError,\n match=\"The probability distribution of dimension 4 requires\" \" 3 target wires\",\n ):\n QuantumMonteCarlo(p, self.func, range(4), range(4, 6))\n\n def test_expected_circuit(self):\n \"\"\"Test if the circuit applied when using the QMC template is the same as the expected\n circuit for a fixed example\"\"\"\n p = np.ones(4) / 4\n target_wires, estimation_wires = Wires(range(3)), Wires(range(3, 5))\n\n op = QuantumMonteCarlo(p, self.func, target_wires, estimation_wires)\n tape = op.expand()\n\n # Do expansion in two steps to avoid also decomposing the first QubitUnitary\n queue_before_qpe = tape.operations[:2]\n\n # 2-qubit decomposition has 10 operations, and after is a 3-qubit gate so start at 11\n queue_after_qpe = tape.expand().operations[11:]\n\n A = probs_to_unitary(p)\n R = func_to_unitary(self.func, 4)\n\n assert len(queue_before_qpe) == 2\n assert queue_before_qpe[0].name == \"QubitUnitary\"\n assert queue_before_qpe[1].name == \"QubitUnitary\"\n assert np.allclose(queue_before_qpe[0].matrix, A)\n assert np.allclose(queue_before_qpe[1].matrix, R)\n assert queue_before_qpe[0].wires == target_wires[:-1]\n assert queue_before_qpe[1].wires == target_wires\n\n Q = make_Q(A, R)\n\n with qml.tape.QuantumTape() as qpe_tape:\n qml.QuantumPhaseEstimation(Q, target_wires, estimation_wires)\n\n qpe_tape = qpe_tape.expand()\n\n assert len(queue_after_qpe) == len(qpe_tape.operations)\n assert all(o1.name == o2.name for o1, o2 in zip(queue_after_qpe, qpe_tape.operations))\n assert all(\n np.allclose(o1.matrix, o2.matrix)\n for o1, o2 in zip(queue_after_qpe, qpe_tape.operations)\n )\n assert all(o1.wires == o2.wires for o1, o2 in zip(queue_after_qpe, qpe_tape.operations))\n\n def test_expected_value(self):\n \"\"\"Test that the QuantumMonteCarlo template can correctly estimate the expectation value\n following the example in the usage details\"\"\"\n m = 5\n M = 2**m\n\n xmax = np.pi\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.cos(xs[i]) ** 2\n\n estimates = []\n\n for n in range(4, 11):\n N = 2**n\n\n target_wires = range(m + 1)\n estimation_wires = 
range(m + 1, n + m + 1)\n\n dev = qml.device(\"default.qubit\", wires=(n + m + 1))\n\n @qml.qnode(dev)\n def circuit():\n qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n phase_estimated = np.argmax(circuit()[: int(N / 2)]) / N\n mu_estimated = (1 - np.cos(np.pi * phase_estimated)) / 2\n estimates.append(mu_estimated)\n\n exact = 0.432332358381693654\n\n # Check that the error is monotonically decreasing\n for i in range(len(estimates) - 1):\n err1 = np.abs(estimates[i] - exact)\n err2 = np.abs(estimates[i + 1] - exact)\n assert err1 >= err2\n\n assert np.allclose(estimates[-1], exact, rtol=1e-3)\n\n def test_expected_value_custom_wires(self):\n \"\"\"Test that the QuantumMonteCarlo template can correctly estimate the expectation value\n following the example in the usage details when the wires have custom labels\"\"\"\n m = 5\n M = 2**m\n\n xmax = np.pi\n xs = np.linspace(-xmax, xmax, M)\n\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n\n func = lambda i: np.cos(xs[i]) ** 2\n\n n = 10\n N = 2**n\n\n target_wires = [0, \"a\", -1.1, -10, \"bbb\", 1000]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\", 247, \"straw\", \"berry\", 5.5, 6.6]\n\n dev = qml.device(\"default.qubit\", wires=target_wires + estimation_wires)\n\n @qml.qnode(dev)\n def circuit():\n qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires\n )\n return qml.probs(estimation_wires)\n\n phase_estimated = np.argmax(circuit()[: int(N / 2)]) / N\n mu_estimated = (1 - np.cos(np.pi * phase_estimated)) / 2\n\n exact = 0.432332358381693654\n assert np.allclose(mu_estimated, exact, rtol=1e-3)\n\n def test_id(self):\n \"\"\"Tests that the id attribute can be set.\"\"\"\n xs = np.linspace(-np.pi, np.pi, 2**5)\n probs = np.array([norm().pdf(x) for x in xs])\n probs /= np.sum(probs)\n func = lambda i: np.cos(xs[i]) ** 2\n\n target_wires = [0, \"a\", -1.1, -10, \"bbb\", 1000]\n estimation_wires = [\"bob\", -3, 42, \"penny\", \"lane\", 247, \"straw\", \"berry\", 5.5, 6.6]\n\n template = qml.QuantumMonteCarlo(\n probs, func, target_wires=target_wires, estimation_wires=estimation_wires, id=\"a\"\n )\n\n assert template.id == \"a\"\n"
] | [
[
"numpy.sqrt",
"numpy.allclose",
"numpy.eye",
"numpy.ones",
"numpy.sum",
"numpy.abs",
"numpy.cos",
"scipy.stats.norm",
"numpy.array",
"numpy.sin",
"numpy.linspace"
]
] |
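`test_expected_value` above compares the phase-estimation result against a fixed constant near 0.4323. Per the test's docstring, that target is the expectation value from the template's usage-details example, i.e. the mean of `cos(x)**2` under the discretized, renormalized normal distribution built in the test; a plain NumPy sketch reproduces it without any circuit (values mirror the test's `m = 5` setup):

```python
import numpy as np
from scipy.stats import norm

m = 5
xs = np.linspace(-np.pi, np.pi, 2 ** m)

probs = norm().pdf(xs)
probs /= probs.sum()                   # discretized, renormalized standard normal

mu = np.sum(probs * np.cos(xs) ** 2)   # the quantity the estimation wires converge to
print(mu)                              # ~0.4323
```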
kgizdov/hep_ml | [
"114ac9e896c3a601761092760a7b315f448d59c6"
] | [
"tests/test_nnet.py"
] | [
"from __future__ import division, print_function\n\nimport numpy\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.metrics import roc_auc_score, mean_squared_error, log_loss\nfrom sklearn.base import clone\nfrom sklearn.datasets import make_blobs\n\nfrom hep_ml import nnet\nfrom hep_ml.commonutils import generate_sample\nfrom hep_ml.nnet import MLPRegressor\nfrom hep_ml.preprocessing import BinTransformer, IronTransformer\n\n__author__ = 'Alex Rogozhnikov'\n\nnn_types = [\n nnet.SimpleNeuralNetwork,\n nnet.MLPClassifier,\n nnet.SoftmaxNeuralNetwork,\n nnet.RBFNeuralNetwork,\n nnet.PairwiseNeuralNetwork,\n nnet.PairwiseSoftplusNeuralNetwork,\n]\n\n\n# TODO test pipelines, bagging and boosting\n\ndef check_single_classification_network(neural_network, n_samples=200, n_features=7, distance=0.8, retry_attempts=3):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n # each combination is tried 3 times. before raising exception\n\n for retry_attempt in range(retry_attempts):\n # to initial state\n neural_network = clone(neural_network)\n neural_network.set_params(random_state=42 + retry_attempt)\n print(neural_network)\n neural_network.fit(X, y)\n quality = roc_auc_score(y, neural_network.predict_proba(X)[:, 1])\n # checking that computations don't fail\n computed_loss = neural_network.compute_loss(X, y, sample_weight=y * 0 + 1)\n if quality > 0.8:\n break\n else:\n print('attempt {} : {}'.format(retry_attempt, quality))\n if retry_attempt == retry_attempts - 1:\n raise RuntimeError('quality of model is too low: {} {}'.format(quality, neural_network))\n\n\ndef test_classification_nnets():\n \"\"\"\n checking combinations of losses, nn_types, trainers, most of them are used once during tests.\n \"\"\"\n attempts = max(len(nnet.losses), len(nnet.trainers), len(nn_types))\n losses_shift = numpy.random.randint(10)\n trainers_shift = numpy.random.randint(10)\n for combination in range(attempts):\n loss = list(nnet.losses.keys())[(combination + losses_shift) % len(nnet.losses)]\n trainer = list(nnet.trainers.keys())[(combination + trainers_shift) % len(nnet.trainers)]\n\n nn_type = nn_types[combination % len(nn_types)]\n neural_network = nn_type(layers=[5], loss=loss, trainer=trainer, epochs=200)\n yield check_single_classification_network, neural_network\n\n\ndef test_regression_nnets():\n from sklearn.datasets import make_regression\n X, y = make_regression(n_samples=300, n_features=20, n_informative=10, bias=5)\n print(y[:20])\n\n original_mse = mean_squared_error(y, y * 0 + y.mean())\n for loss in ['mse_loss', 'smooth_huber_loss']:\n reg = MLPRegressor(layers=(5,), loss=loss)\n reg.fit(X, y)\n p = reg.predict(X)\n print(numpy.sort(abs(p))[-10:])\n mse = mean_squared_error(y, p)\n assert mse < original_mse * 0.3\n\n # fitting a constant\n y[:] = 100.\n for loss in ['mse_loss', 'smooth_huber_loss']:\n reg = MLPRegressor(layers=(1,), loss=loss, epochs=300)\n reg.fit(X, y)\n print(mean_squared_error(y, reg.predict(X)))\n assert mean_squared_error(y, reg.predict(X)) < 5., \"doesn't fit constant\"\n\n\ndef compare_nnets_quality(n_samples=200, n_features=7, distance=0.8):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n # checking all possible combinations\n for loss in ['log_loss']: # nnet.losses:\n for NNType in nn_types:\n for trainer in nnet.trainers:\n nn = NNType(layers=[5], loss=loss, trainer=trainer, epochs=100, random_state=42)\n nn.fit(X, y)\n print(roc_auc_score(y, nn.predict_proba(X)[:, 
1]), nn)\n\n lr = LogisticRegression().fit(X, y)\n print(roc_auc_score(y, lr.predict_proba(X)[:, 1]), lr)\n\n\ndef test_network_with_scaler(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for scaler in [BinTransformer(max_bins=16), IronTransformer()]:\n clf = nnet.SimpleNeuralNetwork(scaler=scaler, epochs=300)\n clf.fit(X, y)\n\n p = clf.predict_proba(X)\n assert roc_auc_score(y, p[:, 1]) > 0.8, 'quality is too low for model: {}'.format(clf)\n\n\ndef test_adaptive_methods(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for trainer in ['sgd', 'adadelta']:\n clf = nnet.SimpleNeuralNetwork(trainer=trainer, trainer_parameters={'batch': 1})\n clf.fit(X, y)\n assert roc_auc_score(y, clf.predict_proba(X)[:, 1]) > 0.8, 'quality is too low for model: {}'.format(clf)\n\n\ndef test_reproducibility(n_samples=200, n_features=15, distance=0.5):\n X, y = generate_sample(n_samples=n_samples, n_features=n_features, distance=distance)\n for trainer in nnet.trainers.keys():\n clf1 = nnet.MLPClassifier(trainer=trainer, random_state=42).fit(X, y)\n clf2 = nnet.MLPClassifier(trainer=trainer, random_state=42).fit(X, y)\n assert numpy.allclose(clf1.predict_proba(X), clf2.predict_proba(X))\n\n\ndef test_multiclassification(n_samples=200, n_features=10):\n for n_classes in [2, 3, 4]:\n X, y = make_blobs(n_samples=n_samples, centers=n_classes, n_features=n_features)\n losses = []\n for n_epochs in [1, 10, 100]:\n clf = nnet.MLPMultiClassifier(epochs=n_epochs).fit(X, y)\n loss1 = log_loss(y, clf.predict_proba(X))\n loss2 = clf.compute_loss(X, y)\n assert numpy.allclose(loss1, loss2), 'computed losses are different'\n losses.append(loss1)\n\n assert losses[0] > losses[-1], 'loss is not decreasing'\n"
] | [
[
"numpy.allclose",
"sklearn.base.clone",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.logistic.LogisticRegression",
"sklearn.datasets.make_regression",
"sklearn.metrics.roc_auc_score",
"numpy.random.randint",
"sklearn.datasets.make_blobs"
]
] |
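`check_single_classification_network` above gates model quality on the ROC AUC computed from `predict_proba`. A small self-contained analogue using only scikit-learn is sketched below; `make_blobs` stands in for `hep_ml.commonutils.generate_sample` purely for illustration.

```python
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

X, y = make_blobs(n_samples=200, centers=2, n_features=7, random_state=42)
clf = LogisticRegression().fit(X, y)

auc = roc_auc_score(y, clf.predict_proba(X)[:, 1])   # probability of the positive class
assert auc > 0.8, f"quality is too low: {auc:.3f}"
```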
jopetty/transd-dev | [
"0078dfd8a049f5b97a7b3be6e883821e4994d4c0"
] | [
"src/models/modules/rnn_decoder.py"
] | [
"import random\nfrom typing import Dict\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\n\n\nclass RNNDecoder(nn.Module):\n @property\n def max_gen_length(self) -> int:\n return self.hparams[\"dec_max_gen_length\"]\n\n @property\n def EOS_idx(self) -> int:\n return self.hparams[\"dec_EOS_idx\"]\n\n def __init__(self, hparams: dict) -> None:\n super().__init__()\n\n self.hparams = hparams\n\n self.embedding = nn.Embedding(\n hparams[\"dec_vocab_size\"], hparams[\"dec_embedding_size\"]\n )\n self.unit = nn.RNN(\n hparams[\"dec_embedding_size\"],\n hparams[\"dec_hidden_size\"],\n num_layers=hparams[\"dec_num_layers\"],\n batch_first=True,\n )\n self.output = nn.Linear(hparams[\"dec_hidden_size\"], hparams[\"dec_vocab_size\"])\n\n def forward_step(self, step_input: Dict[str, Tensor]) -> Dict[str, Tensor]:\n\n # Unsqueeze if only one batch is present\n no_squeeze = lambda a: a.unsqueeze(0) if a.shape == 2 else a\n\n # print(\"Step Input\")\n # for key in step_input:\n # print(f\"{key}: {step_input[key].shape}\")\n\n h = no_squeeze(step_input[\"h\"])\n unit_input = no_squeeze(F.relu(self.embedding(step_input[\"x\"])))\n _, state = self.unit(unit_input, h)\n y = self.output(no_squeeze(state[-1, :, :]))\n\n # print(f\"h: {h.shape}\")\n # print(f\"unit_input: {unit_input.shape}\")\n # print(f\"unk: {unk.shape}\")\n # print(f\"state: {state.shape}\")\n # print(f\"state[-1]: {state[-1].shape}\")\n # print(f\"y: {y.shape}\")\n\n return {\"y\": y, \"h\": state}\n\n def get_step_input(self, dec_input: Dict[str, Tensor]) -> Dict[str, Tensor]:\n\n if \"h\" in dec_input:\n h = dec_input[\"h\"]\n elif \"encoder_last_state\" in dec_input:\n h = torch.transpose(dec_input[\"encoder_last_state\"], 0, 1)\n else:\n raise ValueError(\n f\"You must provide a hidden input in dec_input '{dec_input}'\"\n )\n\n if \"x\" in dec_input:\n x = dec_input[\"x\"]\n elif \"transform\" in dec_input:\n # print(\"No x found\")\n # print(dec_input[\"transform\"][:, 1:-1].shape)\n x = dec_input[\"transform\"][:, 1:-1]\n else:\n raise ValueError(\n f\"You must provide a step input in dec_input '{dec_input}'\"\n )\n\n step_input = {\"x\": x, \"h\": h}\n\n if \"encoder_output\" in dec_input:\n step_input[\"encoder_output\"] = dec_input[\"encoder_output\"]\n\n return step_input\n\n def forward(self, dec_input: Dict[str, Tensor], tf_ratio) -> Dict[str, Tensor]:\n\n is_teacher_forcing = random.random() < tf_ratio\n\n batch_size: int = dec_input[\"encoder_output\"].shape[0]\n hidden_size: int = self.output.in_features\n vocab_size: int = self.output.out_features\n gen_length = (\n dec_input[\"target\"][0].shape[0]\n if is_teacher_forcing\n else self.max_gen_length\n )\n\n dec_step_input = self.get_step_input(dec_input)\n\n has_finished = torch.zeros(batch_size, dtype=torch.bool)\n dec_output = torch.zeros(gen_length, batch_size, vocab_size)\n dec_hidden = torch.zeros(gen_length, batch_size, hidden_size)\n\n for i in range(gen_length):\n\n # print(f\"STEP {i} (tf={is_teacher_forcing})\")\n\n step_result = self.forward_step(dec_step_input)\n step_prediction = step_result[\"y\"].argmax(dim=-1)\n\n # for key in step_result:\n # print(f\"step_result[{key}]: {step_result[key].shape}\")\n # print(\"dec_hidden: \", dec_hidden.shape)\n\n dec_output[i] = step_result[\"y\"]\n dec_hidden[i] = step_result[\"h\"]\n\n has_finished[step_prediction == self.EOS_idx] = True\n if all(has_finished):\n break\n else:\n x = dec_input[\"target\"][:, i] if is_teacher_forcing else step_prediction\n step_result[\"x\"] = 
x.unsqueeze(-1)\n step_result[\"encoder_output\"] = dec_input[\"encoder_output\"]\n\n dec_step_input = self.get_step_input(step_result)\n\n output = {\n \"logits\": torch.transpose(dec_output, 0, 1),\n \"predictions\": torch.transpose(dec_output, 0, 1).argmax(dim=-1),\n \"decoder_hiddens\": dec_hidden,\n }\n\n return output\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.RNN",
"torch.zeros",
"torch.transpose"
]
] |
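`forward_step` in the decoder above embeds the previous token, advances the RNN one step from the previous hidden state, and projects the last layer's state to vocabulary logits. The following is a hedged standalone sketch of that single step with arbitrary placeholder sizes; it does not reuse the `hparams` dict or the `RNNDecoder` class itself.

```python
import torch
from torch import nn

vocab_size, embed_size, hidden_size, num_layers, batch = 12, 8, 16, 1, 4

embedding = nn.Embedding(vocab_size, embed_size)
rnn = nn.RNN(embed_size, hidden_size, num_layers=num_layers, batch_first=True)
output = nn.Linear(hidden_size, vocab_size)

x = torch.randint(0, vocab_size, (batch, 1))      # previous token per sequence
h = torch.zeros(num_layers, batch, hidden_size)   # previous hidden state

_, state = rnn(torch.relu(embedding(x)), h)       # one recurrent step
logits = output(state[-1])                        # (batch, vocab_size)
prediction = logits.argmax(dim=-1)                # greedy next-token choice
print(prediction.shape)                           # torch.Size([4])
```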
continue-nature/google-research | [
"7011fe008efc4f11592ace842dbd4c9dffd46c29"
] | [
"capsule_em/norb/norb_record.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Input utility functions for norb.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport os.path\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\ndef _read_and_decode(filename_queue, image_pixel=96, distort=0):\n \"\"\"Read a norb tf record file.\"\"\"\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'depth': tf.FixedLenFeature([], tf.int64),\n 'meta': tf.FixedLenFeature([4], tf.int64),\n })\n\n # Convert from a scalar string tensor (whose single string has\n # length image_pixels) to a uint8 tensor with shape\n # [image_pixels].\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n height = tf.cast(features['height'], tf.int32)\n depth = tf.cast(features['depth'], tf.int32)\n image = tf.reshape(image, tf.stack([depth, height, height]))\n image = tf.transpose(image, [1, 2, 0])\n image = tf.cast(image, tf.float32)\n print(image.get_shape()[0].value)\n if image_pixel < 96:\n print('image resizing to {}'.format(image_pixel))\n image = tf.image.resize_images(image, [image_pixel, image_pixel])\n orig_images = image\n\n if image_pixel == 48:\n new_dim = 32\n elif image_pixel == 32:\n new_dim = 22\n if distort == 1:\n image = tf.image.random_brightness(image, max_delta=63)\n image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n image = tf.random_crop(image, tf.stack([new_dim, new_dim, depth]))\n # 0.26179938779 is 15 degress in radians\n image = tf.image.per_image_standardization(image)\n image_pixel = new_dim\n elif distort == 2:\n image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)\n image = tf.image.per_image_standardization(image)\n image_pixel = new_dim\n else:\n image = image * (1.0 / 255.0)\n image = tf.div(\n tf.subtract(image, tf.reduce_min(image)),\n tf.subtract(tf.reduce_max(image), tf.reduce_min(image)))\n\n # Convert label from a scalar uint8 tensor to an int32 scalar.\n label = tf.cast(features['label'], tf.int32)\n\n return image, label, image_pixel, orig_images\n\n\nbxs_m2 = [[1, 1], [1, -1], [-1, 1], [-1, -1]]\n\n\ndef inputs(train_dir,\n batch_size,\n split,\n multi,\n image_pixel=96,\n distort=False,\n patching=False):\n \"\"\"Reads input data num_epochs times.\"\"\"\n if multi:\n filename = os.path.join(train_dir, '{}duo-az.tfrecords'.format(split))\n else:\n filename = os.path.join(train_dir, '{}.tfrecords'.format(split))\n\n with tf.name_scope('input'):\n filename_queue = tf.train.string_input_producer([filename])\n\n if distort:\n d = 1 + (split == 'test')\n else:\n d = 0\n\n # Even 
when reading in multiple threads, share the filename\n # queue.\n image, label, dim, orig_image = _read_and_decode(\n filename_queue, image_pixel=image_pixel, distort=d)\n orig_image.set_shape([48, 48, 1 + multi])\n image.set_shape([dim, dim, 1 + multi])\n image = tf.transpose(image, [2, 0, 1])\n\n if split == 'train':\n images, sparse_labels = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=2,\n capacity=2000 + 3 * batch_size,\n # Ensures a minimum amount of shuffling of examples.\n min_after_dequeue=2000)\n else:\n images, sparse_labels, orig_images = tf.train.batch(\n [image, label, orig_image],\n batch_size=batch_size,\n num_threads=1,\n capacity=1000 + 3 * batch_size)\n if patching:\n t_images = tf.tile(orig_images, [4, 1, 1, 1])\n c_images = tf.image.extract_glimpse(\n t_images, [32, 32], bxs_m2, centered=True, normalized=False)\n c2images = tf.image.extract_glimpse(\n t_images, [32, 32],\n 2 * np.array(bxs_m2),\n centered=True,\n normalized=False)\n c3images = tf.image.extract_glimpse(\n t_images, [32, 32],\n 3 * np.array(bxs_m2),\n centered=True,\n normalized=False)\n c_images = tf.map_fn(tf.image.per_image_standardization, c_images)\n c2images = tf.map_fn(tf.image.per_image_standardization, c2images)\n c3images = tf.map_fn(tf.image.per_image_standardization, c3images)\n c_images = tf.transpose(c_images, [0, 3, 1, 2])\n c2images = tf.transpose(c2images, [0, 3, 1, 2])\n c3images = tf.transpose(c3images, [0, 3, 1, 2])\n # cc_images = tf.concat([images, m_images, c_images], axis=0)\n # cc_labels = tf.tile(sparse_labels, [9])\n cc_images = tf.concat([images, c_images, c2images, c3images], axis=0)\n cc_labels = tf.tile(sparse_labels, [13])\n features = {\n 'images': images,\n 'labels': tf.one_hot(sparse_labels, 5),\n 'recons_image': images,\n 'recons_label': sparse_labels,\n 'height': dim,\n 'depth': 1 + multi,\n 'num_classes': 5,\n 'cc_images': cc_images,\n 'cc_recons_label': cc_labels,\n 'cc_labels': tf.one_hot(cc_labels, 5),\n }\n\n return features\n"
] | [
[
"tensorflow.compat.v1.image.resize_image_with_crop_or_pad",
"tensorflow.compat.v1.image.per_image_standardization",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.shuffle_batch",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.TFRecordReader",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.image.random_contrast",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.map_fn",
"tensorflow.compat.v1.decode_raw",
"tensorflow.compat.v1.image.resize_images",
"tensorflow.compat.v1.image.random_brightness",
"tensorflow.compat.v1.reduce_min",
"tensorflow.compat.v1.train.string_input_producer",
"tensorflow.compat.v1.image.extract_glimpse",
"numpy.array",
"tensorflow.compat.v1.train.batch",
"tensorflow.compat.v1.name_scope"
]
] |
haowen-xu/ml-essentials | [
"ca44186be37887461205227c32995f1485b4ff41"
] | [
"mltk/data/loaders.py"
] | [
"\"\"\"\nSimple dataset loaders.\n\nFor more datasets and more comprehensive loaders, you may turn to dedicated\nlibraries like `fuel`.\n\"\"\"\n\nimport gzip\nimport hashlib\nimport os\nimport pickle\nfrom typing import *\n\nimport idx2numpy\nimport numpy as np\n\nfrom ..typing_ import *\nfrom ..utils import CacheDir, validate_enum_arg\n\n__all__ = ['load_mnist', 'load_fashion_mnist', 'load_cifar10', 'load_cifar100']\n\n_MNIST_LIKE_FILE_NAMES = {\n 'train_x': 'train-images-idx3-ubyte.gz',\n 'train_y': 'train-labels-idx1-ubyte.gz',\n 'test_x': 't10k-images-idx3-ubyte.gz',\n 'test_y': 't10k-labels-idx1-ubyte.gz',\n}\n_MNIST_URI_PREFIX = 'http://yann.lecun.com/exdb/mnist/'\n_MNIST_FILE_MD5 = {\n 'train_x': 'f68b3c2dcbeaaa9fbdd348bbdeb94873',\n 'train_y': 'd53e105ee54ea40749a09fcbcd1e9432',\n 'test_x': '9fb629c4189551a2d022fa330f9573f3',\n 'test_y': 'ec29112dd5afa0611ce80d1b7f02629c',\n}\n_FASHION_MNIST_URI_PREFIX = 'http://fashion-mnist.s3-website.eu-central-1.' \\\n 'amazonaws.com/'\n_FASHION_MNIST_FILE_MD5 = {\n 'train_x': '8d4fb7e6c68d591d4c3dfef9ec88bf0d',\n 'train_y': '25c81989df183df01b3e8a0aad5dffbe',\n 'test_x': 'bef4ecab320f06d8554ea6380940ec79',\n 'test_y': 'bb300cfdad3c16e7a12a480ee83cd310',\n}\n\n_CIFAR_10_URI = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n_CIFAR_10_MD5 = 'c58f30108f718f92721af3b95e74349a'\n_CIFAR_10_CONTENT_DIR = 'cifar-10-batches-py'\n_CIFAR_100_URI = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n_CIFAR_100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85'\n_CIFAR_100_CONTENT_DIR = 'cifar-100-python'\n\n\ndef _validate_x_shape(shape, default_shape):\n shape = tuple(int(v) for v in shape)\n default_shape = tuple(int(v) for v in default_shape)\n value_size = int(np.prod(default_shape))\n\n if np.prod(shape) != value_size:\n raise ValueError(f'`x_shape` does not product to {value_size}: {shape}')\n return shape\n\n\ndef load_mnist_like(uri_prefix: str,\n file_md5: Dict[str, str],\n cache_name: str,\n x_shape: Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST-like dataset as NumPy arrays.\n\n Args:\n uri_prefix: Common prefix of the URIs in `remote_files`.\n file_md5: The remote file MD5 hash sums, a dict of\n `{'train_x': ..., 'train_y': ..., 'test_x': ..., 'test_y': ...}`,\n where each value is the md5 sum.\n cache_name: Name of the cache directory.\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n\n def _fetch_array(array_name):\n uri = uri_prefix + _MNIST_LIKE_FILE_NAMES[array_name]\n md5 = file_md5[array_name]\n path = CacheDir(cache_name).download(\n uri, hasher=hashlib.md5(), expected_hash=md5)\n with gzip.open(path, 'rb') as f:\n return idx2numpy.convert_from_file(f)\n\n # check arguments\n x_shape = _validate_x_shape(x_shape, (28, 28))\n\n # load data\n train_x = _fetch_array('train_x').astype(x_dtype)\n train_y = _fetch_array('train_y').astype(y_dtype)\n test_x = _fetch_array('test_x').astype(x_dtype)\n test_y = _fetch_array('test_y').astype(y_dtype)\n\n assert(len(train_x) == len(train_y) == 60000)\n assert(len(test_x) == len(test_y) == 10000)\n\n # change shape\n train_x = train_x.reshape([len(train_x)] + list(x_shape))\n test_x = test_x.reshape([len(test_x)] + list(x_shape))\n\n return (train_x, train_y), (test_x, test_y)\n\n\ndef load_mnist(x_shape: 
Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n return load_mnist_like(\n _MNIST_URI_PREFIX, _MNIST_FILE_MD5, 'mnist', x_shape, x_dtype, y_dtype)\n\n\ndef load_fashion_mnist(x_shape: Sequence[int] = (28, 28),\n x_dtype: ArrayDType = np.uint8,\n y_dtype: ArrayDType = np.int32\n ) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load an MNIST dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n return load_mnist_like(\n _FASHION_MNIST_URI_PREFIX, _FASHION_MNIST_FILE_MD5, 'fashion_mnist',\n x_shape, x_dtype, y_dtype)\n\n\ndef _cifar_load_batch(path, x_shape, x_dtype, y_dtype, expected_batch_label,\n labels_key='labels'):\n # load from file\n with open(path, 'rb') as f:\n d = {\n k.decode('utf-8'): v\n for k, v in pickle.load(f, encoding='bytes').items()\n }\n d['batch_label'] = d['batch_label'].decode('utf-8')\n assert(d['batch_label'] == expected_batch_label)\n\n data = np.asarray(d['data'], dtype=x_dtype)\n labels = np.asarray(d[labels_key], dtype=y_dtype)\n\n # change shape\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = np.transpose(data, (0, 2, 3, 1))\n if x_shape:\n data = data.reshape([data.shape[0]] + list(x_shape))\n\n return data, labels\n\n\ndef load_cifar10(x_shape: Sequence[int] = (32, 32, 3),\n x_dtype: ArrayDType = np.float32,\n y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load the CIFAR-10 dataset as NumPy arrays.\n\n Args:\n x_shape: Reshape each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n # check the arguments\n x_shape = _validate_x_shape(x_shape, (32, 32, 3))\n\n # fetch data\n path = CacheDir('cifar').download_and_extract(\n _CIFAR_10_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_10_MD5)\n data_dir = os.path.join(path, _CIFAR_10_CONTENT_DIR)\n\n # load the data\n train_num = 50000\n train_x = np.zeros((train_num,) + x_shape, dtype=x_dtype)\n train_y = np.zeros((train_num,), dtype=y_dtype)\n\n for i in range(1, 6):\n path = os.path.join(data_dir, 'data_batch_{}'.format(i))\n x, y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='training batch {} of 5'.format(i)\n )\n (train_x[(i - 1) * 10000: i * 10000, ...],\n train_y[(i - 1) * 10000: i * 10000]) = x, y\n\n path = os.path.join(data_dir, 'test_batch')\n test_x, test_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='testing batch 1 of 1'\n )\n assert(len(test_x) == len(test_y) == 10000)\n\n return (train_x, train_y), (test_x, test_y)\n\n\ndef load_cifar100(label_mode: str = 'fine',\n x_shape: Sequence[int] = (32, 32, 3),\n x_dtype: ArrayDType = np.float32,\n y_dtype: ArrayDType = np.int32) -> Tuple[XYArrayTuple, XYArrayTuple]:\n \"\"\"\n Load the CIFAR-100 dataset as NumPy arrays.\n\n Args:\n label_mode: One of {\"fine\", \"coarse\"}.\n x_shape: Reshape 
each digit into this shape.\n x_dtype: Cast each digit into this data type.\n y_dtype: Cast each label into this data type.\n\n Returns:\n The ``(train_x, train_y), (test_x, test_y)`` arrays.\n \"\"\"\n # check the arguments\n label_mode = validate_enum_arg('label_mode', label_mode, ('fine', 'coarse'))\n x_shape = _validate_x_shape(x_shape, (32, 32, 3))\n\n # fetch data\n path = CacheDir('cifar').download_and_extract(\n _CIFAR_100_URI, hasher=hashlib.md5(), expected_hash=_CIFAR_100_MD5)\n data_dir = os.path.join(path, _CIFAR_100_CONTENT_DIR)\n\n # load the data\n path = os.path.join(data_dir, 'train')\n train_x, train_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='training batch 1 of 1',\n labels_key='{}_labels'.format(label_mode)\n )\n assert(len(train_x) == len(train_y) == 50000)\n\n path = os.path.join(data_dir, 'test')\n test_x, test_y = _cifar_load_batch(\n path, x_shape=x_shape, x_dtype=x_dtype, y_dtype=y_dtype,\n expected_batch_label='testing batch 1 of 1',\n labels_key='{}_labels'.format(label_mode)\n )\n assert(len(test_x) == len(test_y) == 10000)\n\n return (train_x, train_y), (test_x, test_y)\n"
] | [
[
"numpy.transpose",
"numpy.asarray",
"numpy.zeros",
"numpy.prod"
]
] |
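A minimal usage sketch for the CIFAR loaders in the code sample above. The module name `image_datasets` is an assumption (it stands in for wherever that file lives), and the calls rely on its `_CIFAR_*` constants, `CacheDir`, and `_validate_x_shape` helpers being importable alongside it.

import numpy as np
import image_datasets  # hypothetical module name for the file shown above

# Download (or reuse the cache), then request uint8 images instead of the float32 default.
(train_x, train_y), (test_x, test_y) = image_datasets.load_cifar10(x_dtype=np.uint8)
assert train_x.shape == (50000, 32, 32, 3) and test_x.shape == (10000, 32, 32, 3)

# CIFAR-100 with the coarse superclass labels rather than the fine labels.
(train_x, train_y), (test_x, test_y) = image_datasets.load_cifar100(label_mode='coarse')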
fxia22/gibson_demos | [
"5f8d253694b23b41c53959774203ba5787578b74"
] | [
"igibson/test/test_motion_planning.py"
] | [
"import igibson\nfrom igibson.envs.igibson_env import iGibsonEnv\nfrom time import time\nimport os\nfrom igibson.utils.assets_utils import download_assets, download_demo_data\nfrom igibson.utils.motion_planning_wrapper import MotionPlanningWrapper\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef test_occupancy_grid():\n print(\"Test env\")\n download_assets()\n download_demo_data()\n config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')\n \n nav_env = iGibsonEnv(config_file=config_filename, mode='headless')\n nav_env.reset()\n nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])\n nav_env.simulator.step()\n\n action = nav_env.action_space.sample()\n ts = nav_env.step(action)\n assert np.sum(ts[0]['occupancy_grid'] == 0) > 0\n assert np.sum(ts[0]['occupancy_grid'] == 1) > 0\n plt.imshow(ts[0]['occupancy_grid'][:,:,0])\n plt.colorbar()\n plt.savefig('occupancy_grid.png')\n nav_env.clean()\n\n\ndef test_base_planning():\n print(\"Test env\")\n download_assets()\n download_demo_data()\n config_filename = os.path.join(igibson.root_path, 'test', 'test_house_occupancy_grid.yaml')\n\n nav_env = iGibsonEnv(config_file=config_filename, mode='headless')\n motion_planner = MotionPlanningWrapper(nav_env)\n state = nav_env.reset()\n nav_env.robots[0].set_position_orientation([0,0,0],[0,0,0,1])\n nav_env.simulator.step()\n plan = None\n itr = 0\n while plan is None and itr < 10:\n plan = motion_planner.plan_base_motion([0.5,0,0])\n print(plan)\n itr += 1\n motion_planner.dry_run_base_plan(plan)\n\n assert len(plan) > 0 \n nav_env.clean()\n\n"
] | [
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.imshow",
"numpy.sum",
"matplotlib.pyplot.savefig"
]
] |
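The base-planning test above retries the planner a bounded number of times before asserting on the result; pulled out as a standalone helper, that pattern looks roughly like this (a sketch only: `planner` stands in for a MotionPlanningWrapper instance and `goal` for an [x, y, theta] base goal).

def plan_base_motion_with_retries(planner, goal, max_attempts=10):
    """Return the first non-None base plan, or None if every attempt fails."""
    plan = None
    attempts = 0
    while plan is None and attempts < max_attempts:
        plan = planner.plan_base_motion(goal)  # returns None when no path is found
        attempts += 1
    return plan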
adityabasu1/Event-Extraction-NLP | [
"98faa88d36f09330ebce6fc180ab2f087776f2e1"
] | [
"Joint_Event_Extraction.py"
] | [
"import sys\nimport os\nimport numpy as np\nimport random\n\nfrom collections import OrderedDict\nimport pickle\nimport datetime\nfrom tqdm import tqdm\nfrom recordclass import recordclass\nimport math\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport json\n\n# Helper funcs\ndef custom_print(*msg):\n for i in range(0, len(msg)):\n if i == len(msg) - 1:\n print(msg[i])\n logger.write(str(msg[i]) + '\\n')\n else:\n print(msg[i], ' ', end='')\n logger.write(str(msg[i]))\n\n\ndef load_word_embedding(embed_file, vocab):\n custom_print('vocab length:', len(vocab))\n embed_vocab = OrderedDict()\n rev_embed_vocab = OrderedDict()\n embed_matrix = list()\n\n embed_vocab['<PAD>'] = 0\n rev_embed_vocab[0] = '<PAD>'\n embed_matrix.append(np.zeros(word_embed_dim, dtype=np.float32))\n\n embed_vocab['<UNK>'] = 1\n rev_embed_vocab[1] = '<UNK>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n embed_vocab['<SOS>'] = 2\n rev_embed_vocab[2] = '<SOS>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n embed_vocab['<EOS>'] = 3\n rev_embed_vocab[3] = '<EOS>'\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n\n word_idx = 4\n with open(embed_file, \"r\") as f:\n for line in f:\n parts = line.split()\n if len(parts) < word_embed_dim + 1:\n continue\n word = parts[0]\n if word in vocab and vocab[word] >= word_min_freq:\n vec = [np.float32(val) for val in parts[1:]]\n embed_matrix.append(vec)\n embed_vocab[word] = word_idx\n rev_embed_vocab[word_idx] = word\n word_idx += 1\n\n for word in vocab:\n if word not in embed_vocab and vocab[word] >= word_min_freq:\n embed_matrix.append(np.random.uniform(-0.25, 0.25, word_embed_dim))\n embed_vocab[word] = word_idx\n rev_embed_vocab[word_idx] = word\n word_idx += 1\n\n custom_print('embed dictionary length:', len(embed_vocab))\n return embed_vocab, rev_embed_vocab, np.array(embed_matrix, dtype=np.float32)\n\n\ndef build_vocab(data, events, arguments, roles, vocab_file, embed_file):\n vocab = OrderedDict()\n char_v = OrderedDict()\n char_v['<PAD>'] = 0\n char_v['<UNK>'] = 1\n char_v[';'] = 2\n char_v['|'] = 3\n char_idx = 4\n for d in data:\n for word in d.SrcWords:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n\n for c in word:\n if c not in char_v:\n char_v[c] = char_idx\n char_idx += 1\n\n for event in events:\n vocab[event] = word_min_freq\n for argument in arguments:\n vocab[argument] = word_min_freq\n for role in roles:\n vocab[role] = word_min_freq\n\n vocab[';'] = word_min_freq\n vocab['|'] = word_min_freq\n\n word_v, rev_word_v, embed_matrix = load_word_embedding(embed_file, vocab)\n output = open(vocab_file, 'wb')\n pickle.dump([word_v, char_v], output)\n output.close()\n return word_v, rev_word_v, char_v, embed_matrix\n\n\ndef load_vocab(vocab_file):\n with open(vocab_file, 'rb') as f:\n word_v, char_v = pickle.load(f)\n return word_v, char_v\n\ndef get_adj_mat(amat):\n K = 5\n adj_mat = np.zeros((len(amat), len(amat)), np.float32)\n for i in range(len(amat)):\n for j in range(len(amat)):\n if 0 <= amat[i][j] <= K:\n adj_mat[i][j] = 1.0 / math.pow(2, amat[i][j])\n else:\n adj_mat[i][j] = 0\n return adj_mat\n\n\n\ndef get_data(src_lines, trg_lines, datatype):\n samples = []\n uid = 1\n src_len = -1\n trg_len = -1\n for i in range(0, len(src_lines)):\n src_line = src_lines[i].strip()\n trg_line = trg_lines[i].strip()\n src_words = src_line.split()\n\n if datatype == 1:\n tuples = 
trg_line.strip().split('|')\n random.shuffle(tuples)\n new_trg_line = ' | '.join(tuples)\n assert len(trg_line.split()) == len(new_trg_line.split())\n trg_line = new_trg_line\n\n trg_words = list()\n trg_words.append('<SOS>')\n trg_words += trg_line.split()\n trg_words.append('<EOS>')\n\n if datatype == 1 and (len(src_words) > max_src_len or len(trg_words) > max_trg_len + 1):\n continue\n if len(src_words) > src_len:\n src_len = len(src_words)\n if len(trg_words) > trg_len:\n trg_len = len(trg_words)\n \n sample = Sample(Id=uid, SrcLen=len(src_words), SrcWords=src_words, TrgLen=len(trg_words),\n TrgWords=trg_words) #c\n samples.append(sample)\n \n uid += 1\n print(src_len)\n print(trg_len)\n return samples\n\n\ndef read_data(src_file, trg_file, datatype):\n reader = open(src_file)\n src_lines = reader.readlines()\n reader.close()\n\n reader = open(trg_file)\n trg_lines = reader.readlines()\n reader.close()\n\n # tot_len = 100\n # src_lines = src_lines[0:min(tot_len, len(src_lines))]\n # trg_lines = trg_lines[0:min(tot_len, len(trg_lines))]\n # adj_lines = adj_lines[0:min(tot_len, len(adj_lines))]\n\n data = get_data(src_lines, trg_lines, datatype)\n return data\n\n\n#event_lines, argument_lines, roles_lines\n\n# to add option for less detailed checks\n\ndef check_event_trigger(ref_string, pred_string):\n return (ref_string == pred_string)\n pass\n\ndef check_event_type(ref_string, pred_string, event_lines):\n if granular_mode == 0:\n if pred_string in event_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\n if granular_mode == 1:\n pred_token = pred_string.split(\":\")[0]\n ref_token = ref_string.split(\":\")[0]\n return (pred_token == ref_token)\n pass\n\n\ndef check_event_argument(ref_string, pred_string):\n return (ref_string == pred_string)\n pass\n\ndef check_argument_type(ref_string, pred_string, argument_lines):\n if granular_mode == 0:\n if pred_string in argument_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\n if granular_mode == 1:\n pred_token = pred_string.split(\":\")[0]\n ref_token = ref_string.split(\":\")[0]\n return (pred_token == ref_token)\n pass\n\ndef check_argument_role(ref_string, pred_string, roles_lines):\n if pred_string in roles_lines:\n return (ref_string == pred_string)\n else:\n # print(\"invalid prediction\")\n return False\n pass\n\ndef calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines):\n\n list_of_tracking_metrics = ['predicted_tuples',\n 'ground_truth_tuples',\n 'correct_predictions',\n 'events_count',\n 'correct_events',\n 'correct_event_type',\n 'correct_arguments',\n 'correct_argment_types',\n 'correct_argument_roles'\n ]\n\n metric_counts = dict.fromkeys(list_of_tracking_metrics, 0)\n \n\n for i in range(0, min(len(ref_lines), len(pred_lines))):\n \n ref_line = ref_lines[i].strip()\n pred_line = pred_lines[i].strip()\n\n ref_tuples = ref_line.split('|')\n pred_tuples = pred_line.split('|')\n\n # find a way to compare multiple tuples\n\n # correct - t1 | t2 | t3\n # pred - p1 | p2\n # postives = 3 [number of ground truths minus nones]\n # predicted_pos = 2 [number of preds minus nones]\n # TP = correct preds \n # TP + FP = predicted\n # TP + FN = positives \n # Precision = correct / predicted_pos \n # Recall = correct / positives\n # f = pr/p+r\n\n # handling repeated predictions \n # set_of_preds = set()\n # for pred_tuple in pred_tuples:\n # set_of_preds.add(pred_tuple.strip())\n # pred_tuples = 
list(set_of_preds)\n\n for pred_tuple in pred_tuples:\n pred_strings = pred_tuple.split(';')\n if(len(pred_strings) < 3):\n continue\n\n\n # in the case of no argument detection, we only calculate the event trigger scores\n if(pred_strings[2].strip().lower()) == 'none':\n max_matches = 0\n part_matches = []\n\n for ref_tuple in ref_tuples:\n # ssss\n ev1, ev2 = cal_f1_for_pair(ref_tuple, pred_tuple, event_lines)\n\n pair_score = ev1+ev2\n\n if pair_score > max_matches:\n max_matches = pair_score\n part_matches = (ev1, ev2)\n pass\n pass\n\n metric_counts['events_count'] += 1\n if ev1 == 1:\n metric_counts['correct_events'] += 1\n if ev2 == 1:\n metric_counts['correct_event_type'] += 1\n\n continue\n \n max_matches = 0\n part_matches = cal_f1_for_tuple(ref_tuples[0], pred_tuple, event_lines, argument_lines, roles_lines)\n\n for ref_tuple in ref_tuples:\n res = cal_f1_for_tuple(ref_tuple, pred_tuple, event_lines, argument_lines, roles_lines)\n\n tuple_score = sum(res)\n\n if tuple_score >= max_matches:\n max_matches = tuple_score\n part_matches = res\n pass\n pass\n\n metric_counts['predicted_tuples'] += 1\n metric_counts['events_count'] += 1\n\n if max_matches >= 4:\n metric_counts['correct_predictions'] += 1\n if part_matches[0] == 1:\n metric_counts['correct_events'] += 1\n if part_matches[1] == 1:\n metric_counts['correct_event_type'] += 1\n if part_matches[2] == 1:\n metric_counts['correct_arguments'] += 1\n if part_matches[3] == 1:\n metric_counts['correct_argment_types'] += 1\n if part_matches[4] == 1:\n metric_counts['correct_argument_roles'] += 1\n pass\n \n for ref_tuple in ref_tuples:\n if(ref_tuple.split(';')[2].strip().lower()) != 'none':\n metric_counts['ground_truth_tuples'] += 1\n\n pass\n \n print(metric_counts)\n\n precision = float(metric_counts['correct_predictions'] / (metric_counts['predicted_tuples'] + 1e-08))\n recall = float(metric_counts['correct_predictions'] / (metric_counts['ground_truth_tuples'] + 1e-08))\n f1 = 2 * precision * recall / (precision + recall + 1e-08)\n precision = round(precision, 3)\n recall = round(recall, 3)\n f1 = round(f1, 3)\n\n print(\"Partwise Results\")\n \n event_acc = metric_counts['correct_events']/ (metric_counts['events_count'] + 1e-08)\n evtype_acc = metric_counts['correct_event_type']/ (metric_counts['events_count'] + 1e-08)\n argument_acc = metric_counts['correct_arguments']/ (metric_counts['predicted_tuples'] + 1e-08)\n argtype_acc = metric_counts['correct_argment_types']/ (metric_counts['predicted_tuples'] + 1e-08)\n role_acc = metric_counts['correct_argument_roles']/ (metric_counts['predicted_tuples'] + 1e-08)\n\n\n print(f'Event Trigger Word Accuracy: {event_acc}')\n print(f'Event Type Accuracy: {evtype_acc}')\n print(f'Argument Identification Accuracy: {argument_acc}')\n print(f'Argument Type Accuracy: {argtype_acc}')\n print(f'Argument Role Accuracy: {role_acc}')\n\n print(f'Macro f-score: {f1}')\n\n targ_file = os.path.join(trg_data_folder, 'Results_logger.txt')\n\n f = open(targ_file, \"a\")\n\n f.write(f'Event Trigger Word Accuracy: {event_acc}')\n f.write(\"\\n\")\n f.write(f'Event Type Accuracy: {evtype_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Identification Accuracy: {argument_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Type Accuracy: {argtype_acc}')\n f.write(\"\\n\")\n f.write(f'Argument Role Accuracy: {role_acc}')\n f.write(\"\\n\")\n\n f.write(f'Macro f-score: {f1}')\n f.write(\"\\n\")\n\n f.close()\n\n\n return f1\n\ndef cal_f1_for_pair(ref_tuple: str ,\n pred_tuple: str,\n event_lines: list\n ) -> 
list:\n \n ref_strings = ref_tuple.split(';')\n pred_strings = pred_tuple.split(';')\n\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n\n return ev1, ev2\n\ndef cal_f1_for_tuple(ref_tuple: str ,\n pred_tuple: str,\n event_lines: list,\n argument_lines: list,\n roles_lines: list\n ) -> list:\n\n ref_strings = ref_tuple.split(';')\n pred_strings = pred_tuple.split(';')\n\n if (len (pred_strings) != 5 ):\n if (len (pred_strings) >= 2 ):\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n return [ev1, ev2, 0, 0, 0]\n return list([0,0,0,0,0])\n\n ev1 = int( check_event_trigger(ref_strings[0].strip(), pred_strings[0].strip()) )\n ev2 = int( check_event_type(ref_strings[1].strip(), pred_strings[1].strip(), event_lines) )\n ev3 = int( check_event_argument(ref_strings[2].strip(), pred_strings[2].strip()) )\n ev4 = int( check_argument_type(ref_strings[3].strip(), pred_strings[3].strip(), argument_lines) )\n ev5 = int( check_argument_role(ref_strings[4].strip(), pred_strings[4].strip(), roles_lines) )\n\n ret = [ev1, ev2, ev3, ev4, ev5]\n \n return ret\n\n\n\ndef get_model(model_id):\n if model_id == 1:\n return SeqToSeqModel()\n\ndef write_test_res(data, preds, attns, outfile):\n writer = open(outfile, 'w')\n for i in range(0, len(data)):\n pred_words = get_pred_words(preds[i], attns[i], data[i].SrcWords)[:-1]\n writer.write(' '.join(pred_words) + '\\n')\n writer.close()\n\n\ndef set_random_seeds(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 1:\n torch.cuda.manual_seed_all(seed)\n\ndef get_max_len(sample_batch):\n src_max_len = len(sample_batch[0].SrcWords)\n for idx in range(1, len(sample_batch)):\n if len(sample_batch[idx].SrcWords) > src_max_len:\n src_max_len = len(sample_batch[idx].SrcWords)\n\n trg_max_len = len(sample_batch[0].TrgWords)\n for idx in range(1, len(sample_batch)):\n if len(sample_batch[idx].TrgWords) > trg_max_len:\n trg_max_len = len(sample_batch[idx].TrgWords)\n\n return src_max_len, trg_max_len\n\ndef get_words_index_seq(words, max_len):\n seq = list()\n for word in words:\n if word in word_vocab:\n seq.append(word_vocab[word])\n else:\n seq.append(word_vocab['<UNK>'])\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n seq.append(word_vocab['<PAD>'])\n return seq\n\n\ndef get_target_words_index_seq(words, max_len):\n seq = list()\n for word in words:\n if word in word_vocab:\n seq.append(word_vocab[word])\n else:\n seq.append(word_vocab['<UNK>'])\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n seq.append(word_vocab['<EOS>'])\n return seq\n\n\ndef get_padded_mask(cur_len, max_len):\n mask_seq = list()\n for i in range(0, cur_len):\n mask_seq.append(0)\n pad_len = max_len - cur_len\n for i in range(0, pad_len):\n mask_seq.append(1)\n return mask_seq\n\n\ndef get_target_vocab_mask(src_words):\n mask = []\n for i in range(0, len(word_vocab)):\n mask.append(1)\n for word in src_words:\n if word in word_vocab:\n mask[word_vocab[word]] = 0\n # events, arguments, roles\n for event in events:\n mask[word_vocab[event]] = 0\n for argument in arguments:\n mask[word_vocab[argument]] = 0\n for role in roles:\n mask[word_vocab[role]] = 0\n\n mask[word_vocab['<UNK>']] = 0\n mask[word_vocab['<EOS>']] = 0\n mask[word_vocab[';']] = 0\n mask[word_vocab['|']] = 0\n 
return mask\n\n\ndef get_rel_mask(trg_words, max_len):\n mask_seq = list()\n for word in trg_words:\n mask_seq.append(0)\n # if word in relations:\n # mask_seq.append(0)\n # else:\n # mask_seq.append(1)\n pad_len = max_len - len(trg_words)\n for i in range(0, pad_len):\n mask_seq.append(1)\n return mask_seq\n\n\ndef get_char_seq(words, max_len):\n char_seq = list()\n for i in range(0, conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n for word in words:\n for c in word[0:min(len(word), max_word_len)]:\n if c in char_vocab:\n char_seq.append(char_vocab[c])\n else:\n char_seq.append(char_vocab['<UNK>'])\n pad_len = max_word_len - len(word)\n for i in range(0, pad_len):\n char_seq.append(char_vocab['<PAD>'])\n for i in range(0, conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n\n pad_len = max_len - len(words)\n for i in range(0, pad_len):\n for i in range(0, max_word_len + conv_filter_size - 1):\n char_seq.append(char_vocab['<PAD>'])\n return char_seq\n\n\n\ndef get_relations(file_name):\n rels = []\n reader = open(file_name)\n lines = reader.readlines()\n reader.close()\n for line in lines:\n rels.append(line.strip())\n return rels\n\ndef get_batch_data(cur_samples, is_training=False):\n \"\"\"\n Returns the training samples and labels as numpy array\n \"\"\"\n batch_src_max_len, batch_trg_max_len = get_max_len(cur_samples)\n src_words_list = list()\n src_words_mask_list = list()\n src_char_seq = list()\n\n trg_words_list = list()\n trg_vocab_mask = list()\n adj_lst = []\n\n target = list()\n cnt = 0\n for sample in cur_samples:\n src_words_list.append(get_words_index_seq(sample.SrcWords, batch_src_max_len))\n src_words_mask_list.append(get_padded_mask(sample.SrcLen, batch_src_max_len))\n src_char_seq.append(get_char_seq(sample.SrcWords, batch_src_max_len))\n trg_vocab_mask.append(get_target_vocab_mask(sample.SrcWords))\n\n # cur_masked_adj = np.zeros((batch_src_max_len, batch_src_max_len), dtype=np.float32)\n # cur_masked_adj[:len(sample.SrcWords), :len(sample.SrcWords)] = sample.AdjMat\n # adj_lst.append(cur_masked_adj)\n\n if is_training:\n padded_trg_words = get_words_index_seq(sample.TrgWords, batch_trg_max_len)\n trg_words_list.append(padded_trg_words)\n target.append(padded_trg_words[1:])\n else:\n trg_words_list.append(get_words_index_seq(['<SOS>'], 1))\n cnt += 1\n\n return {'src_words': np.array(src_words_list, dtype=np.float32),\n 'src_chars': np.array(src_char_seq),\n 'src_words_mask': np.array(src_words_mask_list),\n 'adj': np.array(adj_lst),\n 'trg_vocab_mask': np.array(trg_vocab_mask),\n 'trg_words': np.array(trg_words_list, dtype=np.int32),\n 'target': np.array(target)}\n\ndef shuffle_data(data):\n custom_print(len(data))\n data.sort(key=lambda x: x.SrcLen)\n num_batch = int(len(data) / batch_size)\n rand_idx = random.sample(range(num_batch), num_batch)\n new_data = []\n for idx in rand_idx:\n new_data += data[batch_size * idx: batch_size * (idx + 1)]\n if len(new_data) < len(data):\n new_data += data[num_batch * batch_size:]\n return new_data\n\n\ndef get_pred_words(preds, attns, src_words):\n pred_words = []\n for i in range(0, max_trg_len):\n word_idx = preds[i]\n if word_vocab['<EOS>'] == word_idx:\n pred_words.append('<EOS>')\n break\n elif att_type != 'None' and copy_on and word_vocab['<UNK>'] == word_idx:\n word_idx = attns[i]\n pred_words.append(src_words[word_idx])\n else:\n pred_words.append(rev_word_vocab[word_idx])\n return pred_words\n\n\nclass WordEmbeddings(nn.Module):\n def __init__(self, vocab_size, embed_dim, 
pre_trained_embed_matrix, drop_out_rate):\n super(WordEmbeddings, self).__init__()\n self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n self.embeddings.weight.data.copy_(torch.from_numpy(pre_trained_embed_matrix))\n self.dropout = nn.Dropout(drop_out_rate)\n\n def forward(self, words_seq):\n word_embeds = self.embeddings(words_seq)\n word_embeds = self.dropout(word_embeds)\n return word_embeds\n\n def weight(self):\n return self.embeddings.weight\n\n# Potentially use a pretrained BERT - 509\nclass CharEmbeddings(nn.Module):\n def __init__(self, vocab_size, embed_dim, drop_out_rate):\n super(CharEmbeddings, self).__init__()\n\n # Layers\n self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n self.dropout = nn.Dropout(drop_out_rate)\n\n def forward(self, words_seq):\n char_embeds = self.embeddings(words_seq)\n char_embeds = self.dropout(char_embeds)\n return char_embeds\n\n\n# DONT CHANGE CLASSES\n# 543\nclass Encoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, layers, is_bidirectional, drop_out_rate):\n super(Encoder, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.layers = layers\n self.is_bidirectional = is_bidirectional\n self.drop_rate = drop_out_rate\n self.char_embeddings = CharEmbeddings(len(char_vocab), char_embed_dim, drop_rate)\n # Remove In case we want to BERT \n\n self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.layers, batch_first=True,\n bidirectional=self.is_bidirectional)\n self.dropout = nn.Dropout(self.drop_rate)\n self.conv1d = nn.Conv1d(char_embed_dim, char_feature_size, conv_filter_size)\n self.max_pool = nn.MaxPool1d(max_word_len + conv_filter_size - 1, max_word_len + conv_filter_size - 1)\n\n def forward(self, words_input, char_seq, adj, is_training=False):\n char_embeds = self.char_embeddings(char_seq)\n char_embeds = char_embeds.permute(0, 2, 1)\n\n char_feature = torch.tanh(self.max_pool(self.conv1d(char_embeds)))\n char_feature = char_feature.permute(0, 2, 1)\n\n words_input = torch.cat((words_input, char_feature), -1)\n\n outputs, hc = self.lstm(words_input)\n outputs = self.dropout(outputs)\n \n return outputs\n\n\n# 597\nclass Attention(nn.Module):\n def __init__(self, input_dim):\n super(Attention, self).__init__()\n self.input_dim = input_dim\n self.linear_ctx = nn.Linear(self.input_dim, self.input_dim, bias=False)\n self.linear_query = nn.Linear(self.input_dim, self.input_dim, bias=True)\n self.v = nn.Linear(self.input_dim, 1)\n\n def forward(self, s_prev, enc_hs, src_mask):\n uh = self.linear_ctx(enc_hs)\n wq = self.linear_query(s_prev)\n wquh = torch.tanh(wq + uh)\n attn_weights = self.v(wquh).squeeze()\n attn_weights.data.masked_fill_(src_mask.data, -float('inf'))\n attn_weights = F.softmax(attn_weights, dim=-1)\n ctx = torch.bmm(attn_weights.unsqueeze(1), enc_hs).squeeze()\n return ctx, attn_weights\n\n# 617\nclass NGram_Attention(nn.Module):\n def __init__(self, input_dim, N):\n super(NGram_Attention, self).__init__()\n self.input_dim = input_dim\n self.layers = N\n self.V_layers = nn.ModuleList()\n self.W_layers = nn.ModuleList()\n for i in range(N):\n self.V_layers.append(nn.Linear(input_dim, input_dim))\n self.W_layers.append(nn.Linear(input_dim, input_dim))\n\n def forward(self, s_prev, enc_hs, src_mask):\n att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[0](enc_hs).transpose(1, 2)).squeeze()\n att.data.masked_fill_(src_mask.data, -float('inf'))\n att = F.softmax(att, dim=-1)\n ctx = self.W_layers[0](torch.bmm(att.unsqueeze(1), enc_hs).squeeze())\n for i 
in range(1, self.layers):\n enc_hs_ngram = torch.nn.AvgPool1d(i+1, 1)(enc_hs.transpose(1, 2)).transpose(1, 2)\n n_mask = src_mask.unsqueeze(1).float()\n n_mask = torch.nn.AvgPool1d(i+1, 1)(n_mask).squeeze()\n n_mask[n_mask > 0] = 1\n n_mask = n_mask.byte()\n n_att = torch.bmm(s_prev.unsqueeze(1), self.V_layers[i](enc_hs_ngram).transpose(1, 2)).squeeze()\n n_att.data.masked_fill_(n_mask.data, -float('inf'))\n n_att = F.softmax(n_att, dim=-1)\n ctx += self.W_layers[i](torch.bmm(n_att.unsqueeze(1), enc_hs_ngram).squeeze())\n return ctx, att\n\n# 588\ndef mean_over_time(x, mask):\n x.data.masked_fill_(mask.unsqueeze(2).data, 0)\n x = torch.sum(x, dim=1)\n time_steps = torch.sum(mask.eq(0), dim=1, keepdim=True).float()\n x /= time_steps\n return x\n\n# 645\nclass Decoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, layers, drop_out_rate, max_length):\n super(Decoder, self).__init__()\n \n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.layers = layers\n self.drop_rate = drop_out_rate\n self.max_length = max_length\n\n if att_type == 'None':\n self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)\n elif att_type == 'Unigram':\n self.attention = Attention(input_dim)\n self.lstm = nn.LSTMCell(2 * self.input_dim, self.hidden_dim, self.layers)\n else:\n self.attention = NGram_Attention(input_dim, 3)\n self.lstm = nn.LSTMCell(3 * self.input_dim, self.hidden_dim, self.layers)\n\n self.dropout = nn.Dropout(self.drop_rate)\n self.ent_out = nn.Linear(self.input_dim, len(word_vocab))\n\n def forward(self, y_prev, h_prev, enc_hs, src_word_embeds, src_mask, is_training=False):\n src_time_steps = enc_hs.size()[1]\n if att_type == 'None':\n ctx = mean_over_time(enc_hs, src_mask)\n attn_weights = torch.zeros(src_mask.size()).cuda()\n elif att_type == 'Unigram':\n s_prev = h_prev[0]\n s_prev = s_prev.unsqueeze(1)\n s_prev = s_prev.repeat(1, src_time_steps, 1)\n ctx, attn_weights = self.attention(s_prev, enc_hs, src_mask)\n else:\n last_index = src_mask.size()[1] - torch.sum(src_mask, dim=-1).long() - 1\n last_index = last_index.unsqueeze(1).unsqueeze(1).repeat(1, 1, enc_hs.size()[-1])\n enc_last = torch.gather(enc_hs, 1, last_index).squeeze()\n ctx, attn_weights = self.attention(enc_last, src_word_embeds, src_mask)\n ctx = torch.cat((enc_last, ctx), -1)\n\n y_prev = y_prev.squeeze()\n s_cur = torch.cat((y_prev, ctx), 1)\n hidden, cell_state = self.lstm(s_cur, h_prev)\n hidden = self.dropout(hidden)\n output = self.ent_out(hidden)\n return output, (hidden, cell_state), attn_weights\n\n# 690\n\nclass SeqToSeqModel(nn.Module):\n def __init__(self):\n super(SeqToSeqModel, self).__init__()\n self.word_embeddings = WordEmbeddings(len(word_vocab), word_embed_dim, word_embed_matrix, drop_rate)\n self.encoder = Encoder(enc_inp_size, int(enc_hidden_size/2), layers, True, drop_rate)\n self.decoder = Decoder(dec_inp_size, dec_hidden_size, layers, drop_rate, max_trg_len)\n\n def forward(self, src_words_seq, src_chars_seq, src_mask, trg_words_seq, trg_vocab_mask, adj, is_training=False):\n src_word_embeds = self.word_embeddings(src_words_seq)\n trg_word_embeds = self.word_embeddings(trg_words_seq)\n\n batch_len = src_word_embeds.size()[0]\n \n if is_training:\n time_steps = trg_word_embeds.size()[1] - 1\n else:\n time_steps = max_trg_len\n\n encoder_output = self.encoder(src_word_embeds, src_chars_seq, adj, is_training)\n\n h0 = autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))\n h0 = h0.cuda()\n c0 = 
autograd.Variable(torch.FloatTensor(torch.zeros(batch_len, word_embed_dim)))\n c0 = c0.cuda()\n dec_hid = (h0, c0)\n\n if is_training:\n dec_inp = trg_word_embeds[:, 0, :]\n dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training) \n dec_out = dec_out.view(-1, len(word_vocab))\n dec_out = F.log_softmax(dec_out, dim=-1)\n dec_out = dec_out.unsqueeze(1)\n\n for t in range(1, time_steps):\n dec_inp = trg_word_embeds[:, t, :]\n cur_dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n cur_dec_out = cur_dec_out.view(-1, len(word_vocab))\n dec_out = torch.cat((dec_out, F.log_softmax(cur_dec_out, dim=-1).unsqueeze(1)), 1)\n else:\n dec_inp = trg_word_embeds[:, 0, :]\n dec_out, dec_hid, dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n dec_out = dec_out.view(-1, len(word_vocab))\n if copy_on:\n dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))\n dec_out = F.log_softmax(dec_out, dim=-1)\n topv, topi = dec_out.topk(1)\n dec_out_v, dec_out_i = dec_out.topk(1)\n dec_attn_v, dec_attn_i = dec_attn.topk(1)\n\n for t in range(1, time_steps):\n dec_inp = self.word_embeddings(topi.squeeze().detach())\n cur_dec_out, dec_hid, cur_dec_attn = self.decoder(dec_inp, dec_hid, encoder_output, src_word_embeds,\n src_mask, is_training)\n cur_dec_out = cur_dec_out.view(-1, len(word_vocab))\n if copy_on:\n cur_dec_out.data.masked_fill_(trg_vocab_mask.data, -float('inf'))\n cur_dec_out = F.log_softmax(cur_dec_out, dim=-1)\n topv, topi = cur_dec_out.topk(1)\n cur_dec_out_v, cur_dec_out_i = cur_dec_out.topk(1)\n dec_out_i = torch.cat((dec_out_i, cur_dec_out_i), 1)\n cur_dec_attn_v, cur_dec_attn_i = cur_dec_attn.topk(1)\n dec_attn_i = torch.cat((dec_attn_i, cur_dec_attn_i), 1)\n\n if is_training:\n dec_out = dec_out.view(-1, len(word_vocab))\n return dec_out\n else:\n return dec_out_i, dec_attn_i\n\ndef predict(samples, model, model_id):\n pred_batch_size = batch_size\n batch_count = math.ceil(len(samples) / pred_batch_size)\n move_last_batch = False\n if len(samples) - batch_size * (batch_count - 1) == 1:\n move_last_batch = True\n batch_count -= 1\n \n preds = list()\n attns = list()\n \n model.eval()\n \n set_random_seeds(random_seed)\n \n start_time = datetime.datetime.now()\n \n for batch_idx in tqdm(range(0, batch_count)):\n batch_start = batch_idx * pred_batch_size\n batch_end = min(len(samples), batch_start + pred_batch_size)\n if batch_idx == batch_count - 1 and move_last_batch:\n batch_end = len(samples)\n\n cur_batch = samples[batch_start:batch_end]\n cur_samples_input = get_batch_data(cur_batch, False)\n\n src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))\n src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))\n trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))\n trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))\n adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))\n src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))\n\n if torch.cuda.is_available():\n src_words_seq = src_words_seq.cuda()\n src_words_mask = src_words_mask.cuda()\n trg_vocab_mask = trg_vocab_mask.cuda()\n trg_words_seq = trg_words_seq.cuda()\n adj = adj.cuda()\n src_chars_seq = src_chars_seq.cuda()\n\n src_words_seq = autograd.Variable(src_words_seq)\n src_words_mask = 
autograd.Variable(src_words_mask)\n trg_vocab_mask = autograd.Variable(trg_vocab_mask)\n adj = autograd.Variable(adj)\n src_chars_seq = autograd.Variable(src_chars_seq)\n\n trg_words_seq = autograd.Variable(trg_words_seq)\n with torch.no_grad():\n outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj,False)\n\n preds += list(outputs[0].data.cpu().numpy())\n attns += list(outputs[1].data.cpu().numpy())\n model.zero_grad()\n end_time = datetime.datetime.now()\n custom_print('Prediction time:', end_time - start_time)\n return preds, attns\n\ndef train_model(model_id, train_samples, dev_samples, best_model_file):\n train_size = len(train_samples)\n batch_count = int(math.ceil(train_size/batch_size))\n move_last_batch = False\n \n if len(train_samples) - batch_size * (batch_count - 1) == 1:\n move_last_batch = True\n batch_count -= 1\n \n custom_print(batch_count)\n\n # model = get_model(model_id)\n model = SeqToSeqModel()\n\n pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n custom_print('Parameters size:', pytorch_total_params)\n\n custom_print(model)\n\n if torch.cuda.is_available():\n model.cuda()\n if n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n criterion = nn.NLLLoss(ignore_index=0)\n optimizer = optim.Adam(model.parameters())\n\n custom_print(optimizer)\n\n best_dev_acc = -1.0\n best_epoch_idx = -1\n best_epoch_seed = -1\n\n for epoch_idx in range(0, num_epoch):\n model.train()\n model.zero_grad()\n\n custom_print('Epoch:', epoch_idx + 1)\n cur_seed = random_seed + epoch_idx + 1\n set_random_seeds(cur_seed)\n\n cur_shuffled_train_data = shuffle_data(train_samples)\n\n start_time = datetime.datetime.now()\n train_loss_val = 0.0\n\n for batch_idx in tqdm(range(0, batch_count)):\n batch_start = batch_idx * batch_size\n batch_end = min(len(cur_shuffled_train_data), batch_start + batch_size)\n\n if batch_idx == batch_count - 1 and move_last_batch:\n batch_end = len(cur_shuffled_train_data)\n\n cur_batch = cur_shuffled_train_data[batch_start:batch_end]\n cur_samples_input = get_batch_data(cur_batch, True)\n\n # np arrays to tensors\n src_words_seq = torch.from_numpy(cur_samples_input['src_words'].astype('long'))\n src_words_mask = torch.from_numpy(cur_samples_input['src_words_mask'].astype('uint8'))\n trg_vocab_mask = torch.from_numpy(cur_samples_input['trg_vocab_mask'].astype('uint8'))\n trg_words_seq = torch.from_numpy(cur_samples_input['trg_words'].astype('long'))\n adj = torch.from_numpy(cur_samples_input['adj'].astype('float32'))\n src_chars_seq = torch.from_numpy(cur_samples_input['src_chars'].astype('long'))\n\n target = torch.from_numpy(cur_samples_input['target'].astype('long'))\n\n if torch.cuda.is_available():\n src_words_seq = src_words_seq.cuda()\n src_words_mask = src_words_mask.cuda()\n trg_vocab_mask = trg_vocab_mask.cuda()\n trg_words_seq = trg_words_seq.cuda()\n adj = adj.cuda()\n src_chars_seq = src_chars_seq.cuda()\n\n target = target.cuda()\n\n src_words_seq = autograd.Variable(src_words_seq)\n src_words_mask = autograd.Variable(src_words_mask)\n trg_vocab_mask = autograd.Variable(trg_vocab_mask)\n trg_words_seq = autograd.Variable(trg_words_seq)\n adj = autograd.Variable(adj)\n src_chars_seq = autograd.Variable(src_chars_seq)\n\n target = autograd.Variable(target)\n\n outputs = model(src_words_seq, src_chars_seq, src_words_mask, trg_words_seq, trg_vocab_mask, adj, True)\n\n target = target.view(-1, 1).squeeze()\n loss = criterion(outputs, target)\n loss.backward()\n 
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)\n\n if (batch_idx + 1) % update_freq == 0:\n optimizer.step()\n model.zero_grad()\n\n train_loss_val += loss.item()\n\n train_loss_val /= batch_count\n end_time = datetime.datetime.now()\n custom_print('Training loss:', train_loss_val)\n custom_print('Training time:', end_time - start_time)\n\n custom_print('\\nDev Results\\n')\n set_random_seeds(random_seed)\n dev_preds, dev_attns = predict(dev_samples, model, model_id)\n \n write_test_res(dev_samples, dev_preds, dev_attns, os.path.join(trg_data_folder, 'dev.out'))\n\n ref_lines = open(trg_dev_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'dev.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n dev_acc = calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n\n\n # pred_pos, gt_pos, correct_pos = get_F1(dev_samples, dev_preds, dev_attns)\n # custom_print(pred_pos, '\\t', gt_pos, '\\t', correct_pos)\n # p = float(correct_pos) / (pred_pos + 1e-8)\n # r = float(correct_pos) / (gt_pos + 1e-8)\n # dev_acc = (2 * p * r) / (p + r + 1e-8)\n # custom_print('F1:', dev_acc)\n\n if dev_acc >= best_dev_acc:\n best_epoch_idx = epoch_idx + 1\n best_epoch_seed = cur_seed\n custom_print('model saved......')\n best_dev_acc = dev_acc\n torch.save(model.state_dict(), best_model_file)\n\n custom_print('\\n\\n')\n if epoch_idx + 1 - best_epoch_idx >= early_stop_cnt:\n break\n\n custom_print('*******')\n custom_print('Best Epoch:', best_epoch_idx)\n custom_print('Best Epoch Seed:', best_epoch_seed)\n\n\nif __name__ == \"__main__\":\n \n os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]\n random_seed = int(sys.argv[2])\n src_data_folder = sys.argv[3]\n trg_data_folder = sys.argv[4]\n job_mode = sys.argv[5]\n embedding_type = sys.argv[6]\n granular_mode = 1\n\n n_gpu = torch.cuda.device_count()\n set_random_seeds(random_seed)\n\n\n if not os.path.exists(trg_data_folder):\n os.mkdir(trg_data_folder)\n model_name = 1\n\n #Tunable Hyperparameters\n\n batch_size = 32\n num_epoch = 30\n max_src_len = 100\n max_trg_len = 50\n\n if embedding_type == 'w2v':\n embedding_file = os.path.join(src_data_folder, 'w2v.txt')\n else:\n embedding_file = os.path.join(src_data_folder, 'Bert_embeddings.txt')\n\n update_freq = 1\n enc_type = ['LSTM', 'GCN', 'LSTM-GCN'][0]\n att_type = ['None', 'Unigram', 'N-Gram-Enc'][1]\n\n copy_on = True\n\n gcn_num_layers = 3\n\n if embedding_type == 'w2v':\n word_embed_dim = 300\n else:\n word_embed_dim = 768\n \n word_min_freq = 2\n char_embed_dim = 50\n char_feature_size = 50\n conv_filter_size = 3\n max_word_len = 10\n\n enc_inp_size = word_embed_dim + char_feature_size\n enc_hidden_size = word_embed_dim\n dec_inp_size = enc_hidden_size\n dec_hidden_size = dec_inp_size\n\n drop_rate = 0.3\n layers = 1\n early_stop_cnt = 20\n sample_cnt = 0\n Sample = recordclass(\"Sample\", \"Id SrcLen SrcWords TrgLen TrgWords\")\n\n events_file = os.path.join(src_data_folder, 'event_types.txt')\n arguments_file = os.path.join(src_data_folder, 'arguments.txt')\n roles_file = os.path.join(src_data_folder, 'roles.txt')\n\n events = get_relations(events_file)\n arguments = get_relations(arguments_file)\n roles = get_relations(roles_file)\n\n\n # train a model\n if job_mode == 'train':\n logger = open(os.path.join(trg_data_folder, 'training.log'), 'w')\n custom_print(sys.argv)\n custom_print(max_src_len, 
max_trg_len, drop_rate, layers)\n custom_print('loading data......')\n model_file_name = os.path.join(trg_data_folder, 'model.h5py')\n src_train_file = os.path.join(src_data_folder, 'train.sent')\n trg_train_file = os.path.join(src_data_folder, 'train.tup')\n train_data = read_data(src_train_file, trg_train_file, 1)\n\n src_dev_file = os.path.join(src_data_folder, 'dev.sent')\n trg_dev_file = os.path.join(src_data_folder, 'dev.tup')\n dev_data = read_data(src_dev_file, trg_dev_file, 2)\n\n custom_print('Training data size:', len(train_data))\n custom_print('Development data size:', len(dev_data))\n\n custom_print(\"preparing vocabulary......\")\n save_vocab = os.path.join(trg_data_folder, 'vocab.pkl')\n word_vocab, rev_word_vocab, char_vocab, word_embed_matrix = build_vocab(train_data, events, arguments, roles, save_vocab,\n embedding_file)\n\n custom_print(\"Training started......\")\n train_model(model_name, train_data, dev_data, model_file_name)\n logger.close()\n\n if job_mode == 'test':\n logger = open(os.path.join(trg_data_folder, 'test.log'), 'w')\n custom_print(sys.argv)\n custom_print(\"loading word vectors......\")\n vocab_file_name = os.path.join(trg_data_folder, 'vocab.pkl')\n word_vocab, char_vocab = load_vocab(vocab_file_name)\n\n rev_word_vocab = OrderedDict()\n for word in word_vocab:\n idx = word_vocab[word]\n rev_word_vocab[idx] = word\n\n word_embed_matrix = np.zeros((len(word_vocab), word_embed_dim), dtype=np.float32)\n custom_print('vocab size:', len(word_vocab))\n\n src_test_file = os.path.join(src_data_folder, 'test.sent')\n trg_test_file = os.path.join(src_data_folder, 'test.tup')\n test_data = read_data(src_test_file, trg_test_file, 3)\n\n custom_print('Test data size:', len(test_data))\n\n custom_print('seed:', random_seed)\n model_file = os.path.join(trg_data_folder, 'model.h5py')\n\n best_model = get_model(model_name)\n custom_print(best_model)\n if torch.cuda.is_available():\n best_model.cuda()\n if n_gpu > 1:\n best_model = torch.nn.DataParallel(best_model)\n best_model.load_state_dict(torch.load(model_file))\n\n custom_print('\\nTest Results\\n')\n set_random_seeds(random_seed)\n test_preds, test_attns = predict(test_data, best_model, model_name)\n\n custom_print('Copy On')\n write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test.out'))\n\n # ref_lines = open(trg_test_file).readlines()\n # pred_lines = open(os.path.join(trg_data_folder, 'test.out')).readlines()\n # event_lines = open(events_file).readlines()\n # argument_lines = open(arguments_file).readlines()\n # roles_lines = open(roles_file).readlines()\n\n ref_lines = open(trg_test_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'test.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n mode = 1\n custom_print('Overall F1')\n # custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))\n calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n\n copy_on = False\n custom_print('Copy Off')\n set_random_seeds(random_seed)\n test_preds, test_attns = predict(test_data, best_model, model_name)\n write_test_res(test_data, test_preds, test_attns, os.path.join(trg_data_folder, 'test_without_copy.out'))\n\n # ref_lines = open(trg_test_file).readlines()\n # pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).readlines()\n # event_lines = 
open(events_file).readlines()\n # argument_lines = open(arguments_file).readlines()\n # roles_lines = open(roles_file).readlines()\n\n ref_lines = open(trg_test_file).read().splitlines()\n pred_lines = open(os.path.join(trg_data_folder, 'test_without_copy.out')).read().splitlines()\n event_lines = open(events_file).read().splitlines()\n argument_lines = open(arguments_file).read().splitlines()\n roles_lines = open(roles_file).read().splitlines()\n\n mode = 1\n custom_print('Overall F1')\n # custom_print(cal_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines, mode))\n calculate_f1(ref_lines, pred_lines, event_lines, argument_lines, roles_lines)\n logger.close()\n\n\n\n\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.nn.functional.softmax",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.Dropout",
"torch.autograd.Variable",
"torch.gather",
"torch.cuda.device_count",
"torch.from_numpy",
"torch.tanh",
"torch.nn.DataParallel",
"numpy.random.uniform",
"torch.nn.MaxPool1d",
"torch.nn.LSTM",
"torch.nn.AvgPool1d",
"torch.load",
"numpy.zeros",
"torch.manual_seed",
"torch.zeros",
"numpy.float32",
"torch.nn.Conv1d",
"torch.sum",
"torch.nn.NLLLoss",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.LSTMCell",
"numpy.array"
]
] |
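The parsing and scoring code above implies a target-line format in which tuples are separated by '|' and fields by ';', in the order trigger ; event type ; argument ; argument type ; role, with the literal token 'none' in the argument slot for event-only tuples. A tiny worked example of that format (the label strings themselves are illustrative, not taken from the dataset):

example_trg_line = (
    "fired ; Personnel:End-Position ; none"
    " | fired ; Personnel:End-Position ; employee ; PER ; Person"
)
for t in example_trg_line.split('|'):
    fields = [f.strip() for f in t.split(';')]
    print(len(fields), fields)  # 3 fields -> event-only tuple, 5 fields -> full tuple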
samellem/autodp | [
"fd14fed07e0bb67fca5f7e82bbdab6cf60b339d3"
] | [
"test/unit_test_fdp_to_approxdp_conversion.py"
] | [
"from autodp.mechanism_zoo import GaussianMechanism\nfrom autodp.dp_bank import get_eps_ana_gaussian\n\nimport numpy as np\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nparams = [0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0]\n\n\ndef _fdp_conversion(sigma):\n\n delta_list = [0,1e-8, 1e-6, 1e-4, 1e-2, 0.3, 0.5, 1]\n\n # f-DP implementation\n gm3 = GaussianMechanism(sigma, name='GM3', RDP_off=True, approxDP_off=True, fdp_off=False)\n\n # direct approxdp implementation\n agm = lambda x: get_eps_ana_gaussian(sigma, x)\n\n eps_direct = np.array([agm(delta) for delta in delta_list])\n\n # the fdp is converted by numerical methods from privacy profile.\n eps_converted = np.array([gm3.get_approxDP(delta) for delta in delta_list])\n max_diff = eps_direct - eps_converted\n\n rel_diff = max_diff / (eps_direct+1e-10)\n\n if np.isinf(eps_direct[0]) and np.isinf(eps_converted[0]):\n rel_diff[0] = 0\n return rel_diff\n\n\n_fdp_conversion(1.0)\n\nclass Test_approxDP2fDP_Conversion(parameterized.TestCase):\n\n @parameterized.parameters(p for p in params)\n def test_fdp_conversion(self, sigma):\n max_diff = _fdp_conversion(sigma)\n self.assertSequenceAlmostEqual(max_diff, np.zeros_like(max_diff), places=2)\n\n\nif __name__ == '__main__':\n absltest.main()\n\n"
] | [
[
"numpy.zeros_like",
"numpy.isinf"
]
] |
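A single-point version of the comparison exercised by the test above, using only calls that already appear in it; the sigma and delta values are illustrative.

from autodp.mechanism_zoo import GaussianMechanism
from autodp.dp_bank import get_eps_ana_gaussian

sigma, delta = 2.0, 1e-6
gm = GaussianMechanism(sigma, name='GM', RDP_off=True, approxDP_off=True, fdp_off=False)
eps_from_fdp = gm.get_approxDP(delta)       # epsilon converted numerically from the f-DP curve
eps_analytic = get_eps_ana_gaussian(sigma, delta)
print(eps_from_fdp, eps_analytic)           # the test expects these to agree to ~2 decimal places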
GautamV234/pyro | [
"d5474ebc6101b330bf9060a3731830d4b6a585d5"
] | [
"pyro/contrib/gp/models/gpr.py"
] | [
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nimport torch.distributions as torchdist\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.gp.models.model import GPModel\nfrom pyro.contrib.gp.util import conditional\nfrom pyro.nn.module import PyroParam, pyro_method\nfrom pyro.util import warn_if_nan\n\n\nclass GPRegression(GPModel):\n r\"\"\"\n Gaussian Process Regression model.\n\n The core of a Gaussian Process is a covariance function :math:`k` which governs\n the similarity between input points. Given :math:`k`, we can establish a\n distribution over functions :math:`f` by a multivarite normal distribution\n\n .. math:: p(f(X)) = \\mathcal{N}(0, k(X, X)),\n\n where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance\n matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs\n :math:`(x, z)`. This distribution is usually denoted by\n\n .. math:: f \\sim \\mathcal{GP}(0, k).\n\n .. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can\n also be specified by a mean function :math:`m` (which is a zero-value function\n by default). In that case, its distribution will be\n\n .. math:: p(f(X)) = \\mathcal{N}(m(X), k(X, X)).\n\n Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process\n Regression model takes the form\n\n .. math::\n f &\\sim \\mathcal{GP}(0, k(X, X)),\\\\\n y & \\sim f + \\epsilon,\n\n where :math:`\\epsilon` is Gaussian noise.\n\n .. note:: This model has :math:`\\mathcal{O}(N^3)` complexity for training,\n :math:`\\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number\n of train inputs.\n\n Reference:\n\n [1] `Gaussian Processes for Machine Learning`,\n Carl E. Rasmussen, Christopher K. I. Williams\n\n :param torch.Tensor X: A input data for training. Its first dimension is the number\n of data points.\n :param torch.Tensor y: An output data for training. Its last dimension is the\n number of data points.\n :param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which\n is the covariance function :math:`k`.\n :param torch.Tensor noise: Variance of Gaussian noise of this model.\n :param callable mean_function: An optional mean function :math:`m` of this Gaussian\n process. 
By default, we use zero mean.\n :param float jitter: A small positive term which is added into the diagonal part of\n a covariance matrix to help stablize its Cholesky decomposition.\n \"\"\"\n\n def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):\n assert isinstance(\n X, torch.Tensor\n ), \"X needs to be a torch Tensor instead of a {}\".format(type(X))\n if y is not None:\n assert isinstance(\n y, torch.Tensor\n ), \"y needs to be a torch Tensor instead of a {}\".format(type(y))\n super().__init__(X, y, kernel, mean_function, jitter)\n\n noise = self.X.new_tensor(1.0) if noise is None else noise\n self.noise = PyroParam(noise, constraints.positive)\n\n @pyro_method\n def model(self):\n self.set_mode(\"model\")\n\n N = self.X.size(0)\n Kff = self.kernel(self.X)\n Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to diagonal\n Lff = torch.linalg.cholesky(Kff)\n\n zero_loc = self.X.new_zeros(self.X.size(0))\n f_loc = zero_loc + self.mean_function(self.X)\n if self.y is None:\n f_var = Lff.pow(2).sum(dim=-1)\n return f_loc, f_var\n else:\n return pyro.sample(\n self._pyro_get_fullname(\"y\"),\n dist.MultivariateNormal(f_loc, scale_tril=Lff)\n .expand_by(self.y.shape[:-1])\n .to_event(self.y.dim() - 1),\n obs=self.y,\n )\n\n @pyro_method\n def guide(self):\n self.set_mode(\"guide\")\n self._load_pyro_samples()\n\n def forward(self, Xnew, full_cov=False, noiseless=True):\n r\"\"\"\n Computes the mean and covariance matrix (or variance) of Gaussian Process\n posterior on a test input data :math:`X_{new}`:\n\n .. math:: p(f^* \\mid X_{new}, X, y, k, \\epsilon) = \\mathcal{N}(loc, cov).\n\n .. note:: The noise parameter ``noise`` (:math:`\\epsilon`) together with\n kernel's parameters have been learned from a training procedure (MCMC or\n SVI).\n\n :param torch.Tensor Xnew: A input data for testing. Note that\n ``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.\n :param bool full_cov: A flag to decide if we want to predict full covariance\n matrix or just variance.\n :param bool noiseless: A flag to decide if we want to include noise in the\n prediction output or not.\n :returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`\n :rtype: tuple(torch.Tensor, torch.Tensor)\n \"\"\"\n self._check_Xnew_shape(Xnew)\n self.set_mode(\"guide\")\n\n N = self.X.size(0)\n Kff = self.kernel(self.X).contiguous()\n Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to the diagonal\n Lff = torch.linalg.cholesky(Kff)\n\n y_residual = self.y - self.mean_function(self.X)\n loc, cov = conditional(\n Xnew,\n self.X,\n self.kernel,\n y_residual,\n None,\n Lff,\n full_cov,\n jitter=self.jitter,\n )\n\n if full_cov and not noiseless:\n M = Xnew.size(0)\n cov = cov.contiguous()\n cov.view(-1, M * M)[:, :: M + 1] += self.noise # add noise to the diagonal\n if not full_cov and not noiseless:\n cov = cov + self.noise\n\n return loc + self.mean_function(Xnew), cov\n\n def iter_sample(self, noiseless=True):\n r\"\"\"\n Iteratively constructs a sample from the Gaussian Process posterior.\n\n Recall that at test input points :math:`X_{new}`, the posterior is\n multivariate Gaussian distributed with mean and covariance matrix\n given by :func:`forward`.\n\n This method samples lazily from this multivariate Gaussian. The advantage\n of this approach is that later query points can depend upon earlier ones.\n Particularly useful when the querying is to be done by an optimisation\n routine.\n\n .. 
note:: The noise parameter ``noise`` (:math:`\\epsilon`) together with\n kernel's parameters have been learned from a training procedure (MCMC or\n SVI).\n\n :param bool noiseless: A flag to decide if we want to add sampling noise\n to the samples beyond the noise inherent in the GP posterior.\n :returns: sampler\n :rtype: function\n \"\"\"\n noise = self.noise.detach()\n X = self.X.clone().detach()\n y = self.y.clone().detach()\n N = X.size(0)\n Kff = self.kernel(X).contiguous()\n Kff.view(-1)[:: N + 1] += noise # add noise to the diagonal\n\n outside_vars = {\"X\": X, \"y\": y, \"N\": N, \"Kff\": Kff}\n\n def sample_next(xnew, outside_vars):\n \"\"\"Repeatedly samples from the Gaussian process posterior,\n conditioning on previously sampled values.\n \"\"\"\n warn_if_nan(xnew)\n\n # Variables from outer scope\n X, y, Kff = outside_vars[\"X\"], outside_vars[\"y\"], outside_vars[\"Kff\"]\n\n # Compute Cholesky decomposition of kernel matrix\n Lff = torch.linalg.cholesky(Kff)\n y_residual = y - self.mean_function(X)\n\n # Compute conditional mean and variance\n loc, cov = conditional(\n xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter\n )\n if not noiseless:\n cov = cov + noise\n\n ynew = torchdist.Normal(\n loc + self.mean_function(xnew), cov.sqrt()\n ).rsample()\n\n # Update kernel matrix\n N = outside_vars[\"N\"]\n Kffnew = Kff.new_empty(N + 1, N + 1)\n Kffnew[:N, :N] = Kff\n cross = self.kernel(X, xnew).squeeze()\n end = self.kernel(xnew, xnew).squeeze()\n Kffnew[N, :N] = cross\n Kffnew[:N, N] = cross\n # No noise, just jitter for numerical stability\n Kffnew[N, N] = end + self.jitter\n # Heuristic to avoid adding degenerate points\n if Kffnew.logdet() > -15.0:\n outside_vars[\"Kff\"] = Kffnew\n outside_vars[\"N\"] += 1\n outside_vars[\"X\"] = torch.cat((X, xnew))\n outside_vars[\"y\"] = torch.cat((y, ynew))\n\n return ynew\n\n return lambda xnew: sample_next(xnew, outside_vars)\n"
] | [
[
"torch.linalg.cholesky",
"torch.cat"
]
] |
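A minimal end-to-end sketch of how the GPRegression model above is typically constructed and queried (assumes a standard pyro.contrib.gp installation; the kernel choice and toy data are illustrative, and hyperparameter training via SVI or MCMC is omitted).

import torch
import pyro.contrib.gp as gp

X = torch.linspace(0, 5, 20).unsqueeze(-1)            # 20 one-dimensional training inputs
y = torch.sin(X).squeeze(-1) + 0.1 * torch.randn(20)  # noisy observations
kernel = gp.kernels.RBF(input_dim=1)
gpr = gp.models.GPRegression(X, y, kernel, noise=torch.tensor(0.1))

Xnew = torch.linspace(0, 5, 7).unsqueeze(-1)
loc, var = gpr(Xnew, full_cov=False, noiseless=False)  # posterior mean and variance at Xnew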
aphearin/c3dev | [
"d36d083c9eb688640670dbe066bf299777a78ba7"
] | [
"c3dev/galmocks/data_loaders/load_tng_data.py"
] | [
"\"\"\"\n\"\"\"\nfrom collections import OrderedDict\nimport numpy as np\nfrom halotools.utils import sliding_conditional_percentile\nfrom astropy.table import Table\nfrom ..utils.galprops import compute_lg_ssfr\n\n\nSANDY_SCRATCH_PATH = \"/global/cscratch1/sd/sihany/TNG300-1/output\"\nBEBOP = \"/lcrc/project/halotools/C3EMC/TNG300-1\"\nNERSC = \"/global/cfs/cdirs/desi/users/aphearin/C3EMC/TNG300-1\"\nTNG_LBOX = 205.0\n\n\ndef load_tng_subhalos(drn=NERSC, snapNum=55):\n import illustris_python as il\n\n subhalos = il.groupcat.loadSubhalos(drn, snapNum)\n return subhalos\n\n\ndef load_tng_host_halos(drn=NERSC, snapNum=55):\n import illustris_python as il\n\n host_halos = il.groupcat.loadHalos(drn, snapNum)\n return host_halos\n\n\ndef get_value_added_tng_data(subs, hosts):\n hosts[\"halo_id\"] = np.arange(len(hosts[\"GroupMass\"])).astype(int)\n\n host_keys_to_keep = [\"halo_id\", \"GroupFirstSub\", \"GroupPos\", \"GroupVel\"]\n tng_hosts = Table(OrderedDict([(key, hosts[key]) for key in host_keys_to_keep]))\n tng_hosts.rename_column(\"GroupPos\", \"pos\")\n tng_hosts.rename_column(\"GroupVel\", \"vel\")\n tng_hosts[\"logmh\"] = np.log10(hosts[\"GroupMass\"]) + 10\n tng_hosts[\"pos\"] = tng_hosts[\"pos\"] / 1000\n\n tng = Table()\n tng[\"host_halo_logmh\"] = tng_hosts[\"logmh\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_pos\"] = tng_hosts[\"pos\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_vel\"] = tng_hosts[\"vel\"][subs[\"SubhaloGrNr\"]]\n\n tng[\"subhalo_pos\"] = subs[\"SubhaloPos\"] / 1000\n tng[\"subhalo_vel\"] = subs[\"SubhaloVel\"]\n tng[\"subhalo_mass\"] = subs[\"SubhaloMass\"] * 1e10\n tng[\"subhalo_vmax\"] = subs[\"SubhaloVmax\"]\n tng[\"subhalo_vdisp\"] = subs[\"SubhaloVelDisp\"]\n\n tng[\"stellar_metallicity\"] = subs[\"SubhaloStarMetallicity\"]\n tng[\"subhalo_mgas\"] = subs[\"SubhaloMassType\"][:, 0] * 1e10\n tng[\"subhalo_dm\"] = subs[\"SubhaloMassType\"][:, 1] * 1e10\n tng[\"mstar\"] = subs[\"SubhaloMassType\"][:, 4] * 1e10\n tng[\"sfr\"] = subs[\"SubhaloSFR\"]\n tng[\"lgssfr\"] = compute_lg_ssfr(tng[\"mstar\"], tng[\"sfr\"])\n\n tng[\"host_halo_index\"] = subs[\"SubhaloGrNr\"]\n\n subhalo_id = np.arange(len(subs[\"SubhaloGrNr\"])).astype(int)\n subhalo_cen_id = subhalo_id[tng_hosts[\"GroupFirstSub\"]]\n tng[\"is_central\"] = subhalo_cen_id == subhalo_id\n\n # Broadcast properties of the central subhalo to each host\n tng_hosts[\"central_subhalo_vmax\"] = subs[\"SubhaloVmax\"][tng_hosts[\"GroupFirstSub\"]]\n tng_hosts[\"central_subhalo_vdisp\"] = subs[\"SubhaloVelDisp\"][\n tng_hosts[\"GroupFirstSub\"]\n ]\n\n # Broadcast properties of the central subhalo to each group member\n tng[\"host_halo_vmax\"] = tng_hosts[\"central_subhalo_vmax\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_vdisp\"] = tng_hosts[\"central_subhalo_vdisp\"][subs[\"SubhaloGrNr\"]]\n\n tng_hosts[\"p_vmax\"] = sliding_conditional_percentile(\n tng_hosts[\"logmh\"], tng_hosts[\"central_subhalo_vmax\"], 101\n )\n tng_hosts[\"p_vdisp\"] = sliding_conditional_percentile(\n tng_hosts[\"logmh\"], tng_hosts[\"central_subhalo_vdisp\"], 101\n )\n tng[\"host_halo_p_vmax\"] = tng_hosts[\"p_vmax\"][subs[\"SubhaloGrNr\"]]\n tng[\"host_halo_p_vdisp\"] = tng_hosts[\"p_vdisp\"][subs[\"SubhaloGrNr\"]]\n\n return tng, tng_hosts\n"
] | [
[
"numpy.log10"
]
] |
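A hypothetical call sequence for the loader above. The data directory is a placeholder, illustris_python and the TNG300-1 group catalogs must be available, and the import path simply mirrors the sample's file path.

from c3dev.galmocks.data_loaders import load_tng_data as ltd

drn = "/path/to/TNG300-1/output"  # placeholder; see the NERSC/BEBOP constants above
subs = ltd.load_tng_subhalos(drn=drn, snapNum=55)
hosts = ltd.load_tng_host_halos(drn=drn, snapNum=55)
tng, tng_hosts = ltd.get_value_added_tng_data(subs, hosts)
print(len(tng), len(tng_hosts), tng.colnames[:5])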
yurivict/incubator-mxnet | [
"3d38dbde744954854015919d4faf56ac1aea16de"
] | [
"python/mxnet/model.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines\n# pylint: disable=too-many-branches, too-many-statements\n\"\"\"MXNet model module\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport time\nimport logging\nimport warnings\nfrom collections import namedtuple\nimport numpy as np\n\nfrom . import io\nfrom . import ndarray as nd\nfrom . import symbol as sym\nfrom . import optimizer as opt\nfrom . import metric\nfrom . import kvstore as kvs\nfrom .context import Context, cpu\nfrom .initializer import Uniform\nfrom .optimizer import get_updater\nfrom .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data\nfrom .io import DataDesc\nfrom .base import mx_real_t\n\nBASE_ESTIMATOR = object\n\ntry:\n from sklearn.base import BaseEstimator\n BASE_ESTIMATOR = BaseEstimator\nexcept ImportError:\n SKLEARN_INSTALLED = False\n\n# Parameter to pass to batch_end_callback\nBatchEndParam = namedtuple('BatchEndParams',\n ['epoch',\n 'nbatch',\n 'eval_metric',\n 'locals'])\n\ndef _create_sparse_kvstore(kvstore):\n \"\"\"Create kvstore assuming some parameters' storage types are row_sparse.\n\n Parameters\n ----------\n kvstore : KVStore or str\n The kvstore.\n\n Returns\n -------\n kvstore : KVStore\n update_on_kvstore : bool. Always True.\n \"\"\"\n # always update on kvstore\n update_on_kvstore = True\n if isinstance(kvstore, kvs.KVStore):\n kv = kvstore\n elif isinstance(kvstore, str):\n kv = kvs.create(kvstore)\n else:\n raise TypeError(\"Cannot create '%s' KVStore with row_sparse parameters. 
\"\n \"The type must be KVStore or str.\" % kvstore)\n return (kv, update_on_kvstore)\n\ndef _create_kvstore(kvstore, num_device, arg_params):\n \"\"\"Create kvstore\n This function select and create a proper kvstore if given the kvstore type.\n\n Parameters\n ----------\n kvstore : KVStore or str\n The kvstore.\n num_device : int\n The number of devices\n arg_params : dict of str to `NDArray`.\n Model parameter, dict of name to `NDArray` of net's weights.\n \"\"\"\n update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', \"1\")))\n if kvstore is None:\n kv = None\n elif isinstance(kvstore, kvs.KVStore):\n kv = kvstore\n elif isinstance(kvstore, str):\n # create kvstore using the string type\n if num_device == 1 and 'dist' not in kvstore:\n # no need to use kv for single device and single machine\n kv = None\n else:\n kv = kvs.create(kvstore)\n if kvstore == 'local':\n # automatically select a proper local\n max_size = max(np.prod(param.shape) for param in\n arg_params.values())\n if max_size > 1024 * 1024 * 16:\n update_on_kvstore = False\n else:\n raise TypeError('kvstore must be KVStore, str or None')\n\n if kv is None:\n update_on_kvstore = False\n\n return (kv, update_on_kvstore)\n\ndef _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):\n \"\"\"Initialize kvstore\"\"\"\n for idx, param_on_devs in enumerate(param_arrays):\n name = param_names[idx]\n kvstore.init(name, arg_params[name])\n\n if update_on_kvstore:\n kvstore.pull(name, param_on_devs, priority=-idx)\n\ndef _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):\n \"\"\"Perform update of param_arrays from grad_arrays on NCCL kvstore.\"\"\"\n valid_indices = [index for index, grad_list in\n enumerate(grad_arrays) if grad_list[0] is not None]\n valid_grad_arrays = [grad_arrays[i] for i in valid_indices]\n valid_param_arrays = [param_arrays[i] for i in valid_indices]\n valid_param_names = [param_names[i] for i in valid_indices]\n size = len(valid_grad_arrays)\n start = 0\n # Use aggregation by default only with NCCL\n default_batch = '16'\n batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))\n while start < size:\n end = start + batch if start + batch < size else size\n # push gradient, priority is negative index\n kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)\n # pull back the weights\n kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)\n start = end\n\ndef _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):\n \"\"\"Perform update of param_arrays from grad_arrays on kvstore.\"\"\"\n for index, pair in enumerate(zip(param_arrays, grad_arrays)):\n arg_list, grad_list = pair\n if grad_list[0] is None:\n continue\n name = param_names[index]\n # push gradient, priority is negative index\n kvstore.push(name, grad_list, priority=-index)\n # pull back the weights\n kvstore.pull(name, arg_list, priority=-index)\n\ndef _update_params(param_arrays, grad_arrays, updater, num_device,\n kvstore=None, param_names=None):\n \"\"\"Perform update of param_arrays from grad_arrays not on kvstore.\"\"\"\n updates = [[] for _ in range(num_device)]\n for i, pair in enumerate(zip(param_arrays, grad_arrays)):\n arg_list, grad_list = pair\n if grad_list[0] is None:\n continue\n index = i\n if kvstore:\n name = param_names[index]\n # push gradient, priority is negative index\n kvstore.push(name, grad_list, priority=-index)\n # pull back the sum gradients, to 
the same locations.\n kvstore.pull(name, grad_list, priority=-index)\n for k, p in enumerate(zip(arg_list, grad_list)):\n # faked an index here, to make optimizer create diff\n # state for the same index but on diff devs, TODO(mli)\n # use a better solution later\n w, g = p\n updates[k].append((index*num_device+k, g, w))\n for dev_updates in updates:\n # update params if param_arrays and grad_arrays are not empty\n if dev_updates:\n i, w, g = zip(*dev_updates)\n updater(i, w, g)\n\n\ndef _multiple_callbacks(callbacks, *args, **kwargs):\n \"\"\"Sends args and kwargs to any configured callbacks.\n This handles the cases where the 'callbacks' variable\n is ``None``, a single function, or a list.\n \"\"\"\n if isinstance(callbacks, list):\n for cb in callbacks:\n cb(*args, **kwargs)\n return\n if callbacks:\n callbacks(*args, **kwargs)\n\n\ndef _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,\n arg_params, aux_params,\n begin_epoch, end_epoch, epoch_size, optimizer,\n kvstore, update_on_kvstore,\n train_data, eval_data=None, eval_metric=None,\n epoch_end_callback=None, batch_end_callback=None,\n logger=None, work_load_list=None, monitor=None,\n eval_end_callback=None,\n eval_batch_end_callback=None, sym_gen=None):\n \"\"\"Internal training function on multiple devices.\n This function will also work for single device as well.\n\n Parameters\n ----------\n symbol : Symbol\n The network configuration.\n ctx : list of Context\n The training devices.\n arg_names: list of str\n Name of all arguments of the network.\n param_names: list of str\n Name of all trainable parameters of the network.\n aux_names: list of str\n Name of all auxiliary states of the network.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n begin_epoch : int\n The begining training epoch.\n end_epoch : int\n The end training epoch.\n epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : Optimizer\n The optimization algorithm\n train_data : DataIter\n Training data iterator.\n eval_data : DataIter\n Validation data iterator.\n eval_metric : EvalMetric\n An evaluation function or a list of evaluation functions.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback : callable(BatchEndParams)\n A callback that is invoked at end of each batch.\n This can be used to measure speed, get result from evaluation metric. 
etc.\n kvstore : KVStore\n The KVStore.\n update_on_kvstore : bool\n Whether or not perform weight updating on kvstore.\n logger : logging logger\n When not specified, default logger will be used.\n work_load_list : list of float or int, optional\n The list of work load for different devices,\n in the same order as ``ctx``.\n monitor : Monitor, optional\n Monitor installed to executor,\n for monitoring outputs, weights, and gradients for debugging.\n Notes\n -----\n - This function will inplace update the NDArrays in `arg_params` and `aux_states`.\n \"\"\"\n if logger is None:\n logger = logging\n executor_manager = DataParallelExecutorManager(symbol=symbol,\n sym_gen=sym_gen,\n ctx=ctx,\n train_data=train_data,\n param_names=param_names,\n arg_names=arg_names,\n aux_names=aux_names,\n work_load_list=work_load_list,\n logger=logger)\n if monitor:\n executor_manager.install_monitor(monitor)\n\n executor_manager.set_params(arg_params, aux_params)\n\n if not update_on_kvstore:\n updater = get_updater(optimizer)\n else:\n kvstore.set_optimizer(optimizer)\n\n if kvstore:\n _initialize_kvstore(kvstore=kvstore,\n param_arrays=executor_manager.param_arrays,\n arg_params=arg_params,\n param_names=executor_manager.param_names,\n update_on_kvstore=update_on_kvstore)\n\n # Now start training\n train_data.reset()\n for epoch in range(begin_epoch, end_epoch):\n # Training phase\n tic = time.time()\n eval_metric.reset()\n nbatch = 0\n # Iterate over training data.\n while True:\n do_reset = True\n for data_batch in train_data:\n executor_manager.load_data_batch(data_batch)\n\n if monitor is not None:\n monitor.tic()\n\n executor_manager.forward(is_train=True)\n executor_manager.backward()\n\n if update_on_kvstore:\n if 'nccl' in kvstore.type:\n _update_params_on_kvstore_nccl(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n kvstore, executor_manager.param_names)\n else:\n _update_params_on_kvstore(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n kvstore, executor_manager.param_names)\n else:\n _update_params(executor_manager.param_arrays,\n executor_manager.grad_arrays,\n updater=updater,\n num_device=len(ctx),\n kvstore=kvstore,\n param_names=executor_manager.param_names)\n\n if monitor is not None:\n monitor.toc_print()\n\n # evaluate at end, so we can lazy copy\n executor_manager.update_metric(eval_metric, data_batch.label)\n\n nbatch += 1\n # batch callback (for print purpose)\n if batch_end_callback is not None:\n batch_end_params = BatchEndParam(epoch=epoch,\n nbatch=nbatch,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(batch_end_callback, batch_end_params)\n\n # this epoch is done possibly earlier\n if epoch_size is not None and nbatch >= epoch_size:\n do_reset = False\n break\n\n if do_reset:\n logger.info('Epoch[%d] Resetting Data Iterator', epoch)\n train_data.reset()\n\n # this epoch is done\n if epoch_size is None or nbatch >= epoch_size:\n break\n\n toc = time.time()\n logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))\n\n if epoch_end_callback or epoch + 1 == end_epoch:\n executor_manager.copy_to(arg_params, aux_params)\n\n _multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)\n\n # evaluation\n if eval_data:\n eval_metric.reset()\n eval_data.reset()\n total_num_batch = 0\n for i, eval_batch in enumerate(eval_data):\n executor_manager.load_data_batch(eval_batch)\n executor_manager.forward(is_train=False)\n executor_manager.update_metric(eval_metric, eval_batch.label)\n if eval_batch_end_callback is not 
None:\n batch_end_params = BatchEndParam(epoch=epoch,\n nbatch=i,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(eval_batch_end_callback, batch_end_params)\n total_num_batch += 1\n if eval_end_callback is not None:\n eval_end_params = BatchEndParam(epoch=epoch,\n nbatch=total_num_batch,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(eval_end_callback, eval_end_params)\n eval_data.reset()\n # end of all epochs\n\n\ndef save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):\n \"\"\"Checkpoint the model data into file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n The epoch number of the model.\n symbol : Symbol\n The input Symbol.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n remove_amp_cast : bool, optional\n Whether to remove the amp_cast and amp_multicast operators, before saving the model.\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n if symbol is not None:\n symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)\n\n save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}\n save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})\n param_name = '%s-%04d.params' % (prefix, epoch)\n nd.save(param_name, save_dict)\n logging.info('Saved checkpoint to \\\"%s\\\"', param_name)\n\n\ndef load_params(prefix, epoch):\n \"\"\"Load params from a file\n \"\"\"\n save_dict = nd.load(\"%s-%04d.params\" % (prefix, epoch))\n arg_params = {}\n aux_params = {}\n if not save_dict:\n logging.warning(\"Params file '%s' is empty\", '%s-%04d.params' % (prefix, epoch))\n return (arg_params, aux_params)\n for k, v in save_dict.items():\n tp, name = k.split(\":\", 1)\n if tp == \"arg\":\n arg_params[name] = v\n if tp == \"aux\":\n aux_params[name] = v\n return (arg_params, aux_params)\n\ndef load_checkpoint(prefix, epoch):\n \"\"\"Load model checkpoint from file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n Epoch number of model we would like to load.\n\n Returns\n -------\n symbol : Symbol\n The symbol configuration of computation network.\n arg_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray\n Model parameter, dict of name to NDArray of net's auxiliary states.\n\n Notes\n -----\n - Symbol will be loaded from ``prefix-symbol.json``.\n - Parameters will be loaded from ``prefix-epoch.params``.\n \"\"\"\n symbol = sym.load('%s-symbol.json' % prefix)\n arg_params, aux_params = load_params(prefix, epoch)\n return (symbol, arg_params, aux_params)\n\nfrom .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position\n\nclass FeedForward(BASE_ESTIMATOR):\n \"\"\"Model class of MXNet for training and predicting feedforward nets.\n This class is designed for a single-data single output supervised network.\n\n Parameters\n ----------\n symbol : Symbol\n The symbol configuration of computation network.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n To use multi GPU training, pass in a list of gpu contexts.\n num_epoch : int, optional\n Training parameter, number of training epochs(epochs).\n 
epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : str or Optimizer, optional\n Training parameter, name or optimizer object for training.\n initializer : initializer function, optional\n Training parameter, the initialization scheme used.\n numpy_batch_size : int, optional\n The batch size of training data.\n Only needed when input array is numpy.\n arg_params : dict of str to NDArray, optional\n Model parameter, dict of name to NDArray of net's weights.\n aux_params : dict of str to NDArray, optional\n Model parameter, dict of name to NDArray of net's auxiliary states.\n allow_extra_params : boolean, optional\n Whether allow extra parameters that are not needed by symbol\n to be passed by aux_params and ``arg_params``.\n If this is True, no error will be thrown when ``aux_params`` and ``arg_params``\n contain more parameters than needed.\n begin_epoch : int, optional\n The begining training epoch.\n kwargs : dict\n The additional keyword arguments passed to optimizer.\n \"\"\"\n def __init__(self, symbol, ctx=None,\n num_epoch=None, epoch_size=None, optimizer='sgd',\n initializer=Uniform(0.01),\n numpy_batch_size=128,\n arg_params=None, aux_params=None,\n allow_extra_params=False,\n begin_epoch=0,\n **kwargs):\n warnings.warn(\n '\\033[91mmxnet.model.FeedForward has been deprecated. ' + \\\n 'Please use mxnet.mod.Module instead.\\033[0m',\n DeprecationWarning, stacklevel=2)\n\n if isinstance(symbol, sym.Symbol):\n self.symbol = symbol\n self.sym_gen = None\n else:\n assert(callable(symbol))\n self.symbol = None\n self.sym_gen = symbol\n\n # model parameters\n self.arg_params = arg_params\n self.aux_params = aux_params\n self.allow_extra_params = allow_extra_params\n\n self.argument_checked = False\n if self.sym_gen is None:\n self._check_arguments()\n\n # basic configuration\n if ctx is None:\n ctx = [cpu()]\n elif isinstance(ctx, Context):\n ctx = [ctx]\n self.ctx = ctx\n # training parameters\n self.num_epoch = num_epoch\n self.epoch_size = epoch_size\n self.kwargs = kwargs.copy()\n self.optimizer = optimizer\n self.initializer = initializer\n self.numpy_batch_size = numpy_batch_size\n # internal helper state\n self._pred_exec = None\n self.begin_epoch = begin_epoch\n\n def _check_arguments(self):\n \"\"\"verify the argument of the default symbol and user provided parameters\"\"\"\n if self.argument_checked:\n return\n\n assert(self.symbol is not None)\n self.argument_checked = True\n\n # check if symbol contain duplicated names.\n _check_arguments(self.symbol)\n # rematch parameters to delete useless ones\n if self.allow_extra_params:\n if self.arg_params:\n arg_names = set(self.symbol.list_arguments())\n self.arg_params = {k : v for k, v in self.arg_params.items()\n if k in arg_names}\n if self.aux_params:\n aux_names = set(self.symbol.list_auxiliary_states())\n self.aux_params = {k : v for k, v in self.aux_params.items()\n if k in aux_names}\n\n\n @staticmethod\n def _is_data_arg(name):\n \"\"\"Check if name is a data argument.\"\"\"\n return name.endswith('data') or name.endswith('label')\n\n def _init_params(self, inputs, overwrite=False):\n \"\"\"Initialize weight parameters and auxiliary states.\"\"\"\n inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]\n input_shapes = {item.name: item.shape for item in inputs}\n arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)\n assert arg_shapes is not None\n input_dtypes = {item.name: item.dtype for item in 
inputs}\n arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)\n assert arg_dtypes is not None\n\n arg_names = self.symbol.list_arguments()\n input_names = input_shapes.keys()\n param_names = [key for key in arg_names if key not in input_names]\n aux_names = self.symbol.list_auxiliary_states()\n\n param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)\n if x[0] in param_names]\n arg_params = {k : nd.zeros(shape=s, dtype=t)\n for k, s, t in param_name_attrs}\n aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)\n if x[0] in aux_names]\n aux_params = {k : nd.zeros(shape=s, dtype=t)\n for k, s, t in aux_name_attrs}\n\n for k, v in arg_params.items():\n if self.arg_params and k in self.arg_params and (not overwrite):\n arg_params[k][:] = self.arg_params[k][:]\n else:\n self.initializer(k, v)\n\n for k, v in aux_params.items():\n if self.aux_params and k in self.aux_params and (not overwrite):\n aux_params[k][:] = self.aux_params[k][:]\n else:\n self.initializer(k, v)\n\n self.arg_params = arg_params\n self.aux_params = aux_params\n return (arg_names, list(param_names), aux_names)\n\n def __getstate__(self):\n this = self.__dict__.copy()\n this['_pred_exec'] = None\n return this\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n def _init_predictor(self, input_shapes, type_dict=None):\n \"\"\"Initialize the predictor module for running prediction.\"\"\"\n shapes = {name: self.arg_params[name].shape for name in self.arg_params}\n shapes.update(dict(input_shapes))\n if self._pred_exec is not None:\n arg_shapes, _, _ = self.symbol.infer_shape(**shapes)\n assert arg_shapes is not None, \"Incomplete input shapes\"\n pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]\n if arg_shapes == pred_shapes:\n return\n # for now only use the first device\n pred_exec = self.symbol.simple_bind(\n self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)\n pred_exec.copy_params_from(self.arg_params, self.aux_params)\n\n _check_arguments(self.symbol)\n self._pred_exec = pred_exec\n\n def _init_iter(self, X, y, is_train):\n \"\"\"Initialize the iterator given input.\"\"\"\n if isinstance(X, (np.ndarray, nd.NDArray)):\n if y is None:\n if is_train:\n raise ValueError('y must be specified when X is numpy.ndarray')\n y = np.zeros(X.shape[0])\n if not isinstance(y, (np.ndarray, nd.NDArray)):\n raise TypeError('y must be ndarray when X is numpy.ndarray')\n if X.shape[0] != y.shape[0]:\n raise ValueError(\"The numbers of data points and labels not equal\")\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if y.ndim != 1:\n raise ValueError(\"Label must be 1D or 2D (with 2nd dimension being 1)\")\n if is_train:\n return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),\n shuffle=is_train, last_batch_handle='roll_over')\n else:\n return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)\n if not isinstance(X, io.DataIter):\n raise TypeError('X must be DataIter, NDArray or numpy.ndarray')\n return X\n\n def _init_eval_iter(self, eval_data):\n \"\"\"Initialize the iterator given eval_data.\"\"\"\n if eval_data is None:\n return eval_data\n if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:\n if eval_data[0] is not None:\n if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):\n return eval_data[0]\n input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)\n else eval_data[0])\n input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)\n else 
eval_data[1])\n return self._init_iter(input_data, input_label, is_train=True)\n else:\n raise ValueError(\"Eval data is NONE\")\n if not isinstance(eval_data, io.DataIter):\n raise TypeError('Eval data must be DataIter, or ' \\\n 'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')\n return eval_data\n\n def predict(self, X, num_batch=None, return_data=False, reset=True):\n \"\"\"Run the prediction, always only use one device.\n\n Parameters\n ----------\n X : mxnet.DataIter\n num_batch : int or None\n The number of batch to run. Go though all batches if ``None``.\n Returns\n -------\n y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.\n The predicted value of the output.\n \"\"\"\n X = self._init_iter(X, None, is_train=False)\n\n if reset:\n X.reset()\n data_shapes = X.provide_data\n data_names = [x[0] for x in data_shapes]\n type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())\n for x in X.provide_data:\n if isinstance(x, DataDesc):\n type_dict[x.name] = x.dtype\n else:\n type_dict[x[0]] = mx_real_t\n\n self._init_predictor(data_shapes, type_dict)\n batch_size = X.batch_size\n data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]\n output_list = [[] for _ in range(len(self._pred_exec.outputs))]\n if return_data:\n data_list = [[] for _ in X.provide_data]\n label_list = [[] for _ in X.provide_label]\n\n i = 0\n for batch in X:\n\n _load_data(batch, data_arrays)\n self._pred_exec.forward(is_train=False)\n padded = batch.pad\n real_size = batch_size - padded\n\n for o_list, o_nd in zip(output_list, self._pred_exec.outputs):\n o_list.append(o_nd[0:real_size].asnumpy())\n\n if return_data:\n for j, x in enumerate(batch.data):\n data_list[j].append(x[0:real_size].asnumpy())\n for j, x in enumerate(batch.label):\n label_list[j].append(x[0:real_size].asnumpy())\n i += 1\n if num_batch is not None and i == num_batch:\n break\n\n outputs = [np.concatenate(x) for x in output_list]\n if len(outputs) == 1:\n outputs = outputs[0]\n\n if return_data:\n data = [np.concatenate(x) for x in data_list]\n label = [np.concatenate(x) for x in label_list]\n if len(data) == 1:\n data = data[0]\n if len(label) == 1:\n label = label[0]\n return outputs, data, label\n else:\n return outputs\n\n def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):\n \"\"\"Run the model given an input and calculate the score\n as assessed by an evaluation metric.\n\n Parameters\n ----------\n X : mxnet.DataIter\n eval_metric : metric.metric\n The metric for calculating score.\n num_batch : int or None\n The number of batches to run. 
Go though all batches if ``None``.\n Returns\n -------\n s : float\n The final score.\n \"\"\"\n # setup metric\n if not isinstance(eval_metric, metric.EvalMetric):\n eval_metric = metric.create(eval_metric)\n\n X = self._init_iter(X, None, is_train=False)\n if reset:\n X.reset()\n\n data_shapes = X.provide_data\n data_names = [x[0] for x in data_shapes]\n type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())\n for x in X.provide_data:\n if isinstance(x, DataDesc):\n type_dict[x.name] = x.dtype\n else:\n type_dict[x[0]] = mx_real_t\n\n self._init_predictor(data_shapes, type_dict)\n data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]\n\n for i, batch in enumerate(X):\n if num_batch is not None and i == num_batch:\n break\n _load_data(batch, data_arrays)\n self._pred_exec.forward(is_train=False)\n eval_metric.update(batch.label, self._pred_exec.outputs)\n\n if batch_end_callback is not None:\n batch_end_params = BatchEndParam(epoch=0,\n nbatch=i,\n eval_metric=eval_metric,\n locals=locals())\n _multiple_callbacks(batch_end_callback, batch_end_params)\n return eval_metric.get()[1]\n\n def fit(self, X, y=None, eval_data=None, eval_metric='acc',\n epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,\n work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),\n eval_batch_end_callback=None):\n \"\"\"Fit the model.\n\n Parameters\n ----------\n X : DataIter, or numpy.ndarray/NDArray\n Training data. If `X` is a `DataIter`, the name or (if name not available)\n the position of its outputs should match the corresponding variable\n names defined in the symbolic graph.\n y : numpy.ndarray/NDArray, optional\n Training set label.\n If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.\n While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be\n the same as `X`, i.e. the number of data points and labels should be equal.\n eval_data : DataIter or numpy.ndarray/list/NDArray pair\n If eval_data is numpy.ndarray/list/NDArray pair,\n it should be ``(valid_data, valid_label)``.\n eval_metric : metric.EvalMetric or str or callable\n The evaluation metric. 
This could be the name of evaluation metric\n or a custom evaluation function that returns statistics\n based on a minibatch.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback: callable(epoch)\n A callback that is invoked at end of each batch for purposes of printing.\n kvstore: KVStore or str, optional\n The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'\n In default uses 'local', often no need to change for single machiine.\n logger : logging logger, optional\n When not specified, default logger will be used.\n work_load_list : float or int, optional\n The list of work load for different devices,\n in the same order as `ctx`.\n\n Note\n ----\n KVStore behavior\n - 'local', multi-devices on a single machine, will automatically choose best type.\n - 'dist_sync', multiple machines communicating via BSP.\n - 'dist_async', multiple machines with asynchronous communication.\n \"\"\"\n\n data = self._init_iter(X, y, is_train=True)\n eval_data = self._init_eval_iter(eval_data)\n\n if self.sym_gen:\n self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member\n self._check_arguments()\n self.kwargs[\"sym\"] = self.symbol\n\n arg_names, param_names, aux_names = \\\n self._init_params(data.provide_data+data.provide_label)\n\n # setup metric\n if not isinstance(eval_metric, metric.EvalMetric):\n eval_metric = metric.create(eval_metric)\n\n # create kvstore\n (kvstore, update_on_kvstore) = _create_kvstore(\n kvstore, len(self.ctx), self.arg_params)\n\n param_idx2name = {}\n if update_on_kvstore:\n param_idx2name.update(enumerate(param_names))\n else:\n for i, n in enumerate(param_names):\n for k in range(len(self.ctx)):\n param_idx2name[i*len(self.ctx)+k] = n\n self.kwargs[\"param_idx2name\"] = param_idx2name\n\n # init optmizer\n if isinstance(self.optimizer, str):\n batch_size = data.batch_size\n if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:\n batch_size *= kvstore.num_workers\n optimizer = opt.create(self.optimizer,\n rescale_grad=(1.0/batch_size),\n **(self.kwargs))\n elif isinstance(self.optimizer, opt.Optimizer):\n if not optimizer.idx2name:\n optimizer.idx2name = param_idx2name.copy()\n optimizer = self.optimizer\n\n # do training\n _train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,\n self.arg_params, self.aux_params,\n begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,\n epoch_size=self.epoch_size,\n optimizer=optimizer,\n train_data=data, eval_data=eval_data,\n eval_metric=eval_metric,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n kvstore=kvstore, update_on_kvstore=update_on_kvstore,\n logger=logger, work_load_list=work_load_list, monitor=monitor,\n eval_end_callback=eval_end_callback,\n eval_batch_end_callback=eval_batch_end_callback,\n sym_gen=self.sym_gen)\n\n\n def save(self, prefix, epoch=None, remove_amp_cast=True):\n \"\"\"Checkpoint the model checkpoint into file.\n You can also use `pickle` to do the job if you only work on Python.\n The advantage of `load` and `save` (as compared to `pickle`) is that\n the resulting file can be loaded from other MXNet language bindings.\n One can also directly `load`/`save` from/to cloud storage(S3, HDFS)\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n remove_amp_cast : bool, optional\n Whether to remove the amp_cast and amp_multicast operators, 
before saving the model.\n\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n if epoch is None:\n epoch = self.num_epoch\n assert epoch is not None\n save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)\n\n @staticmethod\n def load(prefix, epoch, ctx=None, **kwargs):\n \"\"\"Load model checkpoint from file.\n\n Parameters\n ----------\n prefix : str\n Prefix of model name.\n epoch : int\n epoch number of model we would like to load.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n kwargs : dict\n Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.\n\n Returns\n -------\n model : FeedForward\n The loaded model that can be used for prediction.\n\n Notes\n -----\n - ``prefix-symbol.json`` will be saved for symbol.\n - ``prefix-epoch.params`` will be saved for parameters.\n \"\"\"\n symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)\n return FeedForward(symbol, ctx=ctx,\n arg_params=arg_params, aux_params=aux_params,\n begin_epoch=epoch,\n **kwargs)\n\n @staticmethod\n def create(symbol, X, y=None, ctx=None,\n num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),\n eval_data=None, eval_metric='acc',\n epoch_end_callback=None, batch_end_callback=None,\n kvstore='local', logger=None, work_load_list=None,\n eval_end_callback=LogValidationMetricsCallback(),\n eval_batch_end_callback=None, **kwargs):\n \"\"\"Functional style to create a model.\n This function is more consistent with functional\n languages such as R, where mutation is not allowed.\n\n Parameters\n ----------\n symbol : Symbol\n The symbol configuration of a computation network.\n X : DataIter\n Training data.\n y : numpy.ndarray, optional\n If `X` is a ``numpy.ndarray``, `y` must be set.\n ctx : Context or list of Context, optional\n The device context of training and prediction.\n To use multi-GPU training, pass in a list of GPU contexts.\n num_epoch : int, optional\n The number of training epochs(epochs).\n epoch_size : int, optional\n Number of batches in a epoch. In default, it is set to\n ``ceil(num_train_examples / batch_size)``.\n optimizer : str or Optimizer, optional\n The name of the chosen optimizer, or an optimizer object, used for training.\n initializer : initializer function, optional\n The initialization scheme used.\n eval_data : DataIter or numpy.ndarray pair\n If `eval_set` is ``numpy.ndarray`` pair, it should\n be (`valid_data`, `valid_label`).\n eval_metric : metric.EvalMetric or str or callable\n The evaluation metric. 
Can be the name of an evaluation metric\n or a custom evaluation function that returns statistics\n based on a minibatch.\n epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)\n A callback that is invoked at end of each epoch.\n This can be used to checkpoint model each epoch.\n batch_end_callback: callable(epoch)\n A callback that is invoked at end of each batch for print purposes.\n kvstore: KVStore or str, optional\n The KVStore or a string kvstore type: 'local', 'dist_sync', 'dis_async'.\n Defaults to 'local', often no need to change for single machine.\n logger : logging logger, optional\n When not specified, default logger will be used.\n work_load_list : list of float or int, optional\n The list of work load for different devices,\n in the same order as `ctx`.\n \"\"\"\n model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,\n epoch_size=epoch_size,\n optimizer=optimizer, initializer=initializer, **kwargs)\n model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n kvstore=kvstore,\n logger=logger,\n work_load_list=work_load_list,\n eval_end_callback=eval_end_callback,\n eval_batch_end_callback=eval_batch_end_callback)\n return model\n"
] | [
[
"numpy.array",
"numpy.concatenate",
"numpy.zeros",
"numpy.prod"
]
] |
feiwu77777/Face-detection-and-tracking | [
"1135d2d93d5b667110551dc7e4b985b5861eb380"
] | [
"eval_tiny_one_image.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 15:49:15 2018\r\n\r\n@author: fei.wu\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\nimport tiny_face_model\r\nimport util\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\nimport pylab as pl\r\nfrom scipy.special import expit\r\n\r\nMAX_INPUT_DIM = 5000.0\r\n\r\ndef overlay_bounding_boxes(raw_img, refined_bboxes, lw):\r\n \"\"\"Overlay bounding boxes of face on images.\r\n Args:\r\n raw_img:\r\n A target image.\r\n refined_bboxes:\r\n Bounding boxes of detected faces.\r\n lw: \r\n Line width of bounding boxes. If zero specified,\r\n this is determined based on confidence of each detection.\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n # Overlay bounding boxes on an image with the color based on the confidence.\r\n for r in refined_bboxes:\r\n _score = expit(r[4])\r\n cm_idx = int(np.ceil(_score * 255))\r\n rect_color = [int(np.ceil(x * 255)) for x in util.cm_data[cm_idx]] # parula\r\n _lw = lw\r\n if lw == 0: # line width of each bounding box is adaptively determined.\r\n bw, bh = r[2] - r[0] + 1, r[3] - r[0] + 1\r\n _lw = 1 if min(bw, bh) <= 20 else max(2, min(3, min(bh / 20, bw / 20)))\r\n _lw = int(np.ceil(_lw * _score))\r\n\r\n _r = [int(x) for x in r[:4]]\r\n cv2.rectangle(raw_img, (_r[0], _r[1]), (_r[2], _r[3]), rect_color, _lw)\r\n\r\n\r\ndef evaluate(weight_file_path, frame, prob_thresh=0.5, nms_thresh=0.1, lw=3, display=False):\r\n \"\"\"Detect faces in images.\r\n Args:\r\n prob_thresh:\r\n The threshold of detection confidence.\r\n nms_thresh:\r\n The overlap threshold of non maximum suppression\r\n weight_file_path: \r\n A pretrained weight file in the pickle format \r\n generated by matconvnet_hr101_to_tf.py.\r\n data_dir: \r\n A directory which contains images.\r\n output_dir: \r\n A directory into which images with detected faces are output.\r\n lw: \r\n Line width of bounding boxes. If zero specified,\r\n this is determined based on confidence of each detection.\r\n display:\r\n Display tiny face images on window.\r\n Returns:\r\n None.\r\n \"\"\"\r\n\r\n # placeholder of input images. 
Currently batch size of one is supported.\r\n x = tf.placeholder(tf.float32, [1, None, None, 3]) # n, h, w, c\r\n\r\n # Create the tiny face model which weights are loaded from a pretrained model.\r\n model = tiny_face_model.Model(weight_file_path)\r\n score_final = model.tiny_face(x)\r\n\r\n # Load an average image and clusters(reference boxes of templates).\r\n with open(weight_file_path, \"rb\") as f:\r\n _, mat_params_dict = pickle.load(f)\r\n\r\n average_image = model.get_data_by_key(\"average_image\")\r\n clusters = model.get_data_by_key(\"clusters\")\r\n clusters_h = clusters[:, 3] - clusters[:, 1] + 1\r\n clusters_w = clusters[:, 2] - clusters[:, 0] + 1\r\n normal_idx = np.where(clusters[:, 4] == 1)\r\n\r\n # main\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n raw_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n raw_img_f = raw_img.astype(np.float32)\r\n \r\n def _calc_scales():\r\n raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]\r\n min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),\r\n np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))\r\n max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))\r\n scales_down = pl.frange(min_scale, 0, 1.)\r\n scales_up = pl.frange(0.5, max_scale, 0.5)\r\n scales_pow = np.hstack((scales_down, scales_up))\r\n scales = np.power(2.0, scales_pow)\r\n return scales\r\n \r\n scales = _calc_scales()\r\n\r\n # initialize output\r\n bboxes = np.empty(shape=(0, 5))\r\n \r\n # process input at different scales\r\n for s in scales:\r\n img = cv2.resize(raw_img_f, (0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)\r\n img = img - average_image\r\n img = img[np.newaxis, :]\r\n \r\n # we don't run every template on every scale ids of templates to ignore\r\n tids = list(range(4, 12)) + ([] if s <= 1.0 else list(range(18, 25)))\r\n ignoredTids = list(set(range(0, clusters.shape[0])) - set(tids))\r\n \r\n # run through the net\r\n score_final_tf = sess.run(score_final, feed_dict={x: img})\r\n \r\n # collect scores\r\n score_cls_tf, score_reg_tf = score_final_tf[:, :, :, :25], score_final_tf[:, :, :, 25:125]\r\n prob_cls_tf = expit(score_cls_tf)\r\n prob_cls_tf[0, :, :, ignoredTids] = 0.0\r\n \r\n def _calc_bounding_boxes():\r\n # threshold for detection\r\n _, fy, fx, fc = np.where(prob_cls_tf > prob_thresh)\r\n \r\n # interpret heatmap into bounding boxes\r\n cy = fy * 8 - 1\r\n cx = fx * 8 - 1\r\n ch = clusters[fc, 3] - clusters[fc, 1] + 1\r\n cw = clusters[fc, 2] - clusters[fc, 0] + 1\r\n \r\n # extract bounding box refinement\r\n Nt = clusters.shape[0]\r\n tx = score_reg_tf[0, :, :, 0:Nt]\r\n ty = score_reg_tf[0, :, :, Nt:2*Nt]\r\n tw = score_reg_tf[0, :, :, 2*Nt:3*Nt]\r\n th = score_reg_tf[0, :, :, 3*Nt:4*Nt]\r\n \r\n # refine bounding boxes\r\n dcx = cw * tx[fy, fx, fc]\r\n dcy = ch * ty[fy, fx, fc]\r\n rcx = cx + dcx\r\n rcy = cy + dcy\r\n rcw = cw * np.exp(tw[fy, fx, fc])\r\n rch = ch * np.exp(th[fy, fx, fc])\r\n \r\n scores = score_cls_tf[0, fy, fx, fc]\r\n tmp_bboxes = np.vstack((rcx - rcw / 2, rcy - rch / 2, rcx + rcw / 2, rcy + rch / 2))\r\n tmp_bboxes = np.vstack((tmp_bboxes / s, scores))\r\n tmp_bboxes = tmp_bboxes.transpose()\r\n return tmp_bboxes\r\n \r\n tmp_bboxes = _calc_bounding_boxes()\r\n bboxes = np.vstack((bboxes, tmp_bboxes)) # <class 'tuple'>: (5265, 5) \r\n \r\n # non maximum suppression\r\n # refind_idx = util.nms(bboxes, nms_thresh)\r\n refind_idx = tf.image.non_max_suppression(tf.convert_to_tensor(bboxes[:, :4], dtype=tf.float32),\r\n 
tf.convert_to_tensor(bboxes[:, 4], dtype=tf.float32),\r\n max_output_size=bboxes.shape[0], iou_threshold=nms_thresh)\r\n refind_idx = sess.run(refind_idx)\r\n refined_bboxes = bboxes[refind_idx]\r\n overlay_bounding_boxes(raw_img, refined_bboxes, lw)\r\n if display:\r\n # plt.axis('off')\r\n plt.imshow(raw_img)\r\n plt.show() \r\n return refined_bboxes\r\n\r\n\r\ndef main(frame):\r\n print(\"Searching faces...\")\r\n with tf.Graph().as_default():\r\n faces = evaluate(\r\n weight_file_path= \"weights.pckl\", frame = frame,\r\n prob_thresh=0.7, nms_thresh=0.1, #non max suppression threshold,\r\n lw=2, display= False)\r\n return faces\r\n\r\n"
] | [
[
"numpy.vstack",
"tensorflow.placeholder",
"numpy.empty",
"numpy.ceil",
"tensorflow.global_variables_initializer",
"numpy.exp",
"tensorflow.Graph",
"numpy.hstack",
"numpy.power",
"scipy.special.expit",
"tensorflow.convert_to_tensor",
"tensorflow.Session",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.where"
]
] |
valanm22/pytorch-lightning | [
"5d190eabd28671a6222741f5dd9ee3f214e519b1"
] | [
"pytorch_lightning/trainer/trainer.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trainer to automate the training.\"\"\"\nimport inspect\nimport logging\nimport math\nimport os\nimport traceback\nimport warnings\nfrom argparse import ArgumentParser, Namespace\nfrom copy import deepcopy\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Type, Union\nfrom weakref import proxy\n\nimport torch\nfrom packaging.version import Version\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.accelerators import Accelerator, GPUAccelerator, IPUAccelerator, TPUAccelerator\nfrom pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase\nfrom pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter\nfrom pytorch_lightning.core.datamodule import LightningDataModule\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.loggers.base import DummyLogger, LoggerCollection\nfrom pytorch_lightning.loggers.tensorboard import TensorBoardLogger\nfrom pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop\nfrom pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop\nfrom pytorch_lightning.loops.fit_loop import FitLoop\nfrom pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress\nfrom pytorch_lightning.plugins import (\n ApexMixedPrecisionPlugin,\n NativeMixedPrecisionPlugin,\n PLUGIN_INPUT,\n PrecisionPlugin,\n)\nfrom pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment\nfrom pytorch_lightning.profiler import (\n AdvancedProfiler,\n BaseProfiler,\n PassThroughProfiler,\n PyTorchProfiler,\n SimpleProfiler,\n XLAProfiler,\n)\nfrom pytorch_lightning.strategies import ParallelStrategy, Strategy\nfrom pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy\nfrom pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin\nfrom pytorch_lightning.trainer.configuration_validator import verify_loop_configurations\nfrom pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector\nfrom pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector\nfrom pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector\nfrom pytorch_lightning.trainer.connectors.data_connector import DataConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection\nfrom pytorch_lightning.trainer.connectors.signal_connector import SignalConnector\nfrom pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin\nfrom pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin\nfrom pytorch_lightning.trainer.states import 
RunningStage, TrainerFn, TrainerState, TrainerStatus\nfrom pytorch_lightning.trainer.supporters import CombinedLoader\nfrom pytorch_lightning.tuner.lr_finder import _LRFinder\nfrom pytorch_lightning.tuner.tuning import Tuner\nfrom pytorch_lightning.utilities import (\n _IPU_AVAILABLE,\n _TPU_AVAILABLE,\n AMPType,\n device_parser,\n GradClipAlgorithmType,\n parsing,\n)\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom pytorch_lightning.utilities.argparse import (\n _defaults_from_env_vars,\n add_argparse_args,\n from_argparse_args,\n parse_argparser,\n parse_env_variables,\n)\nfrom pytorch_lightning.utilities.auto_restart import _add_capture_metadata_collate\nfrom pytorch_lightning.utilities.cloud_io import get_filesystem\nfrom pytorch_lightning.utilities.data import _auto_add_worker_init_fn, has_len_all_ranks\nfrom pytorch_lightning.utilities.distributed import distributed_available\nfrom pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _fault_tolerant_training\nfrom pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.seed import isolate_rng\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.types import (\n _EVALUATE_OUTPUT,\n _PATH,\n _PREDICT_OUTPUT,\n EVAL_DATALOADERS,\n LRSchedulerConfig,\n STEP_OUTPUT,\n TRAIN_DATALOADERS,\n)\nfrom pytorch_lightning.utilities.warnings import PossibleUserWarning\n\nlog = logging.getLogger(__name__)\n# warnings to ignore in trainer\nwarnings.filterwarnings(\n \"ignore\", message=\"torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead\"\n)\n\n\nclass Trainer(\n TrainerCallbackHookMixin, # TODO: Remove in v1.8\n TrainerOptimizersMixin, # TODO: Remove in v1.8\n TrainerDataLoadingMixin, # TODO: Remove in v1.8\n):\n @_defaults_from_env_vars\n def __init__(\n self,\n logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,\n checkpoint_callback: Optional[bool] = None,\n enable_checkpointing: bool = True,\n callbacks: Optional[Union[List[Callback], Callback]] = None,\n default_root_dir: Optional[str] = None,\n gradient_clip_val: Optional[Union[int, float]] = None,\n gradient_clip_algorithm: Optional[str] = None,\n process_position: int = 0,\n num_nodes: int = 1,\n num_processes: Optional[int] = None,\n devices: Optional[Union[List[int], str, int]] = None,\n gpus: Optional[Union[List[int], str, int]] = None,\n auto_select_gpus: bool = False,\n tpu_cores: Optional[Union[List[int], str, int]] = None,\n ipus: Optional[int] = None,\n log_gpu_memory: Optional[str] = None, # TODO: Remove in 1.7\n progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7\n enable_progress_bar: bool = True,\n overfit_batches: Union[int, float] = 0.0,\n track_grad_norm: Union[int, float, str] = -1,\n check_val_every_n_epoch: int = 1,\n fast_dev_run: Union[int, bool] = False,\n accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,\n max_epochs: Optional[int] = None,\n min_epochs: Optional[int] = None,\n max_steps: int = -1,\n min_steps: Optional[int] = None,\n max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,\n limit_train_batches: Optional[Union[int, float]] = 
None,\n limit_val_batches: Optional[Union[int, float]] = None,\n limit_test_batches: Optional[Union[int, float]] = None,\n limit_predict_batches: Optional[Union[int, float]] = None,\n val_check_interval: Optional[Union[int, float]] = None,\n flush_logs_every_n_steps: Optional[int] = None,\n log_every_n_steps: int = 50,\n accelerator: Optional[Union[str, Accelerator]] = None,\n strategy: Optional[Union[str, Strategy]] = None,\n sync_batchnorm: bool = False,\n precision: Union[int, str] = 32,\n enable_model_summary: bool = True,\n weights_summary: Optional[str] = \"top\",\n weights_save_path: Optional[str] = None, # TODO: Remove in 1.8\n num_sanity_val_steps: int = 2,\n resume_from_checkpoint: Optional[Union[Path, str]] = None,\n profiler: Optional[Union[BaseProfiler, str]] = None,\n benchmark: Optional[bool] = None,\n deterministic: bool = False,\n reload_dataloaders_every_n_epochs: int = 0,\n auto_lr_find: Union[bool, str] = False,\n replace_sampler_ddp: bool = True,\n detect_anomaly: bool = False,\n auto_scale_batch_size: Union[str, bool] = False,\n prepare_data_per_node: Optional[bool] = None,\n plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,\n amp_backend: str = \"native\",\n amp_level: Optional[str] = None,\n move_metrics_to_cpu: bool = False,\n multiple_trainloader_mode: str = \"max_size_cycle\",\n stochastic_weight_avg: bool = False,\n terminate_on_nan: Optional[bool] = None,\n ) -> None:\n r\"\"\"\n Customize every aspect of training via flags.\n\n Args:\n\n accelerator: Supports passing different accelerator types (\"cpu\", \"gpu\", \"tpu\", \"ipu\", \"auto\")\n as well as custom accelerator instances.\n\n .. deprecated:: v1.5\n Passing training strategies (e.g., 'ddp') to ``accelerator`` has been deprecated in v1.5.0\n and will be removed in v1.7.0. Please use the ``strategy`` argument instead.\n\n accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.\n Default: ``None``.\n\n amp_backend: The mixed precision backend to use (\"native\" or \"apex\").\n Default: ``'native''``.\n\n amp_level: The optimization level to use (O1, O2, etc...). By default it will be set to \"O2\"\n if ``amp_backend`` is set to \"apex\".\n\n auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,\n trying to optimize initial learning for faster convergence. trainer.tune() method will\n set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.\n To use a different key set a string instead of True with the key name.\n Default: ``False``.\n\n auto_scale_batch_size: If set to True, will `initially` run a batch size\n finder trying to find the largest batch size that fits into memory.\n The result will be stored in self.batch_size in the LightningModule.\n Additionally, can be set to either `power` that estimates the batch size through\n a power search or `binsearch` that estimates the batch size through a binary search.\n Default: ``False``.\n\n auto_select_gpus: If enabled and ``gpus`` is an integer, pick available\n gpus automatically. This is especially useful when\n GPUs are configured to be in \"exclusive mode\", such\n that only one process at a time can access them.\n Default: ``False``.\n\n benchmark: Sets ``torch.backends.cudnn.benchmark``.\n Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`\n is ``False``. Overwrite to manually set a different value. 
Default: ``None``.\n\n callbacks: Add a callback or list of callbacks.\n Default: ``None``.\n\n checkpoint_callback: If ``True``, enable checkpointing.\n Default: ``None``.\n\n .. deprecated:: v1.5\n ``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.\n Please consider using ``enable_checkpointing`` instead.\n\n enable_checkpointing: If ``True``, enable checkpointing.\n It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in\n :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.\n Default: ``True``.\n\n check_val_every_n_epoch: Check val every n train epochs.\n Default: ``1``.\n\n\n default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.\n Default: ``os.getcwd()``.\n Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n\n detect_anomaly: Enable anomaly detection for the autograd engine.\n Default: ``False``.\n\n deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.\n Default: ``False``.\n\n devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,\n based on the accelerator type.\n\n fast_dev_run: Runs n if set to ``n`` (int) else 1 if set to ``True`` batch(es)\n of train, val and test to find any bugs (ie: a sort of unit test).\n Default: ``False``.\n\n flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).\n\n .. deprecated:: v1.5\n ``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.\n Please configure flushing directly in the logger instead.\n\n gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node\n Default: ``None``.\n\n gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables\n gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.\n Default: ``None``.\n\n gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm=\"value\"``\n to clip by value, and ``gradient_clip_algorithm=\"norm\"`` to clip by norm. By default it will\n be set to ``\"norm\"``.\n\n limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).\n Default: ``1.0``.\n\n logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses\n the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are\n provided and the `save_dir` property of that logger is not set, local files (checkpoints,\n profiler traces, etc.) are saved in ``default_root_dir`` rather than in the ``log_dir`` of any\n of the individual loggers.\n Default: ``True``.\n\n log_gpu_memory: None, 'min_max', 'all'. Might slow performance.\n\n .. 
deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please use the ``DeviceStatsMonitor`` callback directly instead.\n\n log_every_n_steps: How often to log within steps.\n Default: ``50``.\n\n prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.\n Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data\n\n .. deprecated:: v1.5\n Deprecated in v1.5.0 and will be removed in v1.7.0\n Please set ``prepare_data_per_node`` in ``LightningDataModule`` and/or\n ``LightningModule`` directly instead.\n\n process_position: Orders the progress bar when running multiple models on same machine.\n\n .. deprecated:: v1.5\n ``process_position`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``\n directly to the Trainer's ``callbacks`` argument instead.\n\n progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.\n Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means\n a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).\n\n .. deprecated:: v1.5\n ``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``refresh_rate``\n directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,\n pass ``enable_progress_bar = False`` to the Trainer.\n\n enable_progress_bar: Whether to enable to progress bar by default.\n Default: ``False``.\n\n profiler: To profile individual steps during training and assist in identifying bottlenecks.\n Default: ``None``.\n\n overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).\n Default: ``0.0``.\n\n plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.\n Default: ``None``.\n\n precision: Double precision (64), full precision (32), half precision (16) or bfloat16 precision (bf16).\n Can be used on CPU, GPU, TPUs or IPUs.\n Default: ``32``.\n\n max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).\n If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.\n To enable infinite training, set ``max_epochs = -1``.\n\n min_epochs: Force training for at least these many epochs. Disabled by default (None).\n\n max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``\n and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set\n ``max_epochs`` to ``-1``.\n\n min_steps: Force training for at least these number of steps. Disabled by default (``None``).\n\n max_time: Stop training after this amount of time has passed. 
Disabled by default (``None``).\n The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes seconds), as a\n :class:`datetime.timedelta`, or a dictionary with keys that will be passed to\n :class:`datetime.timedelta`.\n\n num_nodes: Number of GPU nodes for distributed training.\n Default: ``1``.\n\n num_processes: Number of processes for distributed training with ``accelerator=\"cpu\"``.\n Default: ``1``.\n\n num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.\n Set it to `-1` to run all batches in all validation dataloaders.\n Default: ``2``.\n\n reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.\n Default: ``0``.\n\n replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this\n will toggled automatically when DDP is used. By default it will add ``shuffle=True`` for\n train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,\n you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.\n\n resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is\n no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,\n training will start from the beginning of the next epoch.\n\n .. deprecated:: v1.5\n ``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.\n Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.\n\n strategy: Supports different training strategies with aliases\n as well custom training type plugins.\n Default: ``None``.\n\n sync_batchnorm: Synchronize batch norm layers between process groups/whole world.\n Default: ``False``.\n\n terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the\n end of each training batch, if any of the parameters or the loss are NaN or +/-inf.\n\n .. deprecated:: v1.5\n Trainer argument ``terminate_on_nan`` was deprecated in v1.5 and will be removed in 1.7.\n Please use ``detect_anomaly`` instead.\n\n detect_anomaly: Enable anomaly detection for the autograd engine.\n Default: ``False``.\n\n tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on (1)\n Default: ``None``.\n\n ipus: How many IPUs to train on.\n Default: ``None``.\n\n track_grad_norm: -1 no tracking. Otherwise tracks that p-norm. May be set to 'inf' infinity-norm. If using\n Automatic Mixed Precision (AMP), the gradients will be unscaled before logging them.\n Default: ``-1``.\n\n val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check\n after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training\n batches.\n Default: ``1.0``.\n\n enable_model_summary: Whether to enable model summarization by default.\n Default: ``True``.\n\n weights_summary: Prints a summary of the weights when training begins.\n\n .. deprecated:: v1.5\n ``weights_summary`` has been deprecated in v1.5 and will be removed in v1.7.\n To disable the summary, pass ``enable_model_summary = False`` to the Trainer.\n To customize the summary, pass :class:`~pytorch_lightning.callbacks.model_summary.ModelSummary`\n directly to the Trainer's ``callbacks`` argument.\n\n weights_save_path: Where to save weights if specified. Will override default_root_dir\n for checkpoints only. 
Use this if for whatever reason you need the checkpoints\n stored in a different place than the logs written in `default_root_dir`.\n Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'\n Defaults to `default_root_dir`.\n\n .. deprecated:: v1.6\n ``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass\n ``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`\n callback.\n\n move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.\n This can save some gpu memory, but can make training slower. Use with attention.\n Default: ``False``.\n\n multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.\n In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,\n and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets\n reload when reaching the minimum length of datasets.\n Default: ``\"max_size_cycle\"``.\n\n stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)\n <https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_.\n Default: ``False``.\n\n .. deprecated:: v1.5\n ``stochastic_weight_avg`` has been deprecated in v1.5 and will be removed in v1.7.\n Please pass :class:`~pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`\n directly to the Trainer's ``callbacks`` argument instead.\n \"\"\"\n super().__init__()\n Trainer._log_api_event(\"init\")\n log.detail(f\"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}\")\n self.state = TrainerState()\n\n gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n # init connectors\n self._data_connector = DataConnector(self, multiple_trainloader_mode)\n\n self._accelerator_connector = AcceleratorConnector(\n num_processes=num_processes,\n devices=devices,\n tpu_cores=tpu_cores,\n ipus=ipus,\n accelerator=accelerator,\n strategy=strategy,\n gpus=gpus,\n gpu_ids=gpu_ids,\n num_nodes=num_nodes,\n sync_batchnorm=sync_batchnorm,\n benchmark=benchmark,\n replace_sampler_ddp=replace_sampler_ddp,\n deterministic=deterministic,\n precision=precision,\n amp_type=amp_backend,\n amp_level=amp_level,\n plugins=plugins,\n )\n self._logger_connector = LoggerConnector(self, log_gpu_memory)\n self._callback_connector = CallbackConnector(self)\n self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)\n self._signal_connector = SignalConnector(self)\n self.tuner = Tuner(self)\n\n min_steps, max_steps, min_epochs, max_epochs, max_time = _parse_loop_limits(\n min_steps, max_steps, min_epochs, max_epochs, max_time\n )\n fit_loop = FitLoop(min_epochs=min_epochs, max_epochs=max_epochs)\n training_epoch_loop = TrainingEpochLoop(min_steps=min_steps, max_steps=max_steps)\n fit_loop.connect(epoch_loop=training_epoch_loop)\n\n # default .fit() loop\n self.fit_loop = fit_loop\n\n # default .validate() loop\n self.validate_loop = EvaluationLoop()\n\n # default .test() loop\n self.test_loop = EvaluationLoop()\n\n # default .predict() loop\n self.predict_loop = PredictionLoop()\n\n # set when a checkpoint is loaded via `Trainer.{fit,validate,test,predict}`.\n self._ckpt_path: Optional[str] = None\n\n # .validate(), predict() and .test() set these when they load a checkpoint. 
They will be removed in favor of\n # the unified read-only `Trainer.ckpt_path` attribute in v1.8\n self._validated_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n self._tested_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n self._predicted_ckpt_path: Optional[str] = None # TODO: remove in v1.8\n\n # todo: remove in v1.7\n self._weights_summary: Optional[str] = None\n\n # init callbacks\n # Declare attributes to be set in _callback_connector on_trainer_init\n self._callback_connector.on_trainer_init(\n callbacks,\n checkpoint_callback,\n enable_checkpointing,\n enable_progress_bar,\n progress_bar_refresh_rate,\n process_position,\n default_root_dir,\n weights_save_path,\n enable_model_summary,\n weights_summary,\n stochastic_weight_avg,\n max_time,\n accumulate_grad_batches,\n )\n\n # hook\n self._call_callback_hooks(\"on_init_start\")\n\n # init data flags\n self.check_val_every_n_epoch: int\n self._data_connector.on_trainer_init(\n check_val_every_n_epoch,\n reload_dataloaders_every_n_epochs,\n prepare_data_per_node,\n )\n\n if terminate_on_nan is not None:\n rank_zero_deprecation(\n \"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7.\"\n \" Please use `Trainer(detect_anomaly=True)` instead.\"\n )\n if not isinstance(terminate_on_nan, bool):\n raise TypeError(f\"`terminate_on_nan` should be a bool, got {terminate_on_nan}.\")\n\n # gradient clipping\n if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):\n raise TypeError(f\"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.\")\n\n if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(\n gradient_clip_algorithm.lower()\n ):\n raise MisconfigurationException(\n f\"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. \"\n f\"Allowed algorithms: {GradClipAlgorithmType.supported_types()}.\"\n )\n\n # gradient norm tracking\n if track_grad_norm != -1 and not (\n (isinstance(track_grad_norm, (int, float)) or track_grad_norm == \"inf\") and float(track_grad_norm) > 0\n ):\n raise MisconfigurationException(\n f\"`track_grad_norm` must be a positive number or 'inf' (infinity norm). 
Got {track_grad_norm}.\"\n )\n\n self._terminate_on_nan = terminate_on_nan\n self.gradient_clip_val: Union[int, float] = gradient_clip_val\n self.gradient_clip_algorithm = (\n GradClipAlgorithmType(gradient_clip_algorithm.lower())\n if gradient_clip_algorithm is not None\n else gradient_clip_algorithm\n )\n self.track_grad_norm: float = float(track_grad_norm)\n\n self._detect_anomaly: bool = detect_anomaly\n self._setup_on_init(num_sanity_val_steps)\n\n # configure tuner\n self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)\n\n # configure profiler\n self.__init_profiler(profiler)\n\n # init logger flags\n self._loggers: List[LightningLoggerBase]\n self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)\n\n # init debugging flags\n self.val_check_interval: Union[int, float]\n self._init_debugging_flags(\n limit_train_batches,\n limit_val_batches,\n limit_test_batches,\n limit_predict_batches,\n val_check_interval,\n overfit_batches,\n fast_dev_run,\n )\n\n # Callback system\n self._call_callback_hooks(\"on_init_end\")\n\n def _init_debugging_flags(\n self,\n limit_train_batches: Optional[Union[int, float]],\n limit_val_batches: Optional[Union[int, float]],\n limit_test_batches: Optional[Union[int, float]],\n limit_predict_batches: Optional[Union[int, float]],\n val_check_interval: Optional[Union[int, float]],\n overfit_batches: Union[int, float],\n fast_dev_run: Union[int, bool],\n ) -> None:\n if isinstance(fast_dev_run, int) and (fast_dev_run < 0):\n raise MisconfigurationException(\n f\"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0.\"\n )\n\n self.fast_dev_run = fast_dev_run\n\n # set fast_dev_run=True when it is 1, used while logging\n if fast_dev_run == 1:\n self.fast_dev_run = True\n\n if fast_dev_run:\n num_batches = int(fast_dev_run)\n limit_train_batches = num_batches\n limit_val_batches = num_batches\n limit_test_batches = num_batches\n limit_predict_batches = num_batches\n self.fit_loop.max_steps = num_batches\n self.num_sanity_val_steps = 0\n self.fit_loop.max_epochs = 1\n val_check_interval = 1.0\n self.check_val_every_n_epoch = 1\n self.loggers = [DummyLogger()] if self.loggers else []\n\n rank_zero_info(\n \"Running in fast_dev_run mode: will run a full train,\"\n f\" val, test and prediction loop using {num_batches} batch(es).\"\n )\n\n self.limit_train_batches = _determine_batch_limits(limit_train_batches, \"limit_train_batches\")\n self.limit_val_batches = _determine_batch_limits(limit_val_batches, \"limit_val_batches\")\n self.limit_test_batches = _determine_batch_limits(limit_test_batches, \"limit_test_batches\")\n self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, \"limit_predict_batches\")\n self.val_check_interval = _determine_batch_limits(val_check_interval, \"val_check_interval\")\n self.overfit_batches = _determine_batch_limits(overfit_batches, \"overfit_batches\")\n self._determine_data_use_amount(self.overfit_batches)\n\n def _determine_data_use_amount(self, overfit_batches: float) -> None:\n \"\"\"Use less data for debugging purposes.\"\"\"\n if overfit_batches > 0:\n self.limit_train_batches = overfit_batches\n self.limit_val_batches = 0\n\n def _setup_on_init(self, num_sanity_val_steps: int) -> None:\n self._log_device_info()\n\n self.should_stop = False\n self.state = TrainerState()\n self.num_training_batches = float(\"inf\")\n self.train_dataloader = None\n\n if num_sanity_val_steps == -1:\n self.num_sanity_val_steps = 
float(\"inf\")\n else:\n self.num_sanity_val_steps = num_sanity_val_steps\n\n self.num_sanity_val_batches = []\n self.num_test_batches = []\n self.num_val_batches = []\n self.test_dataloaders = None\n self.val_dataloaders = None\n self._last_train_dl_reload_epoch = float(\"-inf\")\n self._last_val_dl_reload_epoch = float(\"-inf\")\n\n self.num_predict_batches = []\n\n def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:\n r\"\"\"\n Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)\n as all errors should funnel through them\n\n Args:\n trainer_fn: one of (fit, validate, test, predict)\n *args: positional arguments to be passed to the `trainer_fn`\n **kwargs: keyword arguments to be passed to `trainer_fn`\n \"\"\"\n try:\n if self.strategy.launcher is not None:\n return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)\n else:\n return trainer_fn(*args, **kwargs)\n # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7\n except KeyboardInterrupt as exception:\n rank_zero_warn(\"Detected KeyboardInterrupt, attempting graceful shutdown...\")\n # user could press Ctrl+c many times... only shutdown once\n if not self.interrupted:\n self.state.status = TrainerStatus.INTERRUPTED\n self._call_callback_hooks(\"on_keyboard_interrupt\")\n self._call_callback_hooks(\"on_exception\", exception)\n except BaseException as exception:\n self.state.status = TrainerStatus.INTERRUPTED\n if distributed_available() and self.world_size > 1:\n # try syncing remaining processes, kill otherwise\n self.strategy.reconciliate_processes(traceback.format_exc())\n self._call_callback_hooks(\"on_exception\", exception)\n self._teardown()\n # teardown might access the stage so we reset it after\n self.state.stage = None\n raise\n\n def fit(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n ckpt_path: Optional[str] = None,\n ) -> None:\n r\"\"\"\n Runs the full optimization routine.\n\n Args:\n model: Model to fit.\n\n train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.\n\n val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.\n\n ckpt_path: Path/URL of the checkpoint from which training is resumed. If there is\n no checkpoint file at the path, an exception is raised. 
If resuming from mid-epoch checkpoint,\n training will start from the beginning of the next epoch.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n \"\"\"\n self.strategy.model = model\n self._call_and_handle_interrupt(\n self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path\n )\n\n def _fit_impl(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n ckpt_path: Optional[str] = None,\n ) -> None:\n Trainer._log_api_event(\"fit\")\n log.detail(f\"{self.__class__.__name__}: trainer fit stage\")\n\n self.state.fn = TrainerFn.FITTING\n self.state.status = TrainerStatus.RUNNING\n self.training = True\n self._last_train_dl_reload_epoch = float(\"-inf\")\n self._last_val_dl_reload_epoch = float(\"-inf\")\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(train_dataloaders, LightningDataModule):\n datamodule = train_dataloaders\n train_dataloaders = None\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n raise MisconfigurationException(\n \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(\n model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n )\n\n # TODO: ckpt_path only in v2.0\n ckpt_path = ckpt_path or self.resume_from_checkpoint\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=True, model_connected=self.lightning_module is not None\n )\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.training = False\n return results\n\n def validate(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n r\"\"\"\n Perform one evaluation epoch over the validation set.\n\n Args:\n model: The model to validate.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the validation results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end`, etc.\n The length of the list corresponds to the number of validation dataloaders used.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, 
datamodule)\n\n def _validate_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"validate\")\n log.detail(f\"{self.__class__.__name__}: trainer validate stage\")\n\n self.state.fn = TrainerFn.VALIDATING\n self.state.status = TrainerStatus.RUNNING\n self.validating = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply val_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run\"\n )\n\n self.validate_loop.verbose = verbose\n\n # links data to the trainer\n self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._validated_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n # run validate\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.validating = False\n\n return results\n\n def test(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n r\"\"\"\n Perform one evaluation epoch over the test set.\n It's separated from fit to make sure you never run on your test set until you want to.\n\n Args:\n model: The model to test.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples.\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to test.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n verbose: If True, prints the test results.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n Returns:\n List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks\n like :meth:`~pytorch_lightning.core.lightning.LightningModule.test_step`,\n :meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`, etc.\n The length of the list corresponds to the number of test dataloaders used.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)\n\n def _test_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n ckpt_path: Optional[str] = None,\n verbose: 
bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ) -> _EVALUATE_OUTPUT:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"test\")\n log.detail(f\"{self.__class__.__name__}: trainer test stage\")\n\n self.state.fn = TrainerFn.TESTING\n self.state.status = TrainerStatus.RUNNING\n self.testing = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n # If you supply a datamodule you can't supply test_dataloaders\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.test(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run\"\n )\n\n self.test_loop.verbose = verbose\n\n # links data to the trainer\n self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._tested_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n # run test\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.testing = False\n\n return results\n\n def predict(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n r\"\"\"\n Run inference on your data.\n This will call the model forward function to compute predictions. Useful to perform distributed\n and batched predictions. 
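A minimal usage sketch (``predict_loader`` stands in for a :class:`torch.utils.data.DataLoader` you provide)::\n\n predictions = trainer.predict(model, dataloaders=predict_loader)\n\n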
Logging is disabled in the predict hooks.\n\n Args:\n model: The model to predict with.\n\n dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,\n or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples.\n\n datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders.\n\n return_predictions: Whether to return predictions.\n ``True`` by default except when an accelerator that spawns processes is used (not supported).\n\n ckpt_path: Either ``best`` or path to the checkpoint you wish to predict.\n If ``None`` and the model instance was passed, use the current weights.\n Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded\n if a checkpoint callback is configured.\n\n Returns:\n Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.\n \"\"\"\n self.strategy.model = model or self.lightning_module\n return self._call_and_handle_interrupt(\n self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path\n )\n\n def _predict_impl(\n self,\n model: Optional[\"pl.LightningModule\"] = None,\n dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,\n datamodule: Optional[LightningDataModule] = None,\n return_predictions: Optional[bool] = None,\n ckpt_path: Optional[str] = None,\n ) -> Optional[_PREDICT_OUTPUT]:\n # --------------------\n # SETUP HOOK\n # --------------------\n Trainer._log_api_event(\"predict\")\n log.detail(f\"{self.__class__.__name__}: trainer predict stage\")\n\n self.state.fn = TrainerFn.PREDICTING\n self.state.status = TrainerStatus.RUNNING\n self.predicting = True\n\n self.predict_loop.return_predictions = return_predictions\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(dataloaders, LightningDataModule):\n datamodule = dataloaders\n dataloaders = None\n if dataloaders is not None and datamodule:\n raise MisconfigurationException(\"You cannot pass both `trainer.predict(dataloaders=..., datamodule=...)`\")\n\n model_provided = model is not None\n model = model or self.lightning_module\n if model is None:\n raise MisconfigurationException(\n \"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)\n\n self._ckpt_path = self.__set_ckpt_path(\n ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None\n )\n\n self._predicted_ckpt_path = self.ckpt_path # TODO: remove in v1.8\n\n results = self._run(model, ckpt_path=self.ckpt_path)\n\n assert self.state.stopped\n self.predicting = False\n\n return results\n\n def tune(\n self,\n model: \"pl.LightningModule\",\n train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,\n val_dataloaders: Optional[EVAL_DATALOADERS] = None,\n datamodule: Optional[LightningDataModule] = None,\n scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,\n lr_find_kwargs: Optional[Dict[str, Any]] = None,\n ) -> Dict[str, Optional[Union[int, _LRFinder]]]:\n r\"\"\"\n Runs routines to tune hyperparameters before training.\n\n Args:\n model: Model to tune.\n\n train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a\n :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.\n In the case of multiple 
dataloaders, please see this :ref:`section <multiple-dataloaders>`.\n\n val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.\n\n datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.\n\n scale_batch_size_kwargs: Arguments for :func:`~pytorch_lightning.tuner.batch_size_scaling.scale_batch_size`\n\n lr_find_kwargs: Arguments for :func:`~pytorch_lightning.tuner.lr_finder.lr_find`\n \"\"\"\n Trainer._log_api_event(\"tune\")\n\n self.state.fn = TrainerFn.TUNING\n self.state.status = TrainerStatus.RUNNING\n self.tuning = True\n\n # if a datamodule comes in as the second arg, then fix it for the user\n if isinstance(train_dataloaders, LightningDataModule):\n datamodule = train_dataloaders\n train_dataloaders = None\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:\n raise MisconfigurationException(\n \"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`\"\n )\n\n # links data to the trainer\n self._data_connector.attach_data(\n model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule\n )\n\n with isolate_rng():\n result = self.tuner._tune(\n model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs\n )\n\n assert self.state.stopped\n self.tuning = False\n\n return result\n\n def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:\n # restore modules after setup\n self._checkpoint_connector.resume_start(checkpoint_path)\n self._checkpoint_connector.restore_model()\n self._checkpoint_connector.restore_datamodule()\n if self.state.fn == TrainerFn.FITTING:\n # restore callback states\n self._checkpoint_connector.restore_callbacks()\n\n def _run(\n self, model: \"pl.LightningModule\", ckpt_path: Optional[str] = None\n ) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # attach model to the training type plugin\n self.strategy.connect(model)\n\n self._callback_connector._attach_model_callbacks()\n self._callback_connector._attach_model_logging_functions()\n\n verify_loop_configurations(self)\n\n # hook\n log.detail(f\"{self.__class__.__name__}: preparing data\")\n self._data_connector.prepare_data()\n\n # ----------------------------\n # SET UP TRAINING\n # ----------------------------\n self._call_callback_hooks(\"on_before_accelerator_backend_setup\")\n log.detail(f\"{self.__class__.__name__}: setting up strategy environment\")\n self.strategy.setup_environment()\n self.__setup_profiler()\n\n self._call_setup_hook() # allow user to setup lightning_module in accelerator environment\n\n # check if we should delay restoring checkpoint till later\n if not self.strategy.restore_checkpoint_after_setup:\n log.detail(f\"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}\")\n self._restore_modules_and_callbacks(ckpt_path)\n\n log.detail(f\"{self.__class__.__name__}: configuring sharded model\")\n self._call_configure_sharded_model() # allow user to setup in model sharded environment\n\n # ----------------------------\n # INSPECT THE CORE LOOPS\n # ----------------------------\n fr\"\"\"\n Lightning internal flow looks like this:\n {Trainer.fit} or {Trainer.test} or {Trainer.predict} ||\n | ||\n spawn 
processes ||\n {self.strategy.setup_environment} ||\n | ||\n setup accelerator ||\n and strategy || LIGHTNING\n | ||\n {self._run_stage} || FLOW\n | ||\n {self._run_train} || DIRECTION\n or {self._run_evaluate} ||\n or {self._run_predict} ||\n | ||\n results \\/\n This is used to guide readers to the core loops: train, test, predict.\n {self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)\n \"\"\"\n\n # ----------------------------\n # TRAIN\n # ----------------------------\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n # strategy will configure model and move it to the device\n self.strategy.setup(self)\n\n # hook\n if self.state.fn == TrainerFn.FITTING:\n self._call_callback_hooks(\"on_fit_start\")\n self._call_lightning_module_hook(\"on_fit_start\")\n\n self._log_hyperparams()\n\n if self.strategy.restore_checkpoint_after_setup:\n log.detail(f\"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}\")\n self._restore_modules_and_callbacks(ckpt_path)\n\n # restore optimizers, etc.\n log.detail(f\"{self.__class__.__name__}: restoring training state\")\n self._checkpoint_connector.restore_training_state()\n\n self._checkpoint_connector.resume_end()\n\n results = self._run_stage()\n\n log.detail(f\"{self.__class__.__name__}: trainer tearing down\")\n self._teardown()\n\n # ----------------------------\n # POST-Training CLEAN UP\n # ----------------------------\n # hook\n if self.state.fn == TrainerFn.FITTING:\n self._call_callback_hooks(\"on_fit_end\")\n self._call_lightning_module_hook(\"on_fit_end\")\n\n log.detail(f\"{self.__class__.__name__}: calling teardown hooks\")\n self._call_teardown_hook()\n\n self.state.status = TrainerStatus.FINISHED\n self.state.stage = None\n\n return results\n\n def _log_hyperparams(self) -> None:\n if not self.loggers:\n return\n # log hyper-parameters\n hparams_initial = None\n\n # save exp to get started (this is where the first experiment logs are written)\n datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False\n\n if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:\n datamodule_hparams = self.datamodule.hparams_initial\n lightning_hparams = self.lightning_module.hparams_initial\n inconsistent_keys = []\n for key in lightning_hparams.keys() & datamodule_hparams.keys():\n lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]\n if type(lm_val) != type(dm_val):\n inconsistent_keys.append(key)\n elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):\n inconsistent_keys.append(key)\n elif lm_val != dm_val:\n inconsistent_keys.append(key)\n if inconsistent_keys:\n raise MisconfigurationException(\n f\"Error while merging hparams: the keys {inconsistent_keys} are present \"\n \"in both the LightningModule's and LightningDataModule's hparams \"\n \"but have different values.\"\n )\n hparams_initial = {**lightning_hparams, **datamodule_hparams}\n elif self.lightning_module._log_hyperparams:\n hparams_initial = self.lightning_module.hparams_initial\n elif datamodule_log_hyperparams:\n hparams_initial = self.datamodule.hparams_initial\n\n for logger in self.loggers:\n if hparams_initial is not None:\n logger.log_hyperparams(hparams_initial)\n logger.log_graph(self.lightning_module)\n logger.save()\n\n def _teardown(self):\n \"\"\"This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and\n 
Callback; those are handled by :meth:`_call_teardown_hook`.\"\"\"\n self.strategy.post_dispatch(self)\n self.strategy.teardown()\n loop = self._active_loop\n # loop should never be `None` here but it can because we don't know the trainer stage with `ddp_spawn`\n if loop is not None:\n loop.teardown()\n self._logger_connector.teardown()\n self._signal_connector.teardown()\n\n def run_stage(self) -> None:\n rank_zero_deprecation(\n \"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8. Use\"\n \" `Trainer.{fit,validate,test,predict}` instead.\"\n )\n return self._run_stage()\n\n def _run_stage(self):\n self.strategy.barrier(\"run-stage\")\n self.strategy.dispatch(self)\n\n if self.evaluating:\n return self._run_evaluate()\n if self.predicting:\n return self._run_predict()\n return self._run_train()\n\n def _pre_training_routine(self):\n # wait for all to join if on distributed\n self.strategy.barrier(\"setup_training\")\n\n # register signals\n self._signal_connector.register_signal_handlers()\n\n # --------------------------\n # Pre-train\n # --------------------------\n self._call_callback_hooks(\"on_pretrain_routine_start\")\n self._call_lightning_module_hook(\"on_pretrain_routine_start\")\n\n self._call_callback_hooks(\"on_pretrain_routine_end\")\n self._call_lightning_module_hook(\"on_pretrain_routine_end\")\n\n def _run_train(self) -> None:\n self._pre_training_routine()\n\n with isolate_rng():\n self._run_sanity_check()\n\n # enable train mode\n self.model.train()\n torch.set_grad_enabled(True)\n\n self.fit_loop.trainer = self\n with torch.autograd.set_detect_anomaly(self._detect_anomaly):\n self.fit_loop.run()\n\n def _run_evaluate(self) -> _EVALUATE_OUTPUT:\n assert self.evaluating\n\n # reload dataloaders\n self._evaluation_loop._reload_evaluation_dataloaders()\n\n # reset trainer on this loop and all child loops in case user connected a custom loop\n self._evaluation_loop.trainer = self\n\n with self.profiler.profile(f\"run_{self.state.stage}_evaluation\"), torch.no_grad():\n eval_loop_results = self._evaluation_loop.run()\n\n # remove the tensors from the eval results\n for result in eval_loop_results:\n if isinstance(result, dict):\n for k, v in result.items():\n if isinstance(v, torch.Tensor):\n result[k] = v.cpu().item()\n\n return eval_loop_results\n\n def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:\n self.reset_predict_dataloader(self.lightning_module)\n # reset trainer on this loop and all child loops in case user connected a custom loop\n self.predict_loop.trainer = self\n with torch.no_grad():\n return self.predict_loop.run()\n\n def _run_sanity_check(self) -> None:\n val_loop = self.fit_loop.epoch_loop.val_loop\n\n should_sanity_check = (\n self.enable_validation\n and self.num_sanity_val_steps > 0\n # do not sanity check if restarting because it would mess up the loaded state\n and not val_loop.restarting\n )\n\n # run tiny validation (if validation defined)\n # to make sure program won't crash during val\n if should_sanity_check:\n stage = self.state.stage\n self.sanity_checking = True\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n self._call_callback_hooks(\"on_sanity_check_start\")\n\n # reload dataloaders\n val_loop._reload_evaluation_dataloaders()\n self.num_sanity_val_batches = [\n min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches\n ]\n\n # run eval step\n with torch.no_grad():\n val_loop.run()\n\n 
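# the sanity-check validation pass is complete at this point; the hook call and the\n # resets below discard its logged results and progress so that nothing from the\n # sanity pass leaks into the real training run\n 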
self._call_callback_hooks(\"on_sanity_check_end\")\n\n # reset logger connector\n self._logger_connector.reset_results()\n self._logger_connector.reset_metrics()\n\n # reset the progress tracking state after sanity checking. we don't need to set the state before\n # because sanity check only runs when we are not restarting\n _reset_progress(val_loop)\n\n # restore the previous stage when the sanity check if finished\n self.state.stage = stage\n\n def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:\n # fault-tolerance takes precedence\n from pytorch_lightning.callbacks.fault_tolerance import _FaultToleranceCheckpoint\n\n ft_checkpoints = [cb for cb in self.callbacks if isinstance(cb, _FaultToleranceCheckpoint)]\n if ft_checkpoints:\n ft_ckpt_path = ft_checkpoints[0].ckpt_path\n fs = get_filesystem(ft_ckpt_path)\n if fs.exists(ft_ckpt_path):\n return ft_ckpt_path\n\n if model_provided and ckpt_path is None:\n # use passed model to function without loading weights\n return\n\n fn = self.state.fn.value\n\n if model_connected and ckpt_path is None:\n rank_zero_warn(\n f\"`.{fn}(ckpt_path=None)` was called without a model.\"\n \" The best model of the previous `fit` call will be used.\"\n f\" You can pass `{fn}(ckpt_path='best')` to use and best model\"\n \" checkpoint and avoid this warning or\"\n \" `ckpt_path=trainer.checkpoint_callback.last_model_path` to use the last model.\"\n )\n ckpt_path = \"best\"\n\n if ckpt_path == \"best\":\n if len(self.checkpoint_callbacks) > 1:\n rank_zero_warn(\n f'`.{fn}(ckpt_path=\"best\")` is called with Trainer configured with multiple `ModelCheckpoint`'\n \" callbacks. It will use the best checkpoint path from first checkpoint callback.\"\n )\n\n if not self.checkpoint_callback:\n raise MisconfigurationException(\n f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured.'\n )\n\n if not self.checkpoint_callback.best_model_path:\n if self.fast_dev_run:\n raise MisconfigurationException(\n f'You cannot execute `.{fn}(ckpt_path=\"best\")` with `fast_dev_run=True`.'\n f\" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`\"\n )\n raise MisconfigurationException(\n f'`.{fn}(ckpt_path=\"best\")` is set but `ModelCheckpoint` is not configured to save the best model.'\n )\n # load best weights\n ckpt_path = self.checkpoint_callback.best_model_path\n\n if not ckpt_path:\n raise MisconfigurationException(\n f\"`.{fn}()` found no path for the best weights: {ckpt_path!r}. 
Please\"\n f\" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`\"\n )\n return ckpt_path\n\n def _call_setup_hook(self) -> None:\n fn = self.state.fn._setup_fn\n\n self.strategy.barrier(\"pre_setup\")\n\n if self.datamodule is not None:\n self.datamodule.setup(stage=fn)\n self._call_callback_hooks(\"setup\", stage=fn)\n self._call_lightning_module_hook(\"setup\", stage=fn)\n\n self.strategy.barrier(\"post_setup\")\n\n def _call_configure_sharded_model(self) -> None:\n with self.strategy.model_sharded_context():\n self._handle_meta_model()\n self._call_lightning_module_hook(\"configure_sharded_model\")\n self._call_callback_hooks(\"on_configure_sharded_model\")\n\n def _handle_meta_model(self) -> None:\n if not is_on_meta_device(self.lightning_module):\n return\n\n if isinstance(self.strategy, DDPSpawnStrategy):\n raise MisconfigurationException(\"LightningModule on meta device isn't supported with spawn.\")\n\n materialize_module(self.lightning_module)\n # the trainer reference is lost during materialization\n self.lightning_module.trainer = proxy(self)\n\n def _call_teardown_hook(self) -> None:\n fn = self.state.fn._setup_fn\n\n if self.datamodule is not None:\n self.datamodule.teardown(stage=fn)\n\n self._call_callback_hooks(\"teardown\", stage=fn)\n self._call_lightning_module_hook(\"teardown\", stage=fn)\n\n self.lightning_module._current_fx_name = None\n # these could have become stale if metrics are defined in `setup`\n self.lightning_module._metric_attributes = None\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu kill loggers.\n for logger in self.loggers:\n logger.finalize(\"success\")\n\n # summarize profile results\n self.profiler.describe()\n\n def call_hook(\n self, hook_name: str, *args: Any, pl_module: Optional[\"pl.LightningModule\"] = None, **kwargs: Any\n ) -> Any:\n r\"\"\"\n .. 
deprecated:: v1.6\n The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.\n \"\"\"\n rank_zero_deprecation(\"The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.\")\n pl_module = self.lightning_module or pl_module\n if pl_module:\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n # always profile hooks\n with self.profiler.profile(hook_name):\n\n # first call trainer hook\n callback_fx = getattr(self, hook_name, None)\n if callable(callback_fx):\n callback_fx(*args, **kwargs)\n\n # next call hook in lightningModule\n output = None\n model_fx = getattr(pl_module, hook_name, None)\n if callable(model_fx):\n output = model_fx(*args, **kwargs)\n\n # call the strategy hook\n if hook_name not in (\"setup\", \"teardown\", \"on_train_start\") and hasattr(self.strategy, hook_name):\n strategy_hook = getattr(self.strategy, hook_name)\n strategy_output = strategy_hook(*args, **kwargs)\n output = strategy_output if output is None else output\n\n if pl_module:\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n def _call_lightning_module_hook(\n self,\n hook_name: str,\n *args: Any,\n pl_module: Optional[\"pl.LightningModule\"] = None,\n **kwargs: Any,\n ) -> Any:\n pl_module = pl_module or self.lightning_module\n\n if pl_module is None:\n raise TypeError(\"No Lightning Module is available to call hooks on\")\n\n fn = getattr(pl_module, hook_name)\n if not callable(fn):\n return\n\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n with self.profiler.profile(f\"[LightningModule]{pl_module.__class__.__name__}.{hook_name}\"):\n output = fn(*args, **kwargs)\n\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n def _call_callback_hooks(\n self,\n hook_name: str,\n *args: Any,\n **kwargs: Any,\n ) -> None:\n log.detail(f\"{self.__class__.__name__}: calling callback hook: {hook_name}\")\n # TODO: remove if block in v1.8\n if hook_name in (\"on_init_start\", \"on_init_end\"):\n # these `Callback` hooks are the only ones that do not take a lightning module.\n # we also don't profile bc profiler hasn't been set yet\n for callback in self.callbacks:\n fn = getattr(callback, hook_name)\n if callable(fn):\n fn(self, *args, **kwargs)\n return\n\n pl_module = self.lightning_module\n if pl_module:\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n # TODO: remove if block in v1.7\n if hook_name == \"on_train_batch_start\":\n with self.profiler.profile(hook_name):\n self._on_train_batch_start(*args, **kwargs)\n elif hook_name == \"on_train_batch_end\":\n with self.profiler.profile(hook_name):\n self._on_train_batch_end(*args, **kwargs)\n else:\n for callback in self.callbacks:\n fn = getattr(callback, hook_name)\n if callable(fn):\n with self.profiler.profile(f\"[Callback]{callback.state_key}.{hook_name}\"):\n fn(self, self.lightning_module, *args, **kwargs)\n\n if pl_module:\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n # TODO: Delete this in v1.7 (deprecations: #9816 and #11148)\n def _on_train_batch_start(self, batch, batch_idx, dataloader_idx=0):\n r\"\"\"Called when the training batch begins. 
This function is needed because of two different deprecations affecting\n the original function in TrainerCallbackHookMixin: #9816 and #11148.\n \"\"\"\n for callback in self.callbacks:\n if is_param_in_hook_signature(callback.on_train_batch_start, \"dataloader_idx\", explicit=True):\n callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx, 0)\n else:\n callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx)\n\n # TODO: Delete this in v1.7 (deprecations: #9816 and #11148)\n def _on_train_batch_end(self, outputs: STEP_OUTPUT, batch, batch_idx, dataloader_idx=0):\n r\"\"\"Called when the training batch ends. This function is needed because of two different deprecations affecting\n the original function in TrainerCallbackHookMixin: #9816 and #11148.\n \"\"\"\n for callback in self.callbacks:\n if is_param_in_hook_signature(callback.on_train_batch_end, \"dataloader_idx\", explicit=True):\n callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx, 0)\n else:\n callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx)\n\n def _call_callbacks_state_dict(self) -> Dict[str, dict]:\n \"\"\"Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by\n `Callback.state_key`.\"\"\"\n callback_state_dicts = {}\n for callback in self.callbacks:\n state_dict = callback.state_dict()\n if state_dict:\n callback_state_dicts[callback.state_key] = state_dict\n return callback_state_dicts\n\n def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.\n\n Will be removed in v1.8: If state is returned, we insert the callback state into\n ``checkpoint[\"callbacks\"][Callback.state_key]``. It overrides ``state_dict`` if already present.\n \"\"\"\n for callback in self.callbacks:\n # TODO: Add profiling for on_save_checkpoint hook\n state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint)\n if state:\n # TODO: Add deprecation warning if state is returned (see reference PR #11887)\n checkpoint[\"callbacks\"][callback.state_key] = state\n\n def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when loading a model checkpoint.\n\n Calls every callback's `on_load_checkpoint` hook. 
We have a dedicated function for this rather than using\n `_call_callback_hooks` because we have special logic for getting callback_states.\n \"\"\"\n callback_states: Dict[Union[Type, str], Dict] = checkpoint.get(\"callbacks\")\n\n if callback_states is None:\n return\n\n is_legacy_ckpt = Version(checkpoint[\"pytorch-lightning_version\"]) < Version(\"1.5.0dev\")\n current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in self.callbacks}\n difference = callback_states.keys() - current_callbacks_keys\n if difference:\n rank_zero_warn(\n \"Be aware that when using `ckpt_path`,\"\n \" callbacks used to create the checkpoint need to be provided during `Trainer` instantiation.\"\n f\" Please add the following callbacks: {list(difference)}.\",\n )\n\n for callback in self.callbacks:\n state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))\n if state:\n state = deepcopy(state)\n # TODO: Add profiling for on_load_checkpoint hook\n callback.on_load_checkpoint(self, self.lightning_module, state)\n\n def _call_callbacks_load_state_dict(self, checkpoint: Dict[str, Any]) -> None:\n \"\"\"Called when loading a model checkpoint, calls every callback's `load_state_dict`.\"\"\"\n callback_states: Dict[Union[Type, str], Dict] = checkpoint.get(\"callbacks\")\n\n if callback_states is None:\n return\n\n for callback in self.callbacks:\n state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))\n if state:\n state = deepcopy(state)\n callback.load_state_dict(state)\n\n def _call_strategy_hook(\n self,\n hook_name: str,\n *args: Any,\n **kwargs: Any,\n ) -> Any:\n pl_module = self.lightning_module\n prev_fx_name = pl_module._current_fx_name\n pl_module._current_fx_name = hook_name\n\n fn = getattr(self.strategy, hook_name)\n if not callable(fn):\n return\n\n with self.profiler.profile(f\"[Strategy]{self.strategy.__class__.__name__}.{hook_name}\"):\n output = fn(*args, **kwargs)\n\n # restore current_fx when nested context\n pl_module._current_fx_name = prev_fx_name\n\n return output\n\n @staticmethod\n def _parse_devices(\n gpus: Optional[Union[List[int], str, int]],\n auto_select_gpus: bool,\n tpu_cores: Optional[Union[List[int], str, int]],\n ) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:\n return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)\n\n @staticmethod\n def _log_api_event(event: str) -> None:\n torch._C._log_api_usage_once(\"lightning.trainer.\" + event)\n\n def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:\n if isinstance(profiler, str):\n PROFILERS = {\n \"simple\": SimpleProfiler,\n \"advanced\": AdvancedProfiler,\n \"pytorch\": PyTorchProfiler,\n \"xla\": XLAProfiler,\n }\n profiler = profiler.lower()\n if profiler not in PROFILERS:\n raise MisconfigurationException(\n \"When passing string value for the `profiler` parameter of `Trainer`,\"\n f\" it can only be one of {list(PROFILERS.keys())}\"\n )\n profiler_class = PROFILERS[profiler]\n profiler = profiler_class()\n self.profiler: BaseProfiler = profiler or PassThroughProfiler()\n\n def __setup_profiler(self) -> None:\n local_rank = self.local_rank if self.world_size > 1 else None\n self.profiler._lightning_module = proxy(self.lightning_module)\n self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)\n\n def _log_device_info(self) -> None:\n rank_zero_info(\n f\"GPU available: {torch.cuda.is_available()}, used: 
{isinstance(self.accelerator, GPUAccelerator)}\"\n )\n\n num_tpu_cores = (\n self.tpu_cores if self.tpu_cores is not None and isinstance(self.accelerator, TPUAccelerator) else 0\n )\n rank_zero_info(f\"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores\")\n\n num_ipus = self.ipus if self.ipus is not None else 0\n rank_zero_info(f\"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs\")\n\n if torch.cuda.is_available() and not isinstance(self.accelerator, GPUAccelerator):\n rank_zero_warn(\n \"GPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='gpu', devices={GPUAccelerator.auto_device_count()})`.\",\n category=PossibleUserWarning,\n )\n\n if _TPU_AVAILABLE and not isinstance(self.accelerator, TPUAccelerator):\n rank_zero_warn(\n \"TPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='tpu', devices={TPUAccelerator.auto_device_count()})`.\"\n )\n\n if _IPU_AVAILABLE and not isinstance(self.accelerator, IPUAccelerator):\n rank_zero_warn(\n \"IPU available but not used. Set `accelerator` and `devices` using\"\n f\" `Trainer(accelerator='ipu', devices={IPUAccelerator.auto_device_count()})`.\"\n )\n\n \"\"\"\n Data loading methods\n \"\"\"\n\n def reset_train_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the train dataloader and initialises required variables (number of batches, when to validate,\n etc.).\n\n Args:\n model: The ``LightningModule`` if calling this outside of the trainer scope.\n \"\"\"\n source = self._data_connector._train_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"training_step\", pl_module)\n enable_training = self.limit_train_batches > 0\n if not (source.is_defined() and has_step and enable_training):\n return\n\n self.train_dataloader = self._data_connector._request_dataloader(RunningStage.TRAINING, model=model)\n\n if self.overfit_batches > 0:\n self.train_dataloader = self._data_connector._resolve_overfit_batches(self.train_dataloader)\n\n # automatically add samplers\n self.train_dataloader = apply_to_collection(\n self.train_dataloader,\n (DataLoader, CombinedLoader),\n self._data_connector._prepare_dataloader,\n mode=RunningStage.TRAINING,\n )\n loaders = (\n self.train_dataloader.loaders\n if isinstance(self.train_dataloader, CombinedLoader)\n else self.train_dataloader\n )\n\n # check the workers recursively\n apply_to_collection(loaders, DataLoader, self._data_connector._worker_check, \"train_dataloader\")\n\n # add worker_init_fn for correct seeding in worker processes\n apply_to_collection(loaders, DataLoader, _auto_add_worker_init_fn, rank=self.global_rank)\n\n # add collate_fn to collect metadata for fault tolerant training\n if _fault_tolerant_training():\n apply_to_collection(loaders, DataLoader, _add_capture_metadata_collate)\n\n # wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches\n if not isinstance(self.train_dataloader, CombinedLoader):\n self.train_dataloader = CombinedLoader(loaders, self._data_connector.multiple_trainloader_mode)\n\n module = model or self.lightning_module or self.datamodule\n self.num_training_batches = (\n len(self.train_dataloader)\n if has_len_all_ranks(self.train_dataloader, self.strategy, module)\n else float(\"inf\")\n )\n\n if isinstance(self.limit_train_batches, int):\n self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))\n elif 
self.num_training_batches != float(\"inf\"):\n self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)\n elif self.limit_train_batches != 1.0:\n raise MisconfigurationException(\n \"When using an IterableDataset for `limit_train_batches`,\"\n \" `Trainer(limit_train_batches)` must be `1.0` or an int. An int k specifies\"\n \" `num_training_batches` to use.\"\n )\n\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f\"`val_check_interval` ({self.val_check_interval}) must be less than or equal \"\n f\"to the number of the training batches ({self.num_training_batches}). \"\n \"If you want to disable validation set `limit_val_batches` to 0.0 instead.\"\n )\n else:\n if not has_len_all_ranks(self.train_dataloader, self.strategy, module):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float(\"inf\")\n else:\n raise MisconfigurationException(\n \"When using an IterableDataset for `train_dataloader`,\"\n \" `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies\"\n \" checking validation every k training batches.\"\n )\n else:\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n if self.loggers and self.num_training_batches < self.log_every_n_steps:\n rank_zero_warn(\n f\"The number of training samples ({self.num_training_batches}) is smaller than the logging interval\"\n f\" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if\"\n \" you want to see logs for the training epoch.\",\n category=PossibleUserWarning,\n )\n\n # store epoch of dataloader reset for reload_dataloaders_every_n_epochs\n self._last_train_dl_reload_epoch = self.current_epoch\n\n def reset_val_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._val_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"validation_step\", pl_module)\n enable_validation = self.limit_val_batches > 0\n if source.is_defined() and has_step and enable_validation:\n self.num_val_batches, self.val_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.VALIDATING, model=pl_module\n )\n\n # store epoch of dataloader reset for reload_dataloaders_every_n_epochs\n self._last_val_dl_reload_epoch = self.current_epoch\n\n def reset_test_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the test dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._test_dataloader_source\n pl_module = self.lightning_module or model\n has_step = is_overridden(\"test_step\", pl_module)\n enable_testing = self.limit_test_batches > 0\n if source.is_defined() and has_step and enable_testing:\n self.num_test_batches, self.test_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.TESTING, model=pl_module\n )\n\n def reset_predict_dataloader(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets the predict dataloader and determines the number of batches.\n\n Args:\n model: The ``LightningModule`` if 
called outside of the trainer scope.\n \"\"\"\n source = self._data_connector._predict_dataloader_source\n pl_module = self.lightning_module or model\n enable_prediction = self.limit_predict_batches > 0\n if source.is_defined() and enable_prediction:\n self.num_predict_batches, self.predict_dataloaders = self._data_connector._reset_eval_dataloader(\n RunningStage.PREDICTING, model=pl_module\n )\n\n def reset_train_val_dataloaders(self, model: Optional[\"pl.LightningModule\"] = None) -> None:\n \"\"\"Resets train and val dataloaders if none are attached to the trainer.\n\n The val dataloader must be initialized before training loop starts, as the training loop\n inspects the val dataloader to determine whether to run the evaluation loop.\n Args:\n model: The ``LightningModule`` if called outside of the trainer scope.\n \"\"\"\n if self.train_dataloader is None:\n self.reset_train_dataloader(model=model)\n if self.val_dataloaders is None:\n self.reset_val_dataloader(model=model)\n\n \"\"\"\n Accelerator properties\n \"\"\"\n\n @property\n def accelerator(self) -> Accelerator:\n return self.strategy.accelerator\n\n @property\n def strategy(self) -> Strategy:\n return self._accelerator_connector.strategy\n\n @property\n def training_type_plugin(self) -> Strategy:\n rank_zero_deprecation(\n \"`Trainer.training_type_plugin` is deprecated in v1.6 and will be removed in v1.8. Use\"\n \" `Trainer.strategy` instead.\"\n )\n return self.strategy\n\n @property\n def precision_plugin(self) -> PrecisionPlugin:\n return self.strategy.precision_plugin\n\n @property\n def global_rank(self) -> int:\n return self.strategy.global_rank\n\n @property\n def local_rank(self) -> int:\n # some training types define a local rank\n return getattr(self.strategy, \"local_rank\", 0)\n\n @property\n def node_rank(self) -> int:\n # some training types define a node rank\n return getattr(self.strategy, \"node_rank\", 0)\n\n @property\n def world_size(self) -> int:\n # some training types define a world size\n return getattr(self.strategy, \"world_size\", 1)\n\n @property\n def should_rank_save_checkpoint(self) -> bool:\n rank_zero_deprecation(\n \"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.\", stacklevel=5\n )\n strategy = self.strategy\n return (\n isinstance(strategy, pl.strategies.TPUSpawnStrategy) and strategy.local_rank == 0 or strategy.is_global_zero\n )\n\n @property\n def num_nodes(self) -> int:\n return getattr(self.strategy, \"num_nodes\", 1)\n\n @property\n def device_ids(self) -> List[int]:\n \"\"\"List of device indexes per node.\"\"\"\n devices = getattr(self.strategy, \"parallel_devices\", [self.strategy.root_device])\n device_ids = []\n for idx, device in enumerate(devices):\n if isinstance(device, torch.device):\n device_ids.append(device.index or idx)\n elif isinstance(device, int):\n device_ids.append(device)\n return device_ids\n\n @property\n def num_devices(self) -> int:\n \"\"\"Number of devices the trainer uses per node.\"\"\"\n return len(self.device_ids)\n\n @property\n def num_processes(self) -> int:\n return self._accelerator_connector.num_processes\n\n @property\n def root_gpu(self) -> Optional[int]:\n rank_zero_deprecation(\n \"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. 
\"\n \"Please use `Trainer.strategy.root_device.index` instead.\"\n )\n return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None\n\n @property\n def tpu_cores(self) -> int:\n return self._accelerator_connector.tpu_cores\n\n @property\n def ipus(self) -> int:\n return self._accelerator_connector.num_ipus\n\n @property\n def num_gpus(self) -> int:\n rank_zero_deprecation(\n \"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` instead.\"\n )\n return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0\n\n @property\n def devices(self) -> int:\n rank_zero_deprecation(\n \"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8.\"\n \" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead.\"\n )\n return self.num_devices\n\n @property\n def data_parallel_device_ids(self) -> Optional[List[int]]:\n return (\n self._accelerator_connector.parallel_device_ids if self._accelerator_connector.parallel_device_ids else None\n )\n\n @property\n def lightning_module(self) -> \"pl.LightningModule\":\n # TODO: this is actually an optional return\n return self.strategy.lightning_module\n\n @property\n def optimizers(self) -> List[Optimizer]:\n return self.strategy.optimizers\n\n @optimizers.setter\n def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:\n self.strategy.optimizers = new_optims\n\n @property\n def lightning_optimizers(self) -> Dict[int, LightningOptimizer]:\n rank_zero_deprecation(\n \"`Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8\", stacklevel=5\n )\n return self.strategy._lightning_optimizers\n\n @property\n def lr_scheduler_configs(self) -> List[LRSchedulerConfig]:\n return self.strategy.lr_scheduler_configs\n\n @property\n def lr_schedulers(self) -> List[Dict[str, Any]]:\n rank_zero_deprecation(\n \"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8.\"\n \" You can use `trainer.lr_scheduler_configs` instead which contains dataclasses instead of dictionaries.\",\n stacklevel=5,\n )\n from dataclasses import asdict\n\n return [asdict(config) for config in self.strategy.lr_scheduler_configs]\n\n @property\n def optimizer_frequencies(self) -> List[int]:\n return self.strategy.optimizer_frequencies\n\n @optimizer_frequencies.setter\n def optimizer_frequencies(self, new_freqs: List[int]) -> None:\n self.strategy.optimizer_frequencies = new_freqs\n\n @property\n def amp_backend(self) -> Optional[AMPType]:\n if isinstance(self.precision_plugin, ApexMixedPrecisionPlugin):\n return AMPType.APEX\n if isinstance(self.precision_plugin, NativeMixedPrecisionPlugin):\n return AMPType.NATIVE\n return None\n\n @property\n def precision(self) -> Union[str, int]:\n return self.strategy.precision_plugin.precision\n\n @property\n def scaler(self) -> Optional[Any]:\n return getattr(self.precision_plugin, \"scaler\", None)\n\n @property\n def gpus(self) -> Optional[Union[List[int], str, int]]:\n return self._accelerator_connector.gpus\n\n @property\n def model(self) -> torch.nn.Module:\n \"\"\"The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.\n\n To access the pure LightningModule, use\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead.\n \"\"\"\n return self.strategy.model\n\n @model.setter\n def model(self, model: torch.nn.Module) -> None:\n \"\"\"Setter for the model, pass-through to accelerator and plugin 
where the model reference is stored. Used\n by the Tuner to reset the state of Trainer and Accelerator.\n\n Args:\n model: The LightningModule, possibly wrapped into DataParallel or DistributedDataParallel, depending\n on the backend.\n \"\"\"\n self.strategy.model = model\n\n \"\"\"\n General properties\n \"\"\"\n\n @property\n def log_dir(self) -> Optional[str]:\n if len(self.loggers) == 1:\n if isinstance(self.logger, TensorBoardLogger):\n dirpath = self.logger.log_dir\n else:\n dirpath = self.logger.save_dir\n else:\n dirpath = self.default_root_dir\n\n dirpath = self.strategy.broadcast(dirpath)\n return dirpath\n\n @property\n def use_amp(self) -> bool:\n rank_zero_deprecation(\n \"`Trainer.use_amp` is deprecated in v1.6.0 and will be removed in v1.8.0.\"\n \" Please use `Trainer.amp_backend` instead.\"\n )\n return self.precision == 16\n\n @property\n def is_global_zero(self) -> bool:\n return self.strategy.is_global_zero\n\n @property\n def slurm_job_id(self) -> Optional[int]:\n rank_zero_deprecation(\"Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.\")\n return SLURMEnvironment.job_id()\n\n @property\n def distributed_sampler_kwargs(self) -> Optional[dict]:\n if isinstance(self.strategy, ParallelStrategy):\n return self.strategy.distributed_sampler_kwargs\n\n @property\n def data_parallel(self) -> bool:\n return isinstance(self.strategy, ParallelStrategy)\n\n @property\n def progress_bar_dict(self) -> dict:\n \"\"\"Read-only for progress bar metrics.\"\"\"\n rank_zero_deprecation(\n \"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7.\"\n \" Use `ProgressBarBase.get_metrics` instead.\"\n )\n ref_model = self.lightning_module\n ref_model = cast(pl.LightningModule, ref_model)\n if self.progress_bar_callback:\n return self.progress_bar_callback.get_metrics(self, ref_model)\n return self.progress_bar_metrics\n\n @property\n def enable_validation(self) -> bool:\n \"\"\"Check if we should run validation during training.\"\"\"\n return (\n self._data_connector._val_dataloader_source.is_defined()\n and is_overridden(\"validation_step\", self.lightning_module)\n and self.limit_val_batches > 0\n )\n\n @property\n def default_root_dir(self) -> str:\n \"\"\"The default location to save artifacts of loggers, checkpoints etc.\n\n It is used as a fallback if logger or checkpoint callback do not define specific save paths.\n \"\"\"\n if get_filesystem(self._default_root_dir).protocol == \"file\":\n return os.path.normpath(self._default_root_dir)\n return self._default_root_dir\n\n @property\n def weights_save_path(self) -> str:\n \"\"\"\n The default root location to save weights (checkpoints), e.g., when the\n :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.\n\n .. 
deprecated:: v1.6\n `Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.\n \"\"\"\n rank_zero_deprecation(\"`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.\")\n return self._weights_save_path_internal\n\n # TODO: Remove _weights_save_path_internal in v1.8\n @property\n def _weights_save_path_internal(self) -> str:\n \"\"\"This is an internal implementation of weights_save_path which allows weights_save_path to be used\n internally by the framework without emitting a deprecation warning.\n\n To be removed in v1.8.\n \"\"\"\n if get_filesystem(self._weights_save_path).protocol == \"file\":\n return os.path.normpath(self._weights_save_path)\n return self._weights_save_path\n\n @property\n def early_stopping_callback(self) -> Optional[EarlyStopping]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.early_stopping_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def early_stopping_callbacks(self) -> List[EarlyStopping]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in\n the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, EarlyStopping)]\n\n @property\n def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`\n found in the Trainer.callbacks list.\"\"\"\n return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]\n\n @property\n def checkpoint_callback(self) -> Optional[ModelCheckpoint]:\n \"\"\"The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the\n Trainer.callbacks list, or ``None`` if it doesn't exist.\"\"\"\n callbacks = self.checkpoint_callbacks\n return callbacks[0] if len(callbacks) > 0 else None\n\n @property\n def checkpoint_callbacks(self) -> List[ModelCheckpoint]:\n \"\"\"A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found\n in the Trainer.callbacks list.\"\"\"\n return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]\n\n @property\n def progress_bar_callback(self) -> Optional[ProgressBarBase]:\n \"\"\"An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the\n Trainer.callbacks list, or ``None`` if one doesn't exist.\"\"\"\n for c in self.callbacks:\n if isinstance(c, ProgressBarBase):\n return c\n return None\n\n @property\n def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:\n resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path\n if resume_from_checkpoint is not None:\n rank_zero_deprecation(\n \"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0.\"\n \" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.\",\n stacklevel=5,\n )\n\n return resume_from_checkpoint\n\n @property\n def ckpt_path(self) -> Optional[str]:\n \"\"\"Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`,\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`, or\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. 
``None`` otherwise.\"\"\"\n return self._ckpt_path\n\n @property\n def validated_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._validated_ckpt_path\n\n @validated_ckpt_path.setter\n def validated_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path`.\",\n stacklevel=5,\n )\n self._validated_ckpt_path = ckpt_path\n\n @property\n def tested_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._tested_ckpt_path\n\n @tested_ckpt_path.setter\n def tested_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n self._tested_ckpt_path = ckpt_path\n\n @property\n def predicted_ckpt_path(self) -> Optional[str]:\n rank_zero_deprecation(\n \"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via\"\n \" `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n return self._predicted_ckpt_path\n\n @predicted_ckpt_path.setter\n def predicted_ckpt_path(self, ckpt_path: Optional[str]) -> None:\n rank_zero_deprecation(\n \"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. 
The\"\n \" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the\"\n \" read-only `Trainer.ckpt_path` instead.\",\n stacklevel=5,\n )\n self._predicted_ckpt_path = ckpt_path\n\n def save_checkpoint(\n self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None\n ) -> None:\n r\"\"\"\n Runs routine to create a checkpoint.\n\n Args:\n filepath: Path where checkpoint is saved.\n weights_only: If ``True``, will only save the model weights.\n storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin\n\n \"\"\"\n self._checkpoint_connector.save_checkpoint(filepath, weights_only=weights_only, storage_options=storage_options)\n\n \"\"\"\n Parsing properties\n \"\"\"\n\n @classmethod\n def default_attributes(cls) -> dict:\n init_signature = inspect.signature(cls)\n return {k: v.default for k, v in init_signature.parameters.items()}\n\n @classmethod\n def get_deprecated_arg_names(cls) -> List:\n \"\"\"Returns a list with deprecated Trainer arguments.\"\"\"\n depr_arg_names = []\n for name, val in cls.__dict__.items():\n if name.startswith(\"DEPRECATED\") and isinstance(val, (tuple, list)):\n depr_arg_names.extend(val)\n return depr_arg_names\n\n @classmethod\n def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:\n return from_argparse_args(cls, args, **kwargs)\n\n @classmethod\n def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:\n return parse_argparser(cls, arg_parser)\n\n @classmethod\n def match_env_arguments(cls) -> Namespace:\n return parse_env_variables(cls)\n\n @classmethod\n def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:\n return add_argparse_args(cls, parent_parser, **kwargs)\n\n \"\"\"\n State properties\n \"\"\"\n\n @property\n def interrupted(self) -> bool:\n return self.state.status == TrainerStatus.INTERRUPTED\n\n @property\n def training(self) -> bool:\n return self.state.stage == RunningStage.TRAINING\n\n @training.setter\n def training(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TRAINING\n elif self.training:\n self.state.stage = None\n\n @property\n def testing(self) -> bool:\n return self.state.stage == RunningStage.TESTING\n\n @testing.setter\n def testing(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TESTING\n elif self.testing:\n self.state.stage = None\n\n @property\n def predicting(self) -> bool:\n return self.state.stage == RunningStage.PREDICTING\n\n @predicting.setter\n def predicting(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.PREDICTING\n elif self.predicting:\n self.state.stage = None\n\n @property\n def tuning(self) -> bool:\n return self.state.stage == RunningStage.TUNING\n\n @tuning.setter\n def tuning(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.TUNING\n elif self.tuning:\n self.state.stage = None\n\n @property\n def validating(self) -> bool:\n return self.state.stage == RunningStage.VALIDATING\n\n @validating.setter\n def validating(self, val: bool) -> None:\n if val:\n self.state.stage = RunningStage.VALIDATING\n elif self.validating:\n self.state.stage = None\n\n @property\n def evaluating(self) -> bool:\n return self.state.stage and self.state.stage.evaluating\n\n @property\n def sanity_checking(self) -> bool:\n return self.state.stage == RunningStage.SANITY_CHECKING\n\n @sanity_checking.setter\n def sanity_checking(self, val: bool) -> None:\n if 
val:\n self.state.stage = RunningStage.SANITY_CHECKING\n elif self.sanity_checking:\n self.state.stage = None\n\n \"\"\"\n Loop properties\n \"\"\"\n\n @property\n def global_step(self) -> int:\n \"\"\"The number of optimizer steps taken (does not reset each epoch).\n\n This includes multiple optimizers and TBPTT steps (if enabled).\n \"\"\"\n return self.fit_loop.epoch_loop.global_step\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch, updated after the epoch end hooks are run.\"\"\"\n return self.fit_loop.epoch_progress.current.completed\n\n @property\n def max_epochs(self) -> int:\n return self.fit_loop.max_epochs\n\n @property\n def min_epochs(self) -> int:\n return self.fit_loop.min_epochs\n\n @property\n def max_steps(self) -> int:\n return self.fit_loop.max_steps\n\n @property\n def min_steps(self) -> Optional[int]:\n return self.fit_loop.min_steps\n\n @property\n def is_last_batch(self) -> bool:\n return self.fit_loop.epoch_loop.batch_progress.is_last_batch\n\n @property\n def fit_loop(self) -> FitLoop:\n return self._fit_loop\n\n @fit_loop.setter\n def fit_loop(self, loop: FitLoop):\n \"\"\"Attach a custom fit loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`.\n \"\"\"\n loop.trainer = self\n self._fit_loop = loop\n\n @property\n def validate_loop(self) -> EvaluationLoop:\n return self._validate_loop\n\n @validate_loop.setter\n def validate_loop(self, loop: EvaluationLoop):\n \"\"\"Attach a custom validation loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. Note that this loop is different from the one\n running during training inside the :meth:`pytorch_lightning.trainer.trainer.Trainer.fit` call.\n \"\"\"\n loop.trainer = self\n self._validate_loop = loop\n\n @property\n def test_loop(self) -> EvaluationLoop:\n return self._test_loop\n\n @test_loop.setter\n def test_loop(self, loop: EvaluationLoop):\n \"\"\"Attach a custom test loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.\n \"\"\"\n loop.trainer = self\n self._test_loop = loop\n\n @property\n def predict_loop(self) -> PredictionLoop:\n return self._predict_loop\n\n @predict_loop.setter\n def predict_loop(self, loop: PredictionLoop):\n \"\"\"Attach a custom prediction loop to this Trainer.\n\n It will run with\n :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.\n \"\"\"\n loop.trainer = self\n self._predict_loop = loop\n\n @property\n def verbose_evaluate(self) -> bool:\n rank_zero_deprecation(\n \"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. The current value\"\n \" returned is the union of the validate and test loop values. You can choose which one to access with\"\n \" `trainer.{validate,test}_loop.verbose`.\",\n stacklevel=5,\n )\n return self.validate_loop.verbose or self.test_loop.verbose\n\n @verbose_evaluate.setter\n def verbose_evaluate(self, verbose: bool) -> None:\n rank_zero_deprecation(\n \"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. 
This will set\"\n \" the value for both trainer.{validate,test}_loop.verbose`.\",\n stacklevel=5,\n )\n self.validate_loop.verbose = verbose\n self.test_loop.verbose = verbose\n\n @property\n def _evaluation_loop(self) -> EvaluationLoop:\n if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):\n return self.fit_loop.epoch_loop.val_loop\n if self.state.fn == TrainerFn.VALIDATING:\n return self.validate_loop\n if self.state.fn == TrainerFn.TESTING:\n return self.test_loop\n raise RuntimeError(\"The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope\")\n\n @property\n def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:\n if self.training:\n return self.fit_loop\n if self.sanity_checking or self.evaluating:\n return self._evaluation_loop\n if self.predicting:\n return self.predict_loop\n\n \"\"\"\n Logging properties\n \"\"\"\n\n @property\n def logger(self) -> Optional[LightningLoggerBase]:\n if len(self.loggers) == 0:\n return None\n if len(self.loggers) == 1:\n return self.loggers[0]\n else:\n rank_zero_warn(\n \"Using trainer.logger when Trainer is configured to use multiple loggers.\"\n \" This behavior will change in v1.8 when LoggerCollection is removed, and\"\n \" trainer.logger will return the first logger in trainer.loggers\"\n )\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return LoggerCollection(self.loggers)\n\n @logger.setter\n def logger(self, logger: Optional[LightningLoggerBase]) -> None:\n if not logger:\n self.loggers = []\n elif isinstance(logger, LoggerCollection):\n self.loggers = list(logger)\n else:\n self.loggers = [logger]\n\n @property\n def loggers(self) -> List[LightningLoggerBase]:\n return self._loggers\n\n @loggers.setter\n def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None:\n self._loggers = loggers if loggers else []\n\n @property\n def callback_metrics(self) -> dict:\n return self._logger_connector.callback_metrics\n\n @property\n def logged_metrics(self) -> dict:\n return self._logger_connector.logged_metrics\n\n @property\n def progress_bar_metrics(self) -> dict:\n return self._logger_connector.progress_bar_metrics\n\n @property\n def _results(self) -> Optional[_ResultCollection]:\n active_loop = self._active_loop\n if active_loop is not None:\n return active_loop._results\n\n def _exit_gracefully_on_signal(self) -> None:\n if not _fault_tolerant_training() or not self._should_terminate_gracefully():\n return\n raise ExitGracefullyException(0)\n\n def _should_terminate_gracefully(self) -> bool:\n value = torch.tensor(int(self._terminate_gracefully), device=self.strategy.root_device)\n return self.strategy.reduce(value, reduce_op=\"sum\") > 0\n\n @property\n def weights_summary(self) -> Optional[str]:\n rank_zero_deprecation(\"`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n return self._weights_summary\n\n @weights_summary.setter\n def weights_summary(self, val: Optional[str]) -> None:\n rank_zero_deprecation(\"Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.\")\n self._weights_summary = val\n\n \"\"\"\n Other\n \"\"\"\n\n @property\n def estimated_stepping_batches(self) -> Union[int, float]:\n r\"\"\"\n Estimated stepping batches for the complete training inferred from DataLoaders, gradient\n accumulation factor and distributed setup.\n\n Examples::\n\n def configure_optimizers(self):\n optimizer = ...\n scheduler = torch.optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=1e-3, 
total_steps=self.trainer.estimated_stepping_batches\n )\n return [optimizer], [scheduler]\n\n \"\"\"\n accumulation_scheduler = self.accumulation_scheduler\n\n if accumulation_scheduler.epochs != [0]:\n raise MisconfigurationException(\n \"Estimated stepping batches cannot be computed with different\"\n \" `accumulate_grad_batches` at different epochs.\"\n )\n\n # infinite training\n if self.max_epochs == -1 and self.max_steps == -1:\n return float(\"inf\")\n\n if self.train_dataloader is None:\n rank_zero_info(\"Loading `train_dataloader` to estimate number of stepping batches.\")\n self.reset_train_dataloader()\n\n total_batches = self.num_training_batches\n\n # iterable dataset\n if total_batches == float(\"inf\"):\n return self.max_steps\n\n self.accumulate_grad_batches = accumulation_scheduler.get_accumulate_grad_batches(self.current_epoch)\n effective_batch_size = self.accumulate_grad_batches\n max_estimated_steps = math.ceil(total_batches / effective_batch_size) * max(self.max_epochs, 1)\n\n max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps\n return max_estimated_steps\n\n @property\n def terminate_on_nan(self) -> bool:\n rank_zero_deprecation(\"`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.\")\n return self._terminate_on_nan\n\n @terminate_on_nan.setter\n def terminate_on_nan(self, val: bool) -> None:\n rank_zero_deprecation(\n f\"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7.\"\n f\" Please set `Trainer(detect_anomaly={val})` instead.\"\n )\n self._terminate_on_nan = val # : 212\n\n\ndef _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:\n if batches is None:\n # batches is optional to know if the user passed a value so that we can show the above info messages only to the\n # users that set a value explicitly\n return 1.0\n\n # differentiating based on the type can be error-prone for users. show a message describing the chosen behaviour\n if isinstance(batches, int) and batches == 1:\n if name == \"limit_train_batches\":\n message = \"1 batch per epoch will be used.\"\n elif name == \"val_check_interval\":\n message = \"validation will run after every batch.\"\n else:\n message = \"1 batch will be used.\"\n rank_zero_info(f\"`Trainer({name}=1)` was configured so {message}\")\n elif isinstance(batches, float) and batches == 1.0:\n if name == \"limit_train_batches\":\n message = \"100% of the batches per epoch will be used.\"\n elif name == \"val_check_interval\":\n message = \"validation will run at the end of the training epoch.\"\n else:\n message = \"100% of the batches will be used.\"\n rank_zero_info(f\"`Trainer({name}=1.0)` was configured so {message}.\")\n\n if 0 <= batches <= 1:\n return batches\n if batches > 1 and batches % 1.0 == 0:\n return int(batches)\n raise MisconfigurationException(\n f\"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int.\"\n )\n"
] | [
[
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.autograd.set_detect_anomaly",
"torch._C._log_api_usage_once"
]
] |
saranyakrish14/glow | [
"3562fba6a77d2bb4aacf98a5bff5a737a93f6adc"
] | [
"torch_glow/tests/nodes/add_test.py"
] | [
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleAddModule(torch.nn.Module):\n def __init__(self, inplace=False):\n super(SimpleAddModule, self).__init__()\n self.inplace = inplace\n\n def forward(self, a, b):\n if b.size() == torch.Size([]):\n return (a * a).add(b.item())\n if self.inplace:\n c = a.add_(b)\n return c.add_(c)\n else:\n c = a.add(b)\n return c.add(c)\n\n\nclass TestAdd(utils.TorchGlowTestCase):\n @utils.deterministic_expand(\n [\n lambda: (\"basic\", SimpleAddModule(), torch.randn(4), torch.randn(4)),\n lambda: (\"inplace\", SimpleAddModule(True), torch.randn(4), torch.randn(4)),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(8, 3, 4, 2),\n torch.randn(4, 2),\n ),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(8, 3, 4, 2),\n torch.randn(1, 2),\n ),\n lambda: (\n \"broadcast\",\n SimpleAddModule(),\n torch.randn(4, 2),\n torch.randn(8, 3, 4, 2),\n ),\n lambda: (\"float\", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),\n lambda: (\n \"float_and_int\",\n SimpleAddModule(),\n torch.randn(4),\n torch.tensor(42),\n True,\n ),\n lambda: (\n \"int32\",\n SimpleAddModule(),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int32),\n ),\n lambda: (\n \"int64\",\n SimpleAddModule(),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),\n torch.torch.randint(-10, 10, (2, 4), dtype=torch.int64),\n ),\n ]\n )\n def test_add(self, _, module, a, b, skip_to_glow=False):\n utils.run_comparison_tests(\n module,\n (a, b),\n fusible_ops={\"aten::add_\"} if module.inplace else {\"aten::add\"},\n )\n"
] | [
[
"torch.randn",
"torch.tensor",
"torch.Size",
"torch.torch.randint"
]
] |
kuangliu/pytorch-ssd | [
"02ed1cbe6962e791895ab1c455dc5ddfb87291b9"
] | [
"encoder.py"
] | [
"'''Encode target locations and labels.'''\nimport torch\n\nimport math\nimport itertools\n\nclass DataEncoder:\n def __init__(self):\n '''Compute default box sizes with scale and aspect transform.'''\n scale = 300.\n steps = [s / scale for s in (8, 16, 32, 64, 100, 300)]\n sizes = [s / scale for s in (30, 60, 111, 162, 213, 264, 315)]\n aspect_ratios = ((2,), (2,3), (2,3), (2,3), (2,), (2,))\n feature_map_sizes = (38, 19, 10, 5, 3, 1)\n\n num_layers = len(feature_map_sizes)\n\n boxes = []\n for i in range(num_layers):\n fmsize = feature_map_sizes[i]\n for h,w in itertools.product(range(fmsize), repeat=2):\n cx = (w + 0.5)*steps[i]\n cy = (h + 0.5)*steps[i]\n\n s = sizes[i]\n boxes.append((cx, cy, s, s))\n\n s = math.sqrt(sizes[i] * sizes[i+1])\n boxes.append((cx, cy, s, s))\n\n s = sizes[i]\n for ar in aspect_ratios[i]:\n boxes.append((cx, cy, s * math.sqrt(ar), s / math.sqrt(ar)))\n boxes.append((cx, cy, s / math.sqrt(ar), s * math.sqrt(ar)))\n\n self.default_boxes = torch.Tensor(boxes)\n\n def iou(self, box1, box2):\n '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].\n\n Args:\n box1: (tensor) bounding boxes, sized [N,4].\n box2: (tensor) bounding boxes, sized [M,4].\n\n Return:\n (tensor) iou, sized [N,M].\n '''\n N = box1.size(0)\n M = box2.size(0)\n\n lt = torch.max(\n box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n rb = torch.min(\n box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\n box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\n )\n\n wh = rb - lt # [N,M,2]\n wh[wh<0] = 0 # clip at 0\n inter = wh[:,:,0] * wh[:,:,1] # [N,M]\n\n area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]\n area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]\n area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]\n area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]\n\n iou = inter / (area1 + area2 - inter)\n return iou\n\n def encode(self, boxes, classes, threshold=0.5):\n '''Transform target bounding boxes and class labels to SSD boxes and classes.\n\n Match each object box to all the default boxes, pick the ones with the\n Jaccard-Index > 0.5:\n Jaccard(A,B) = AB / (A+B-AB)\n\n Args:\n boxes: (tensor) object bounding boxes (xmin,ymin,xmax,ymax) of a image, sized [#obj, 4].\n classes: (tensor) object class labels of a image, sized [#obj,].\n threshold: (float) Jaccard index threshold\n\n Returns:\n boxes: (tensor) bounding boxes, sized [#obj, 8732, 4].\n classes: (tensor) class labels, sized [8732,]\n '''\n default_boxes = self.default_boxes\n num_default_boxes = default_boxes.size(0)\n num_objs = boxes.size(0)\n\n iou = self.iou( # [#obj,8732]\n boxes,\n torch.cat([default_boxes[:,:2] - default_boxes[:,2:]/2,\n default_boxes[:,:2] + default_boxes[:,2:]/2], 1)\n )\n\n iou, max_idx = iou.max(0) # [1,8732]\n max_idx.squeeze_(0) # [8732,]\n iou.squeeze_(0) # [8732,]\n\n boxes = boxes[max_idx] # [8732,4]\n variances = [0.1, 0.2]\n cxcy = (boxes[:,:2] + boxes[:,2:])/2 - default_boxes[:,:2] # [8732,2]\n cxcy /= variances[0] * default_boxes[:,2:]\n wh = (boxes[:,2:] - boxes[:,:2]) / default_boxes[:,2:] # [8732,2]\n wh = torch.log(wh) / variances[1]\n loc = torch.cat([cxcy, wh], 1) # [8732,4]\n\n conf = 1 + classes[max_idx] # [8732,], background class = 0\n conf[iou<threshold] = 0 # background\n return loc, conf\n\n def nms(self, bboxes, scores, threshold=0.5, mode='union'):\n '''Non 
maximum suppression.\n\n Args:\n bboxes: (tensor) bounding boxes, sized [N,4].\n scores: (tensor) bbox scores, sized [N,].\n threshold: (float) overlap threshold.\n mode: (str) 'union' or 'min'.\n\n Returns:\n keep: (tensor) selected indices.\n\n Ref:\n https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py\n '''\n x1 = bboxes[:,0]\n y1 = bboxes[:,1]\n x2 = bboxes[:,2]\n y2 = bboxes[:,3]\n\n areas = (x2-x1) * (y2-y1)\n _, order = scores.sort(0, descending=True)\n\n keep = []\n while order.numel() > 0:\n i = order[0]\n keep.append(i)\n\n if order.numel() == 1:\n break\n\n xx1 = x1[order[1:]].clamp(min=x1[i])\n yy1 = y1[order[1:]].clamp(min=y1[i])\n xx2 = x2[order[1:]].clamp(max=x2[i])\n yy2 = y2[order[1:]].clamp(max=y2[i])\n\n w = (xx2-xx1).clamp(min=0)\n h = (yy2-yy1).clamp(min=0)\n inter = w*h\n\n if mode == 'union':\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == 'min':\n ovr = inter / areas[order[1:]].clamp(max=areas[i])\n else:\n raise TypeError('Unknown nms mode: %s.' % mode)\n\n ids = (ovr<=threshold).nonzero().squeeze()\n if ids.numel() == 0:\n break\n order = order[ids+1]\n return torch.LongTensor(keep)\n\n def decode(self, loc, conf):\n '''Transform predicted loc/conf back to real bbox locations and class labels.\n\n Args:\n loc: (tensor) predicted loc, sized [8732,4].\n conf: (tensor) predicted conf, sized [8732,21].\n\n Returns:\n boxes: (tensor) bbox locations, sized [#obj, 4].\n labels: (tensor) class labels, sized [#obj,1].\n '''\n variances = [0.1, 0.2]\n wh = torch.exp(loc[:,2:]*variances[1]) * self.default_boxes[:,2:]\n cxcy = loc[:,:2] * variances[0] * self.default_boxes[:,2:] + self.default_boxes[:,:2]\n boxes = torch.cat([cxcy-wh/2, cxcy+wh/2], 1) # [8732,4]\n\n max_conf, labels = conf.max(1) # [8732,1]\n ids = labels.squeeze(1).nonzero().squeeze(1) # [#boxes,]\n\n keep = self.nms(boxes[ids], max_conf[ids].squeeze(1))\n return boxes[ids][keep], labels[ids][keep], max_conf[ids][keep]\n"
] | [
[
"torch.exp",
"torch.log",
"torch.LongTensor",
"torch.cat",
"torch.Tensor"
]
] |
johnmgregoire/JCAPGeneratePrintCode | [
"afc1dbe6125d0024a46889011ab653ed24016fe4"
] | [
"platemapgenerator_calccompsforsingleplate.py"
] | [
"import time, copy, pickle\nimport os, os.path\nimport sys\nimport numpy, pylab\n\nsys.path.append('C:/Users/Gregoire/Documents/PythonCode/JCAP')\nfrom readplatemap import *\n\nmodelpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate333_1map_full.txt'\nnewpath='C:/Users/Gregoire/Documents/CaltechWork/platemaps/Quaternarysingleplate/plate20intervwbin.txt'\n\nwritelines=[]\nf=open(modelpath, mode='r')\nls=f.readlines()[:2]\nwritelines+=[l.strip() for l in ls]\nf.close()\n\ndlist=readsingleplatemaptxt(modelpath, returnfiducials=False)\ndlistsrc=readplatemaptxt(codes=[0, 1, 2, 3])\n\nsmpsrc=numpy.array([d['Sample'] for d in dlistsrc])\ncodesrc=numpy.array([d['code'] for d in dlistsrc])\n\nintervs=20\ncomps=[[1.0*b/intervs, 1.0*c/intervs, 1.0*(intervs-a-b-c)/intervs, 1.0*a/intervs] for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1]\n\ndef genbinarycomps(intervs, elind1, elind2, ndim=4):\n aa=numpy.linspace(0.,1.,intervs+1)\n c=numpy.zeros((len(aa), ndim), dtype='float64')\n c[:, elind1]=aa\n c[:, elind2]=1.-aa\n return c\n\ncomps2=comps\ncodes=[0]*len(comps)\nbinintervs=5\nfor i, j in [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:\n comps2+=list(genbinarycomps(binintervs, i, j))+[numpy.zeros(4, dtype='float64')] #add 6 compositions in binary line and then zeros\n codes+=[4]*6+[1]\ncomps2+=[numpy.zeros(4, dtype='float64')]*6 #6 more zeros to round out the 1819 code0 samples in a standard platemap\ncodes+=[1]*6\ncomps2=[numpy.array(c) for c in comps2]\n\ncomps2pop=copy.copy(comps2)\ncodespop=copy.copy(codes)\n\nfor d in dlist:\n if d['code']==0:\n c=comps2pop.pop(0)\n cd=codespop.pop(0)\n for k, v in zip(['A', 'B', 'C', 'D'], c):\n d[k]=v\n d['code']=cd\n\nk_f=[\\\n('Sample','%04d'),\\\n('x','%.2f'),\\\n('y','%.2f'),\\\n('dx','%.2f'),\\\n('dx','%.2f'),\\\n('A','%.3f'),\\\n('B','%.3f'),\\\n('C','%.3f'),\\\n('D','%.3f'),\\\n('E','%.3f'),\\\n('F','%.3f'),\\\n('G','%.3f'),\\\n('H','%.3f'),\\\n('code','%d'),\\\n]\n\nwritelines+=[', '.join([f %d[k] for k, f in k_f]) for d in dlist]\n\nf=open(newpath, mode='w')\nf.write('\\n'.join(writelines))\nf.close()\n\nsys.path.append('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')\nfrom myquaternaryutility import QuaternaryPlot\n\nfor d in dlist:\n c=numpy.array([d[el] for el in ['A', 'B', 'C', 'D']])\n if c.sum()>0:\n c/=c.sum()\n d['compositions']=c\n\ncarr=numpy.array([d['compositions'] for d in dlist])\nstpq=QuaternaryPlot(111)\nstpq.scatter(carr)\npylab.show()\n"
] | [
[
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.zeros"
]
] |
hfurkanbozkurt/syne-tune | [
"05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f"
] | [
"syne_tune/optimizer/schedulers/searchers/bayesopt/utils/test_objects.py"
] | [
"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n# Could eventually remove this code: Is this needed in unit tests?\n\n\"\"\"\nObject definitions that are used for testing.\n\"\"\"\n\nfrom typing import Iterator, Tuple, Dict, List, Optional, Union\nimport numpy as np\n\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common \\\n import Hyperparameter, Configuration, dictionarize_objective\nfrom syne_tune.config_space import Categorical, loguniform, randint, \\\n choice, uniform\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges \\\n import HyperparameterRanges\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.hp_ranges_factory \\\n import make_hyperparameter_ranges\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.tuning_job_state \\\n import TuningJobState\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.datatypes.common import \\\n TrialEvaluations, PendingEvaluation\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.constants \\\n import MCMCConfig, OptimizationConfig\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gp_regression \\\n import GaussianProcessRegression\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.gpr_mcmc \\\n import GPRegressionMCMC\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.kernel \\\n import Matern52, KernelFunction\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.gpautograd.warping \\\n import WarpedKernel, Warping\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.base_classes \\\n import CandidateGenerator\nfrom syne_tune.optimizer.schedulers.searchers.bayesopt.tuning_algorithms.common \\\n import ExclusionList\n\n\ndef build_kernel(state: TuningJobState,\n do_warping: bool = False) -> KernelFunction:\n dims, warping_ranges = dimensionality_and_warping_ranges(state.hp_ranges)\n kernel = Matern52(dims, ARD=True)\n if do_warping:\n return WarpedKernel(\n kernel=kernel, warping=Warping(dims, warping_ranges))\n else:\n return kernel\n\n\ndef default_gpmodel(\n state: TuningJobState, random_seed: int,\n optimization_config: OptimizationConfig) -> GaussianProcessRegression:\n return GaussianProcessRegression(\n kernel=build_kernel(state),\n optimization_config=optimization_config,\n random_seed=random_seed\n )\n\n\ndef default_gpmodel_mcmc(\n state: TuningJobState, random_seed: int,\n mcmc_config: MCMCConfig) -> GPRegressionMCMC:\n return GPRegressionMCMC(\n build_kernel=lambda: build_kernel(state),\n mcmc_config=mcmc_config,\n random_seed=random_seed\n )\n\n\ndef dimensionality_and_warping_ranges(hp_ranges: HyperparameterRanges) -> \\\n Tuple[int, Dict[int, Tuple[float, float]]]:\n lower_config = dict()\n upper_config = dict()\n for name, hp_range in hp_ranges.config_space.items():\n if not isinstance(hp_range, Categorical):\n lower_config[name] = hp_range.lower\n upper_config[name] = hp_range.upper\n else:\n lower_config[name] = 
hp_range.categories[0]\n upper_config[name] = hp_range.categories[0]\n lower_internal = hp_ranges.to_ndarray(lower_config)\n upper_internal = hp_ranges.to_ndarray(upper_config)\n dims = 0\n warping_ranges = dict()\n for name in hp_ranges.internal_keys:\n hp_range = hp_ranges.config_space[name]\n if not isinstance(hp_range, Categorical):\n _lower = lower_internal[dims]\n _upper = upper_internal[dims]\n if _upper > _lower: # exclude cases where max equal to min\n warping_ranges[dims] = (_lower, _upper)\n else:\n assert _lower == _upper\n dims += 1\n else:\n # For binary, we use a single dimension, not 2\n sz = len(hp_range.categories)\n if sz == 2:\n sz = 1\n dims += sz\n return dims, warping_ranges\n\n\nclass RepeatedCandidateGenerator(CandidateGenerator):\n \"\"\"Generates candidates from a fixed set. Used to test the deduplication logic.\"\"\"\n def __init__(self, n_unique_candidates: int):\n self.config_space = {\n 'a': uniform(0, n_unique_candidates),\n 'b': randint(0, n_unique_candidates),\n 'c': choice([f\"value_{i}\" for i in range(n_unique_candidates)])}\n self.hp_ranges = make_hyperparameter_ranges(self.config_space)\n self.all_unique_candidates = [\n {'a': 1.0*j, 'b': j, 'c': f\"value_{j}\"}\n for j in range(n_unique_candidates)]\n\n def generate_candidates(self) -> Iterator[Configuration]:\n i = 0\n while True:\n i += 1\n yield self.all_unique_candidates[i % len(self.all_unique_candidates)]\n\n\n# Example black box function, with adjustable location of global minimum.\n# Potentially could catch issues with optimizer, e.g. if the optimizer\n# ignoring somehow candidates on the edge of search space.\n# A simple quadratic function is used.\nclass Quadratic3d:\n def __init__(self, local_minima, active_metric, metric_names):\n # local_minima: point where local_minima is located\n self.local_minima = np.array(local_minima).astype('float')\n self.local_minima[0] = np.log10(self.local_minima[0])\n self.active_metric = active_metric\n self.metric_names = metric_names\n\n @property\n def search_space(self):\n config_space = {\n 'x': loguniform(1.0, 100.0),\n 'y': randint(0, 2),\n 'z': choice(['0.0', '1.0', '2.0'])}\n return make_hyperparameter_ranges(config_space)\n\n @property\n def f_min(self):\n return 0.0\n\n def __call__(self, candidate):\n p = np.array([float(hp) for hp in candidate])\n p[0] = np.log10(p[0])\n return dictionarize_objective(np.sum((self.local_minima - p) ** 2))\n\n\ndef tuples_to_configs(config_tpls: List[Tuple[Hyperparameter, ...]],\n hp_ranges: HyperparameterRanges) -> List[Configuration]:\n \"\"\"\n Many unit tests write configs as tuples.\n\n \"\"\"\n return [hp_ranges.tuple_to_config(x) for x in config_tpls]\n\n\ndef create_exclusion_set(\n candidates_tpl, hp_ranges: HyperparameterRanges,\n is_dict: bool = False) -> ExclusionList:\n \"\"\"\n Creates exclusion list from set of tuples.\n\n \"\"\"\n if not is_dict:\n candidates_tpl = tuples_to_configs(candidates_tpl, hp_ranges)\n config_for_trial = {\n str(trial_id): config for trial_id, config in enumerate(candidates_tpl)}\n state = TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=config_for_trial,\n trials_evaluations=[],\n failed_trials=[str(x) for x in range(len(candidates_tpl))])\n return ExclusionList(state)\n\n\nTupleOrDict = Union[tuple, dict]\n\n\ndef create_tuning_job_state(\n hp_ranges: HyperparameterRanges, cand_tuples: List[TupleOrDict],\n metrics: List[Dict],\n pending_tuples: Optional[List[TupleOrDict]] = None,\n failed_tuples: Optional[List[TupleOrDict]] = None) -> TuningJobState:\n \"\"\"\n 
Builds `TuningJobState` from basics, where configs are given as tuples or\n as dicts.\n\n NOTE: We assume that all configs in the different lists are different!\n\n \"\"\"\n if cand_tuples and isinstance(cand_tuples[0], tuple):\n configs = tuples_to_configs(cand_tuples, hp_ranges)\n else:\n configs = cand_tuples\n trials_evaluations = [TrialEvaluations(trial_id=str(trial_id), metrics=y)\n for trial_id, y in enumerate(metrics)]\n pending_evaluations = None\n if pending_tuples is not None:\n sz = len(configs)\n extra = len(pending_tuples)\n if pending_tuples and isinstance(pending_tuples[0], tuple):\n extra_configs = tuples_to_configs(pending_tuples, hp_ranges)\n else:\n extra_configs = pending_tuples\n configs.extend(extra_configs)\n pending_evaluations = [PendingEvaluation(trial_id=str(trial_id))\n for trial_id in range(sz, sz + extra)]\n failed_trials = None\n if failed_tuples is not None:\n sz = len(configs)\n extra = len(failed_tuples)\n if failed_tuples and isinstance(failed_tuples[0], tuple):\n extra_configs = tuples_to_configs(failed_tuples, hp_ranges)\n else:\n extra_configs = failed_tuples\n configs.extend(extra_configs)\n failed_trials = [str(x) for x in range(sz, sz + extra)]\n\n config_for_trial = {\n str(trial_id): config for trial_id, config in enumerate(configs)}\n return TuningJobState(\n hp_ranges=hp_ranges,\n config_for_trial=config_for_trial,\n trials_evaluations=trials_evaluations,\n failed_trials=failed_trials,\n pending_evaluations=pending_evaluations)\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.log10"
]
] |
uhrwecker/GRDonuts | [
"3087aeb5c169251bdb711b425dcc3040ff962da7"
] | [
"util/utility.py"
] | [
"import numpy as np\n\nclass UtilInverse():\n def __init__(self, verbose=True):\n self.verbose = verbose\n\n def find_nearest_ind(self, array, value):\n index = []\n for ind in range(len(array)-1):\n if array[ind] < value and array[ind+1] > value:\n index.append(ind)\n if array[ind] > value and array[ind+1] < value:\n index.append(ind)\n return index\n\n def sort_array_by_column(self, array, order=['f0']):\n bits = 'i8'+',i8'*(len(array[0])-1)\n array.view(bits).sort(order=order, axis=0)\n return array\n\n \n\nclass UtilStability():\n def __init__(self, verbose=True):\n self.verbose = verbose\n\n def retrieve_extrema(self, w, r):\n self.check_for_stable_point(w, self.verbose)\n \n min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]\n max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]\n\n w_min = w[min_mask]\n r_min = r[min_mask]\n w_max = w[max_mask]\n r_max = r[max_mask]\n\n try:\n\n if w_min[0] == w[0]:\n w_min = np.delete(w_min, 0)\n r_min = np.delete(r_min, 0)\n\n if w_max[-1] == w[-1]:\n w_max = np.delete(w_max, -1)\n r_max = np.delete(r_max, -1)\n\n if self.verbose:\n print('Simple extremum analysis: ')\n print('- W has maximum/a at w='+str(w_max.tolist()))\n print('- W has minimum/a at w='+str(w_min.tolist()))\n\n return w_min.tolist(), w_max.tolist(), r_min.tolist(), r_max.tolist()\n except:\n return [0], [0], [0], [0]\n\n def check_for_stable_point(self, w, exit_if_not_stable=False):\n '''\n Checks if array has at least one minimum and\n its maximum is only local\n '''\n min_mask = np.r_[True, w[1:] < w[:-1]] & np.r_[w[:-1] < w[1:], True]\n max_mask = np.r_[True, w[1:] > w[:-1]] & np.r_[w[:-1] > w[1:], True]\n\n\n w_min = w[min_mask]\n w_max = w[max_mask]\n\n## if w_max[0] == w[0] or w_max[0] == w[1]:\n## '''\n## The potentianl comes from +inf, so its not a stable point.\n## '''\n## raise ValueError()\n\n if len(w_min) < 2 and len(w_max) < 2:\n '''\n The function is monotonically. There is no stable point.\n '''\n self._error_monotonically(exit_if_not_stable)\n\n elif len(w_min) < 1 or len(w_max) < 1:\n '''\n The function has either a local maximum OR local minimum, but not\n both, thus is not stable\n '''\n self._error_only_one_extremum(exit_if_not_stable)\n\n elif w_max[0] > w_max[1]:\n '''\n The potential is not closed, there is no Roche limit.\n Matter will extend into infitiy.\n '''\n self._error_no_roche_limit(exit_if_not_stable)\n\n elif self.verbose and len(w_min) > 1 and len(w_max) > 1:\n print('Potential is possibly stable')\n\n return 0\n\n def closure_rating_function(self, w, r):\n wmin, wmax, rmin, rmax = self.retrieve_extrema(w, r)\n\n int_l = np.where(r == rmax[0])[0][0]\n int_r = np.where(w > wmax[0])[0][0]\n\n area_func = abs(w[int_l:int_r] - wmax[-1])\n\n area = np.trapz(area_func)\n\n return area\n \n\n def _error_monotonically(self, flag):\n if flag:\n raise ValueError('Potential not closed, potential is monotonically.')\n else:\n if self.verbose:\n print('WARNING: Potential not closed, potential is monotonically.')\n \n def _error_only_one_extremum(self, flag):\n if flag:\n raise ValueError('Potential not closed, only has one extremum.')\n else:\n if self.verbose:\n print('WARNING: Potential not closed, only has one extremum.')\n\n\n def _error_no_roche_limit(self, flag):\n if flag:\n raise ValueError('Potential is not closed, matter extends into infinity.')\n else:\n if self.verbose:\n print('WARNING: Potential not close, no Roche limit.')\n"
] | [
[
"numpy.trapz",
"numpy.where",
"numpy.delete"
]
] |
blokhinnv/dgl | [
"bcf92f6c21afd4ad48a86d2ee543386099190791"
] | [
"python/dgl/distributed/dist_graph.py"
] | [
"\"\"\"Define distributed graph.\"\"\"\n\nfrom collections.abc import MutableMapping\nfrom collections import namedtuple\n\nimport os\nimport numpy as np\n\nfrom ..heterograph import DGLHeteroGraph\nfrom ..convert import heterograph as dgl_heterograph\nfrom ..convert import graph as dgl_graph\nfrom ..transform import compact_graphs\nfrom .. import heterograph_index\nfrom .. import backend as F\nfrom ..base import NID, EID, NTYPE, ETYPE, ALL, is_all\nfrom .kvstore import KVServer, get_kvstore\nfrom .._ffi.ndarray import empty_shared_mem\nfrom ..frame import infer_scheme\nfrom .partition import load_partition, load_partition_book\nfrom .graph_partition_book import PartitionPolicy, get_shared_mem_partition_book\nfrom .graph_partition_book import HeteroDataName, parse_hetero_data_name\nfrom .graph_partition_book import NodePartitionPolicy, EdgePartitionPolicy\nfrom .shared_mem_utils import _to_shared_mem, _get_ndata_path, _get_edata_path, DTYPE_DICT\nfrom . import rpc\nfrom . import role\nfrom .server_state import ServerState\nfrom .rpc_server import start_server\nfrom .graph_services import find_edges as dist_find_edges\nfrom .graph_services import out_degrees as dist_out_degrees\nfrom .graph_services import in_degrees as dist_in_degrees\nfrom .dist_tensor import DistTensor\n\nINIT_GRAPH = 800001\n\nclass InitGraphRequest(rpc.Request):\n \"\"\" Init graph on the backup servers.\n\n When the backup server starts, they don't load the graph structure.\n This request tells the backup servers that they can map to the graph structure\n with shared memory.\n \"\"\"\n def __init__(self, graph_name):\n self._graph_name = graph_name\n\n def __getstate__(self):\n return self._graph_name\n\n def __setstate__(self, state):\n self._graph_name = state\n\n def process_request(self, server_state):\n if server_state.graph is None:\n server_state.graph = _get_graph_from_shared_mem(self._graph_name)\n return InitGraphResponse(self._graph_name)\n\nclass InitGraphResponse(rpc.Response):\n \"\"\" Ack the init graph request\n \"\"\"\n def __init__(self, graph_name):\n self._graph_name = graph_name\n\n def __getstate__(self):\n return self._graph_name\n\n def __setstate__(self, state):\n self._graph_name = state\n\ndef _copy_graph_to_shared_mem(g, graph_name, graph_format):\n new_g = g.shared_memory(graph_name, formats=graph_format)\n # We should share the node/edge data to the client explicitly instead of putting them\n # in the KVStore because some of the node/edge data may be duplicated.\n new_g.ndata['inner_node'] = _to_shared_mem(g.ndata['inner_node'],\n _get_ndata_path(graph_name, 'inner_node'))\n new_g.ndata[NID] = _to_shared_mem(g.ndata[NID], _get_ndata_path(graph_name, NID))\n\n new_g.edata['inner_edge'] = _to_shared_mem(g.edata['inner_edge'],\n _get_edata_path(graph_name, 'inner_edge'))\n new_g.edata[EID] = _to_shared_mem(g.edata[EID], _get_edata_path(graph_name, EID))\n new_g.edata[ETYPE] = _to_shared_mem(g.edata[ETYPE], _get_edata_path(graph_name, ETYPE))\n return new_g\n\nFIELD_DICT = {'inner_node': F.int32, # A flag indicates whether the node is inside a partition.\n 'inner_edge': F.int32, # A flag indicates whether the edge is inside a partition.\n NID: F.int64,\n EID: F.int64,\n NTYPE: F.int32,\n ETYPE: F.int32}\n\ndef _get_shared_mem_ndata(g, graph_name, name):\n ''' Get shared-memory node data from DistGraph server.\n\n This is called by the DistGraph client to access the node data in the DistGraph server\n with shared memory.\n '''\n shape = (g.number_of_nodes(),)\n dtype = FIELD_DICT[name]\n dtype 
= DTYPE_DICT[dtype]\n data = empty_shared_mem(_get_ndata_path(graph_name, name), False, shape, dtype)\n dlpack = data.to_dlpack()\n return F.zerocopy_from_dlpack(dlpack)\n\ndef _get_shared_mem_edata(g, graph_name, name):\n ''' Get shared-memory edge data from DistGraph server.\n\n This is called by the DistGraph client to access the edge data in the DistGraph server\n with shared memory.\n '''\n shape = (g.number_of_edges(),)\n dtype = FIELD_DICT[name]\n dtype = DTYPE_DICT[dtype]\n data = empty_shared_mem(_get_edata_path(graph_name, name), False, shape, dtype)\n dlpack = data.to_dlpack()\n return F.zerocopy_from_dlpack(dlpack)\n\ndef _get_graph_from_shared_mem(graph_name):\n ''' Get the graph from the DistGraph server.\n\n The DistGraph server puts the graph structure of the local partition in the shared memory.\n The client can access the graph structure and some metadata on nodes and edges directly\n through shared memory to reduce the overhead of data access.\n '''\n g, ntypes, etypes = heterograph_index.create_heterograph_from_shared_memory(graph_name)\n if g is None:\n return None\n g = DGLHeteroGraph(g, ntypes, etypes)\n\n g.ndata['inner_node'] = _get_shared_mem_ndata(g, graph_name, 'inner_node')\n g.ndata[NID] = _get_shared_mem_ndata(g, graph_name, NID)\n\n g.edata['inner_edge'] = _get_shared_mem_edata(g, graph_name, 'inner_edge')\n g.edata[EID] = _get_shared_mem_edata(g, graph_name, EID)\n g.edata[ETYPE] = _get_shared_mem_edata(g, graph_name, ETYPE)\n return g\n\nNodeSpace = namedtuple('NodeSpace', ['data'])\nEdgeSpace = namedtuple('EdgeSpace', ['data'])\n\nclass HeteroNodeView(object):\n \"\"\"A NodeView class to act as G.nodes for a DistGraph.\"\"\"\n __slots__ = ['_graph']\n\n def __init__(self, graph):\n self._graph = graph\n\n def __getitem__(self, key):\n assert isinstance(key, str)\n return NodeSpace(data=NodeDataView(self._graph, key))\n\nclass HeteroEdgeView(object):\n \"\"\"A NodeView class to act as G.nodes for a DistGraph.\"\"\"\n __slots__ = ['_graph']\n\n def __init__(self, graph):\n self._graph = graph\n\n def __getitem__(self, key):\n assert isinstance(key, str)\n return EdgeSpace(data=EdgeDataView(self._graph, key))\n\nclass NodeDataView(MutableMapping):\n \"\"\"The data view class when dist_graph.ndata[...].data is called.\n \"\"\"\n __slots__ = ['_graph', '_data']\n\n def __init__(self, g, ntype=None):\n self._graph = g\n # When this is created, the server may already load node data. We need to\n # initialize the node data in advance.\n names = g._get_ndata_names(ntype)\n if ntype is None:\n self._data = g._ndata_store\n else:\n if ntype in g._ndata_store:\n self._data = g._ndata_store[ntype]\n else:\n self._data = {}\n g._ndata_store[ntype] = self._data\n for name in names:\n assert name.is_node()\n policy = PartitionPolicy(name.policy_str, g.get_partition_book())\n dtype, shape, _ = g._client.get_data_meta(str(name))\n # We create a wrapper on the existing tensor in the kvstore.\n self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),\n part_policy=policy)\n\n def _get_names(self):\n return list(self._data.keys())\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __setitem__(self, key, val):\n self._data[key] = val\n\n def __delitem__(self, key):\n del self._data[key]\n\n def __len__(self):\n # The number of node data may change. Let's count it every time we need them.\n # It's not called frequently. 
It should be fine.\n return len(self._data)\n\n def __iter__(self):\n return iter(self._data)\n\n def __repr__(self):\n reprs = {}\n for name in self._data:\n dtype = F.dtype(self._data[name])\n shape = F.shape(self._data[name])\n reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))\n return repr(reprs)\n\nclass EdgeDataView(MutableMapping):\n \"\"\"The data view class when G.edges[...].data is called.\n \"\"\"\n __slots__ = ['_graph', '_data']\n\n def __init__(self, g, etype=None):\n self._graph = g\n # When this is created, the server may already load edge data. We need to\n # initialize the edge data in advance.\n names = g._get_edata_names(etype)\n if etype is None:\n self._data = g._edata_store\n else:\n if etype in g._edata_store:\n self._data = g._edata_store[etype]\n else:\n self._data = {}\n g._edata_store[etype] = self._data\n for name in names:\n assert name.is_edge()\n policy = PartitionPolicy(name.policy_str, g.get_partition_book())\n dtype, shape, _ = g._client.get_data_meta(str(name))\n # We create a wrapper on the existing tensor in the kvstore.\n self._data[name.get_name()] = DistTensor(shape, dtype, name.get_name(),\n part_policy=policy)\n\n def _get_names(self):\n return list(self._data.keys())\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __setitem__(self, key, val):\n self._data[key] = val\n\n def __delitem__(self, key):\n del self._data[key]\n\n def __len__(self):\n # The number of edge data may change. Let's count it every time we need them.\n # It's not called frequently. It should be fine.\n return len(self._data)\n\n def __iter__(self):\n return iter(self._data)\n\n def __repr__(self):\n reprs = {}\n for name in self._data:\n dtype = F.dtype(self._data[name])\n shape = F.shape(self._data[name])\n reprs[name] = 'DistTensor(shape={}, dtype={})'.format(str(shape), str(dtype))\n return repr(reprs)\n\n\nclass DistGraphServer(KVServer):\n ''' The DistGraph server.\n\n This DistGraph server loads the graph data and sets up a service so that trainers and\n samplers can read data of a graph partition (graph structure, node data and edge data)\n from remote machines. A server is responsible for one graph partition.\n\n Currently, each machine runs only one main server with a set of backup servers to handle\n clients' requests. The main server and the backup servers all handle the requests for the same\n graph partition. They all share the partition data (graph structure and node/edge data) with\n shared memory.\n\n By default, the partition data is shared with the DistGraph clients that run on\n the same machine. However, a user can disable shared memory option. 
This is useful for the case\n that a user wants to run the server and the client on different machines.\n\n Parameters\n ----------\n server_id : int\n The server ID (start from 0).\n ip_config : str\n Path of IP configuration file.\n num_servers : int\n Server count on each machine.\n num_clients : int\n Total number of client nodes.\n part_config : string\n The path of the config file generated by the partition tool.\n disable_shared_mem : bool\n Disable shared memory.\n graph_format : str or list of str\n The graph formats.\n '''\n def __init__(self, server_id, ip_config, num_servers,\n num_clients, part_config, disable_shared_mem=False,\n graph_format=('csc', 'coo')):\n super(DistGraphServer, self).__init__(server_id=server_id,\n ip_config=ip_config,\n num_servers=num_servers,\n num_clients=num_clients)\n self.ip_config = ip_config\n self.num_servers = num_servers\n # Load graph partition data.\n if self.is_backup_server():\n # The backup server doesn't load the graph partition. It'll initialized afterwards.\n self.gpb, graph_name, ntypes, etypes = load_partition_book(part_config, self.part_id)\n self.client_g = None\n else:\n self.client_g, node_feats, edge_feats, self.gpb, graph_name, \\\n ntypes, etypes = load_partition(part_config, self.part_id)\n print('load ' + graph_name)\n # Create the graph formats specified the users.\n self.client_g = self.client_g.formats(graph_format)\n self.client_g.create_formats_()\n if not disable_shared_mem:\n self.client_g = _copy_graph_to_shared_mem(self.client_g, graph_name, graph_format)\n\n if not disable_shared_mem:\n self.gpb.shared_memory(graph_name)\n assert self.gpb.partid == self.part_id\n for ntype in ntypes:\n node_name = HeteroDataName(True, ntype, None)\n self.add_part_policy(PartitionPolicy(node_name.policy_str, self.gpb))\n for etype in etypes:\n edge_name = HeteroDataName(False, etype, None)\n self.add_part_policy(PartitionPolicy(edge_name.policy_str, self.gpb))\n\n if not self.is_backup_server():\n for name in node_feats:\n # The feature name has the following format: node_type + \"/\" + feature_name to avoid\n # feature name collision for different node types.\n ntype, feat_name = name.split('/')\n data_name = HeteroDataName(True, ntype, feat_name)\n self.init_data(name=str(data_name), policy_str=data_name.policy_str,\n data_tensor=node_feats[name])\n for name in edge_feats:\n # The feature name has the following format: edge_type + \"/\" + feature_name to avoid\n # feature name collision for different edge types.\n etype, feat_name = name.split('/')\n data_name = HeteroDataName(False, etype, feat_name)\n self.init_data(name=str(data_name), policy_str=data_name.policy_str,\n data_tensor=edge_feats[name])\n\n def start(self):\n \"\"\" Start graph store server.\n \"\"\"\n # start server\n server_state = ServerState(kv_store=self, local_g=self.client_g, partition_book=self.gpb)\n print('start graph service on server {} for part {}'.format(self.server_id, self.part_id))\n start_server(server_id=self.server_id,\n ip_config=self.ip_config,\n num_servers=self.num_servers,\n num_clients=self.num_clients, server_state=server_state)\n\nclass DistGraph:\n '''The class for accessing a distributed graph.\n\n This class provides a subset of DGLGraph APIs for accessing partitioned graph data in\n distributed GNN training and inference. 
Thus, its main use case is to work with\n distributed sampling APIs to generate mini-batches and perform forward and\n backward computation on the mini-batches.\n\n The class can run in two modes: the standalone mode and the distributed mode.\n\n * When a user runs the training script normally, ``DistGraph`` will be in the standalone mode.\n In this mode, the input data must be constructed by\n :py:meth:`~dgl.distributed.partition.partition_graph` with only one partition. This mode is\n used for testing and debugging purpose. In this mode, users have to provide ``part_config``\n so that ``DistGraph`` can load the input graph.\n * When a user runs the training script with the distributed launch script, ``DistGraph`` will\n be set into the distributed mode. This is used for actual distributed training. All data of\n partitions are loaded by the ``DistGraph`` servers, which are created by DGL's launch script.\n ``DistGraph`` connects with the servers to access the partitioned graph data.\n\n Currently, the ``DistGraph`` servers and clients run on the same set of machines\n in the distributed mode. ``DistGraph`` uses shared-memory to access the partition data\n in the local machine. This gives the best performance for distributed training\n\n Users may want to run ``DistGraph`` servers and clients on separate sets of machines.\n In this case, a user may want to disable shared memory by passing\n ``disable_shared_mem=False`` when creating ``DistGraphServer``. When shared memory is disabled,\n a user has to pass a partition book.\n\n Parameters\n ----------\n graph_name : str\n The name of the graph. This name has to be the same as the one used for\n partitioning a graph in :py:meth:`dgl.distributed.partition.partition_graph`.\n gpb : GraphPartitionBook, optional\n The partition book object. Normally, users do not need to provide the partition book.\n This argument is necessary only when users want to run server process and trainer\n processes on different machines.\n part_config : str, optional\n The path of partition configuration file generated by\n :py:meth:`dgl.distributed.partition.partition_graph`. It's used in the standalone mode.\n\n Examples\n --------\n The example shows the creation of ``DistGraph`` in the standalone mode.\n\n >>> dgl.distributed.partition_graph(g, 'graph_name', 1, num_hops=1, part_method='metis',\n ... out_path='output/', reshuffle=True)\n >>> g = dgl.distributed.DistGraph('graph_name', part_config='output/graph_name.json')\n\n The example shows the creation of ``DistGraph`` in the distributed mode.\n\n >>> g = dgl.distributed.DistGraph('graph-name')\n\n The code below shows the mini-batch training using ``DistGraph``.\n\n >>> def sample(seeds):\n ... seeds = th.LongTensor(np.asarray(seeds))\n ... frontier = dgl.distributed.sample_neighbors(g, seeds, 10)\n ... return dgl.to_block(frontier, seeds)\n >>> dataloader = dgl.distributed.DistDataLoader(dataset=nodes, batch_size=1000,\n ... collate_fn=sample, shuffle=True)\n >>> for block in dataloader:\n ... feat = g.ndata['features'][block.srcdata[dgl.NID]]\n ... labels = g.ndata['labels'][block.dstdata[dgl.NID]]\n ... pred = model(block, feat)\n\n Note\n ----\n DGL's distributed training by default runs server processes and trainer processes on the same\n set of machines. If users need to run them on different sets of machines, it requires\n manually setting up servers and trainers. 
The setup is not fully tested yet.\n '''\n def __init__(self, graph_name, gpb=None, part_config=None):\n self.graph_name = graph_name\n self._gpb_input = gpb\n if os.environ.get('DGL_DIST_MODE', 'standalone') == 'standalone':\n assert part_config is not None, \\\n 'When running in the standalone model, the partition config file is required'\n self._client = get_kvstore()\n assert self._client is not None, \\\n 'Distributed module is not initialized. Please call dgl.distributed.initialize.'\n # Load graph partition data.\n g, node_feats, edge_feats, self._gpb, _, _, _ = load_partition(part_config, 0)\n assert self._gpb.num_partitions() == 1, \\\n 'The standalone mode can only work with the graph data with one partition'\n if self._gpb is None:\n self._gpb = gpb\n self._g = g\n for name in node_feats:\n # The feature name has the following format: node_type + \"/\" + feature_name.\n ntype, feat_name = name.split('/')\n self._client.add_data(str(HeteroDataName(True, ntype, feat_name)),\n node_feats[name],\n NodePartitionPolicy(self._gpb, ntype=ntype))\n for name in edge_feats:\n # The feature name has the following format: edge_type + \"/\" + feature_name.\n etype, feat_name = name.split('/')\n self._client.add_data(str(HeteroDataName(False, etype, feat_name)),\n edge_feats[name],\n EdgePartitionPolicy(self._gpb, etype=etype))\n self._client.map_shared_data(self._gpb)\n rpc.set_num_client(1)\n else:\n self._init()\n # Tell the backup servers to load the graph structure from shared memory.\n for server_id in range(self._client.num_servers):\n rpc.send_request(server_id, InitGraphRequest(graph_name))\n for server_id in range(self._client.num_servers):\n rpc.recv_response()\n self._client.barrier()\n\n self._ndata_store = {}\n self._edata_store = {}\n self._ndata = NodeDataView(self)\n self._edata = EdgeDataView(self)\n\n self._num_nodes = 0\n self._num_edges = 0\n for part_md in self._gpb.metadata():\n self._num_nodes += int(part_md['num_nodes'])\n self._num_edges += int(part_md['num_edges'])\n\n # When we store node/edge types in a list, they are stored in the order of type IDs.\n self._ntype_map = {ntype:i for i, ntype in enumerate(self.ntypes)}\n self._etype_map = {etype:i for i, etype in enumerate(self.etypes)}\n\n # Get canonical edge types.\n # TODO(zhengda) this requires the server to store the graph with coo format.\n eid = []\n for etype in self.etypes:\n type_eid = F.zeros((1,), F.int64, F.cpu())\n eid.append(self._gpb.map_to_homo_eid(type_eid, etype))\n eid = F.cat(eid, 0)\n src, dst = dist_find_edges(self, eid)\n src_tids, _ = self._gpb.map_to_per_ntype(src)\n dst_tids, _ = self._gpb.map_to_per_ntype(dst)\n self._canonical_etypes = []\n etype_ids = F.arange(0, len(self.etypes))\n for src_tid, etype_id, dst_tid in zip(src_tids, etype_ids, dst_tids):\n src_tid = F.as_scalar(src_tid)\n etype_id = F.as_scalar(etype_id)\n dst_tid = F.as_scalar(dst_tid)\n self._canonical_etypes.append((self.ntypes[src_tid], self.etypes[etype_id],\n self.ntypes[dst_tid]))\n self._etype2canonical = {}\n for src_type, etype, dst_type in self._canonical_etypes:\n if etype in self._etype2canonical:\n self._etype2canonical[etype] = ()\n else:\n self._etype2canonical[etype] = (src_type, etype, dst_type)\n\n def _init(self):\n self._client = get_kvstore()\n assert self._client is not None, \\\n 'Distributed module is not initialized. 
Please call dgl.distributed.initialize.'\n self._g = _get_graph_from_shared_mem(self.graph_name)\n self._gpb = get_shared_mem_partition_book(self.graph_name, self._g)\n if self._gpb is None:\n self._gpb = self._gpb_input\n self._client.map_shared_data(self._gpb)\n\n def __getstate__(self):\n return self.graph_name, self._gpb, self._canonical_etypes\n\n def __setstate__(self, state):\n self.graph_name, self._gpb_input, self._canonical_etypes = state\n self._init()\n\n self._etype2canonical = {}\n for src_type, etype, dst_type in self._canonical_etypes:\n if etype in self._etype2canonical:\n self._etype2canonical[etype] = ()\n else:\n self._etype2canonical[etype] = (src_type, etype, dst_type)\n self._ndata_store = {}\n self._edata_store = {}\n self._ndata = NodeDataView(self)\n self._edata = EdgeDataView(self)\n self._num_nodes = 0\n self._num_edges = 0\n for part_md in self._gpb.metadata():\n self._num_nodes += int(part_md['num_nodes'])\n self._num_edges += int(part_md['num_edges'])\n\n @property\n def local_partition(self):\n ''' Return the local partition on the client\n\n DistGraph provides a global view of the distributed graph. Internally,\n it may contains a partition of the graph if it is co-located with\n the server. When servers and clients run on separate sets of machines,\n this returns None.\n\n Returns\n -------\n DGLGraph\n The local partition\n '''\n return self._g\n\n @property\n def nodes(self):\n '''Return a node view\n '''\n return HeteroNodeView(self)\n\n @property\n def edges(self):\n '''Return an edge view\n '''\n return HeteroEdgeView(self)\n\n @property\n def ndata(self):\n \"\"\"Return the data view of all the nodes.\n\n Returns\n -------\n NodeDataView\n The data view in the distributed graph storage.\n \"\"\"\n assert len(self.ntypes) == 1, \"ndata only works for a graph with one node type.\"\n return self._ndata\n\n @property\n def edata(self):\n \"\"\"Return the data view of all the edges.\n\n Returns\n -------\n EdgeDataView\n The data view in the distributed graph storage.\n \"\"\"\n assert len(self.etypes) == 1, \"edata only works for a graph with one edge type.\"\n return self._edata\n\n @property\n def idtype(self):\n \"\"\"The dtype of graph index\n\n Returns\n -------\n backend dtype object\n th.int32/th.int64 or tf.int32/tf.int64 etc.\n\n See Also\n --------\n long\n int\n \"\"\"\n # TODO(da?): describe when self._g is None and idtype shouldn't be called.\n return F.int64\n\n @property\n def device(self):\n \"\"\"Get the device context of this graph.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> g = dgl.bipartite(([0, 1, 1, 2], [0, 0, 2, 1]), 'user', 'plays', 'game')\n >>> print(g.device)\n device(type='cpu')\n >>> g = g.to('cuda:0')\n >>> print(g.device)\n device(type='cuda', index=0)\n\n Returns\n -------\n Device context object\n \"\"\"\n # TODO(da?): describe when self._g is None and device shouldn't be called.\n return F.cpu()\n\n @property\n def ntypes(self):\n \"\"\"Return the list of node types of this graph.\n\n Returns\n -------\n list of str\n\n Examples\n --------\n\n >>> g = DistGraph(\"test\")\n >>> g.ntypes\n ['_U']\n \"\"\"\n return self._gpb.ntypes\n\n @property\n def etypes(self):\n \"\"\"Return the list of edge types of this graph.\n\n Returns\n -------\n list of str\n\n Examples\n --------\n\n >>> g = DistGraph(\"test\")\n >>> g.etypes\n ['_E']\n \"\"\"\n # Currently, we only support a graph with one edge type.\n return self._gpb.etypes\n\n @property\n def canonical_etypes(self):\n \"\"\"Return all the 
canonical edge types in the graph.\n\n A canonical edge type is a string triplet ``(str, str, str)``\n for source node type, edge type and destination node type.\n\n Returns\n -------\n list[(str, str, str)]\n All the canonical edge type triplets in a list.\n\n Notes\n -----\n DGL internally assigns an integer ID for each edge type. The returned\n edge type names are sorted according to their IDs.\n\n See Also\n --------\n etypes\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n >>> g = DistGraph(\"test\")\n >>> g.canonical_etypes\n [('user', 'follows', 'user'),\n ('user', 'follows', 'game'),\n ('user', 'plays', 'game')]\n \"\"\"\n return self._canonical_etypes\n\n def to_canonical_etype(self, etype):\n \"\"\"Convert an edge type to the corresponding canonical edge type in the graph.\n\n A canonical edge type is a string triplet ``(str, str, str)``\n for source node type, edge type and destination node type.\n\n The function expects the given edge type name can uniquely identify a canonical edge\n type. DGL will raise error if this is not the case.\n\n Parameters\n ----------\n etype : str or (str, str, str)\n If :attr:`etype` is an edge type (str), it returns the corresponding canonical edge\n type in the graph. If :attr:`etype` is already a canonical edge type,\n it directly returns the input unchanged.\n\n Returns\n -------\n (str, str, str)\n The canonical edge type corresponding to the edge type.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n >>> g = DistGraph(\"test\")\n >>> g.canonical_etypes\n [('user', 'follows', 'user'),\n ('user', 'follows', 'game'),\n ('user', 'plays', 'game')]\n\n >>> g.to_canonical_etype('plays')\n ('user', 'plays', 'game')\n >>> g.to_canonical_etype(('user', 'plays', 'game'))\n ('user', 'plays', 'game')\n\n See Also\n --------\n canonical_etypes\n \"\"\"\n if etype is None:\n if len(self.etypes) != 1:\n raise DGLError('Edge type name must be specified if there are more than one '\n 'edge types.')\n etype = self.etypes[0]\n if isinstance(etype, tuple):\n return etype\n else:\n ret = self._etype2canonical.get(etype, None)\n if ret is None:\n raise DGLError('Edge type \"{}\" does not exist.'.format(etype))\n if len(ret) != 3:\n raise DGLError('Edge type \"{}\" is ambiguous. Please use canonical edge type '\n 'in the form of (srctype, etype, dsttype)'.format(etype))\n return ret\n\n def get_ntype_id(self, ntype):\n \"\"\"Return the ID of the given node type.\n\n ntype can also be None. If so, there should be only one node type in the\n graph.\n\n Parameters\n ----------\n ntype : str\n Node type\n\n Returns\n -------\n int\n \"\"\"\n if ntype is None:\n if len(self._ntype_map) != 1:\n raise DGLError('Node type name must be specified if there are more than one '\n 'node types.')\n return 0\n return self._ntype_map[ntype]\n\n def get_etype_id(self, etype):\n \"\"\"Return the id of the given edge type.\n\n etype can also be None. 
If so, there should be only one edge type in the\n graph.\n\n Parameters\n ----------\n etype : str or tuple of str\n Edge type\n\n Returns\n -------\n int\n \"\"\"\n if etype is None:\n if len(self._etype_map) != 1:\n raise DGLError('Edge type name must be specified if there are more than one '\n 'edge types.')\n return 0\n return self._etype_map[etype]\n\n def number_of_nodes(self, ntype=None):\n \"\"\"Alias of :func:`num_nodes`\"\"\"\n return self.num_nodes(ntype)\n\n def number_of_edges(self, etype=None):\n \"\"\"Alias of :func:`num_edges`\"\"\"\n return self.num_edges(etype)\n\n def num_nodes(self, ntype=None):\n \"\"\"Return the total number of nodes in the distributed graph.\n\n Parameters\n ----------\n ntype : str, optional\n The node type name. If given, it returns the number of nodes of the\n type. If not given (default), it returns the total number of nodes of all types.\n\n Returns\n -------\n int\n The number of nodes\n\n Examples\n --------\n >>> g = dgl.distributed.DistGraph('ogb-product')\n >>> print(g.num_nodes())\n 2449029\n \"\"\"\n if ntype is None:\n if len(self.ntypes) == 1:\n return self._gpb._num_nodes(self.ntypes[0])\n else:\n return sum([self._gpb._num_nodes(ntype) for ntype in self.ntypes])\n return self._gpb._num_nodes(ntype)\n\n def num_edges(self, etype=None):\n \"\"\"Return the total number of edges in the distributed graph.\n\n Parameters\n ----------\n etype : str or (str, str, str), optional\n The type name of the edges. The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n If not provided, return the total number of edges regardless of the types\n in the graph.\n\n Returns\n -------\n int\n The number of edges\n\n Examples\n --------\n >>> g = dgl.distributed.DistGraph('ogb-product')\n >>> print(g.num_edges())\n 123718280\n \"\"\"\n if etype is None:\n if len(self.etypes) == 1:\n return self._gpb._num_edges(self.etypes[0])\n else:\n return sum([self._gpb._num_edges(etype) for etype in self.etypes])\n return self._gpb._num_edges(etype)\n\n def out_degrees(self, u=ALL):\n \"\"\"Return the out-degree(s) of the given nodes.\n\n It computes the out-degree(s).\n It does not support heterogeneous graphs yet.\n\n Parameters\n ----------\n u : node IDs\n The node IDs. The allowed formats are:\n\n * ``int``: A single node.\n * Int Tensor: Each element is a node ID. The tensor must have the same device type\n and ID data type as the graph's.\n * iterable[int]: Each element is a node ID.\n\n If not given, return the in-degrees of all the nodes.\n\n Returns\n -------\n int or Tensor\n The out-degree(s) of the node(s) in a Tensor. The i-th element is the out-degree\n of the i-th input node. If :attr:`v` is an ``int``, return an ``int`` too.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n Query for all nodes.\n\n >>> g.out_degrees()\n tensor([2, 2, 0, 0])\n\n Query for nodes 1 and 2.\n\n >>> g.out_degrees(torch.tensor([1, 2]))\n tensor([2, 0])\n\n See Also\n --------\n in_degrees\n \"\"\"\n if is_all(u):\n u = F.arange(0, self.number_of_nodes())\n return dist_out_degrees(self, u)\n\n def in_degrees(self, v=ALL):\n \"\"\"Return the in-degree(s) of the given nodes.\n\n It computes the in-degree(s).\n It does not support heterogeneous graphs yet.\n\n Parameters\n ----------\n v : node IDs\n The node IDs. 
The allowed formats are:\n\n * ``int``: A single node.\n * Int Tensor: Each element is a node ID. The tensor must have the same device type\n and ID data type as the graph's.\n * iterable[int]: Each element is a node ID.\n\n If not given, return the in-degrees of all the nodes.\n\n Returns\n -------\n int or Tensor\n The in-degree(s) of the node(s) in a Tensor. The i-th element is the in-degree\n of the i-th input node. If :attr:`v` is an ``int``, return an ``int`` too.\n\n Examples\n --------\n The following example uses PyTorch backend.\n\n >>> import dgl\n >>> import torch\n\n Query for all nodes.\n\n >>> g.in_degrees()\n tensor([0, 2, 1, 1])\n\n Query for nodes 1 and 2.\n\n >>> g.in_degrees(torch.tensor([1, 2]))\n tensor([2, 1])\n\n See Also\n --------\n out_degrees\n \"\"\"\n if is_all(v):\n v = F.arange(0, self.number_of_nodes())\n return dist_in_degrees(self, v)\n\n def node_attr_schemes(self):\n \"\"\"Return the node feature schemes.\n\n Each feature scheme is a named tuple that stores the shape and data type\n of the node feature.\n\n Returns\n -------\n dict of str to schemes\n The schemes of node feature columns.\n\n Examples\n --------\n The following uses PyTorch backend.\n\n >>> g.node_attr_schemes()\n {'h': Scheme(shape=(4,), dtype=torch.float32)}\n\n See Also\n --------\n edge_attr_schemes\n \"\"\"\n schemes = {}\n for key in self.ndata:\n schemes[key] = infer_scheme(self.ndata[key])\n return schemes\n\n def edge_attr_schemes(self):\n \"\"\"Return the edge feature schemes.\n\n Each feature scheme is a named tuple that stores the shape and data type\n of the edge feature.\n\n Returns\n -------\n dict of str to schemes\n The schemes of edge feature columns.\n\n Examples\n --------\n The following uses PyTorch backend.\n\n >>> g.edge_attr_schemes()\n {'h': Scheme(shape=(4,), dtype=torch.float32)}\n\n See Also\n --------\n node_attr_schemes\n \"\"\"\n schemes = {}\n for key in self.edata:\n schemes[key] = infer_scheme(self.edata[key])\n return schemes\n\n def rank(self):\n ''' The rank of the current DistGraph.\n\n This returns a unique number to identify the DistGraph object among all of\n the client processes.\n\n Returns\n -------\n int\n The rank of the current DistGraph.\n '''\n return role.get_global_rank()\n\n def find_edges(self, edges, etype=None):\n \"\"\" Given an edge ID array, return the source\n and destination node ID array ``s`` and ``d``. ``s[i]`` and ``d[i]``\n are source and destination node ID for edge ``eid[i]``.\n\n Parameters\n ----------\n edges : Int Tensor\n Each element is an ID. The tensor must have the same device type\n and ID data type as the graph's.\n\n etype : str or (str, str, str), optional\n The type names of the edges. 
The allowed type name formats are:\n\n * ``(str, str, str)`` for source node type, edge type and destination node type.\n * or one ``str`` edge type name if the name can uniquely identify a\n triplet format in the graph.\n\n Can be omitted if the graph has only one type of edges.\n\n Returns\n -------\n tensor\n The source node ID array.\n tensor\n The destination node ID array.\n \"\"\"\n if etype is None:\n assert len(self.etypes) == 1, 'find_edges requires etype for heterogeneous graphs.'\n\n gpb = self.get_partition_book()\n if len(gpb.etypes) > 1:\n # if etype is a canonical edge type (str, str, str), extract the edge type\n if len(etype) == 3:\n etype = etype[1]\n edges = gpb.map_to_homo_eid(edges, etype)\n src, dst = dist_find_edges(self, edges)\n if len(gpb.ntypes) > 1:\n _, src = gpb.map_to_per_ntype(src)\n _, dst = gpb.map_to_per_ntype(dst)\n return src, dst\n\n def edge_subgraph(self, edges, relabel_nodes=True, store_ids=True):\n \"\"\"Return a subgraph induced on the given edges.\n\n An edge-induced subgraph is equivalent to creating a new graph using the given\n edges. In addition to extracting the subgraph, DGL also copies the features\n of the extracted nodes and edges to the resulting graph. The copy is *lazy*\n and incurs data movement only when needed.\n\n If the graph is heterogeneous, DGL extracts a subgraph per relation and composes\n them as the resulting graph. Thus, the resulting graph has the same set of relations\n as the input one.\n\n Parameters\n ----------\n edges : Int Tensor or dict[(str, str, str), Int Tensor]\n The edges to form the subgraph. Each element is an edge ID. The tensor must have\n the same device type and ID data type as the graph's.\n\n If the graph is homogeneous, one can directly pass an Int Tensor.\n Otherwise, the argument must be a dictionary with keys being edge types\n and values being the edge IDs in the above formats.\n relabel_nodes : bool, optional\n If True, it will remove the isolated nodes and relabel the incident nodes in the\n extracted subgraph.\n store_ids : bool, optional\n If True, it will store the raw IDs of the extracted edges in the ``edata`` of the\n resulting graph under name ``dgl.EID``; if ``relabel_nodes`` is ``True``, it will\n also store the raw IDs of the incident nodes in the ``ndata`` of the resulting\n graph under name ``dgl.NID``.\n\n Returns\n -------\n G : DGLGraph\n The subgraph.\n \"\"\"\n if isinstance(edges, dict):\n # TODO(zhengda) we need to directly generate subgraph of all relations with\n # one invocation.\n if isinstance(edges, tuple):\n subg = {etype: self.find_edges(edges[etype], etype[1]) for etype in edges}\n else:\n subg = {}\n for etype in edges:\n assert len(self._etype2canonical[etype]) == 3, \\\n 'the etype in input edges is ambiguous'\n subg[self._etype2canonical[etype]] = self.find_edges(edges[etype], etype)\n num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}\n subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)\n for etype in edges:\n subg.edges[etype].data[EID] = edges[etype]\n else:\n assert len(self.etypes) == 1\n subg = self.find_edges(edges)\n subg = dgl_graph(subg, num_nodes=self.number_of_nodes())\n subg.edata[EID] = edges\n\n if relabel_nodes:\n subg = compact_graphs(subg)\n assert store_ids, 'edge_subgraph always stores original node/edge IDs.'\n return subg\n\n def get_partition_book(self):\n \"\"\"Get the partition information.\n\n Returns\n -------\n GraphPartitionBook\n Object that stores all graph partition information.\n \"\"\"\n return 
self._gpb\n\n def get_node_partition_policy(self, ntype):\n \"\"\"Get the partition policy for a node type.\n\n When creating a new distributed tensor, we need to provide a partition policy\n that indicates how to distribute data of the distributed tensor in a cluster\n of machines. When we load a distributed graph in the cluster, we have pre-defined\n partition policies for each node type and each edge type. By providing\n the node type, we can reference to the pre-defined partition policy for the node type.\n\n Parameters\n ----------\n ntype : str\n The node type\n\n Returns\n -------\n PartitionPolicy\n The partition policy for the node type.\n \"\"\"\n return NodePartitionPolicy(self.get_partition_book(), ntype)\n\n def get_edge_partition_policy(self, etype):\n \"\"\"Get the partition policy for an edge type.\n\n When creating a new distributed tensor, we need to provide a partition policy\n that indicates how to distribute data of the distributed tensor in a cluster\n of machines. When we load a distributed graph in the cluster, we have pre-defined\n partition policies for each node type and each edge type. By providing\n the edge type, we can reference to the pre-defined partition policy for the edge type.\n\n Parameters\n ----------\n etype : str\n The edge type\n\n Returns\n -------\n PartitionPolicy\n The partition policy for the edge type.\n \"\"\"\n return EdgePartitionPolicy(self.get_partition_book(), etype)\n\n def barrier(self):\n '''Barrier for all client nodes.\n\n This API blocks the current process untill all the clients invoke this API.\n Please use this API with caution.\n '''\n self._client.barrier()\n\n def _get_ndata_names(self, ntype=None):\n ''' Get the names of all node data.\n '''\n names = self._client.gdata_name_list()\n ndata_names = []\n for name in names:\n name = parse_hetero_data_name(name)\n right_type = (name.get_type() == ntype) if ntype is not None else True\n if name.is_node() and right_type:\n ndata_names.append(name)\n return ndata_names\n\n def _get_edata_names(self, etype=None):\n ''' Get the names of all edge data.\n '''\n names = self._client.gdata_name_list()\n edata_names = []\n for name in names:\n name = parse_hetero_data_name(name)\n right_type = (name.get_type() == etype) if etype is not None else True\n if name.is_edge() and right_type:\n edata_names.append(name)\n return edata_names\n\ndef _get_overlap(mask_arr, ids):\n \"\"\" Select the IDs given a boolean mask array.\n\n The boolean mask array indicates all of the IDs to be selected. We want to\n find the overlap between the IDs selected by the boolean mask array and\n the ID array.\n\n Parameters\n ----------\n mask_arr : 1D tensor\n A boolean mask array.\n ids : 1D tensor\n A vector with IDs.\n\n Returns\n -------\n 1D tensor\n The selected IDs.\n \"\"\"\n if isinstance(mask_arr, DistTensor):\n masks = mask_arr[ids]\n return F.boolean_mask(ids, masks)\n else:\n masks = F.gather_row(F.tensor(mask_arr), ids)\n return F.boolean_mask(ids, masks)\n\ndef _split_local(partition_book, rank, elements, local_eles):\n ''' Split the input element list with respect to data locality.\n '''\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n if rank is None:\n rank = role.get_trainer_rank()\n assert rank < num_clients, \\\n 'The input rank ({}) is incorrect. 
#Trainers: {}'.format(rank, num_clients)\n # all ranks of the clients in the same machine are in a contiguous range.\n client_id_in_part = rank % num_client_per_part\n local_eles = _get_overlap(elements, local_eles)\n\n # get a subset for the local client.\n size = len(local_eles) // num_client_per_part\n # if this isn't the last client in the partition.\n if client_id_in_part + 1 < num_client_per_part:\n return local_eles[(size * client_id_in_part):(size * (client_id_in_part + 1))]\n else:\n return local_eles[(size * client_id_in_part):]\n\ndef _even_offset(n, k):\n ''' Split an array of length n into k segments and the difference of thier length is\n at most 1. Return the offset of each segment.\n '''\n eles_per_part = n // k\n offset = np.array([0] + [eles_per_part] * k, dtype=int)\n offset[1 : n - eles_per_part * k + 1] += 1\n return np.cumsum(offset)\n\ndef _split_even_to_part(partition_book, elements):\n ''' Split the input element list evenly.\n '''\n # here we divide the element list as evenly as possible. If we use range partitioning,\n # the split results also respect the data locality. Range partitioning is the default\n # strategy.\n # TODO(zhengda) we need another way to divide the list for other partitioning strategy.\n if isinstance(elements, DistTensor):\n nonzero_count = elements.count_nonzero()\n else:\n elements = F.tensor(elements)\n nonzero_count = F.count_nonzero(elements)\n # compute the offset of each split and ensure that the difference of each partition size\n # is 1.\n offsets = _even_offset(nonzero_count, partition_book.num_partitions())\n assert offsets[-1] == nonzero_count\n\n # Get the elements that belong to the partition.\n partid = partition_book.partid\n left, right = offsets[partid], offsets[partid + 1]\n\n x = y = 0\n num_elements = len(elements)\n block_size = num_elements // partition_book.num_partitions()\n part_eles = None\n # compute the nonzero tensor of each partition instead of whole tensor to save memory\n for idx in range(0, num_elements, block_size):\n nonzero_block = F.nonzero_1d(elements[idx:min(idx+block_size, num_elements)])\n x = y\n y += len(nonzero_block)\n if y > left and x < right:\n start = max(x, left) - x\n end = min(y, right) - x\n tmp = nonzero_block[start:end] + idx\n if part_eles is None:\n part_eles = tmp\n else:\n part_eles = F.cat((part_eles, tmp), 0)\n elif x >= right:\n break\n\n return part_eles\n\ndef _split_random_within_part(partition_book, rank, part_eles):\n # If there are more than one client in a partition, we need to randomly select a subset of\n # elements in the partition for a client. We have to make sure that the set of elements\n # for different clients are disjoint.\n\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n if num_client_per_part == 1:\n return part_eles\n if rank is None:\n rank = role.get_trainer_rank()\n assert rank < num_clients, \\\n 'The input rank ({}) is incorrect. 
#Trainers: {}'.format(rank, num_clients)\n client_id_in_part = rank % num_client_per_part\n offset = _even_offset(len(part_eles), num_client_per_part)\n\n # We set the random seed for each partition, so that each process (client) in a partition\n # permute the elements in a partition in the same way, so each process gets a disjoint subset\n # of elements.\n np.random.seed(partition_book.partid)\n rand_idx = np.random.permutation(len(part_eles))\n rand_idx = rand_idx[offset[client_id_in_part] : offset[client_id_in_part + 1]]\n idx, _ = F.sort_1d(F.tensor(rand_idx))\n return F.gather_row(part_eles, idx)\n\ndef _split_by_trainer_id(partition_book, part_eles, trainer_id,\n num_client_per_part, client_id_in_part):\n # TODO(zhengda): MXNet cannot deal with empty tensors, which makes the implementation\n # much more difficult. Let's just use numpy for the computation for now. We just\n # perform operations on vectors. It shouldn't be too difficult.\n trainer_id = F.asnumpy(trainer_id)\n part_eles = F.asnumpy(part_eles)\n part_id = trainer_id // num_client_per_part\n trainer_id = trainer_id % num_client_per_part\n local_eles = part_eles[np.nonzero(part_id[part_eles] == partition_book.partid)[0]]\n # these are the Ids of the local elements in the partition. The Ids are global Ids.\n remote_eles = part_eles[np.nonzero(part_id[part_eles] != partition_book.partid)[0]]\n # these are the Ids of the remote nodes in the partition. The Ids are global Ids.\n local_eles_idx = np.concatenate(\n [np.nonzero(trainer_id[local_eles] == i)[0] for i in range(num_client_per_part)],\n # trainer_id[local_eles] is the trainer ids of local nodes in the partition and we\n # pick out the indices where the node belongs to each trainer i respectively, and\n # concatenate them.\n axis=0\n )\n # `local_eles_idx` is used to sort `local_eles` according to `trainer_id`. It is a\n # permutation of 0...(len(local_eles)-1)\n local_eles = local_eles[local_eles_idx]\n\n # evenly split local nodes to trainers\n local_offsets = _even_offset(len(local_eles), num_client_per_part)\n # evenly split remote nodes to trainers\n remote_offsets = _even_offset(len(remote_eles), num_client_per_part)\n\n client_local_eles = local_eles[\n local_offsets[client_id_in_part]:local_offsets[client_id_in_part + 1]]\n client_remote_eles = remote_eles[\n remote_offsets[client_id_in_part]:remote_offsets[client_id_in_part + 1]]\n client_eles = np.concatenate([client_local_eles, client_remote_eles], axis=0)\n return F.tensor(client_eles)\n\ndef node_split(nodes, partition_book=None, ntype='_N', rank=None, force_even=True,\n node_trainer_ids=None):\n ''' Split nodes and return a subset for the local rank.\n\n This function splits the input nodes based on the partition book and\n returns a subset of nodes for the local rank. This method is used for\n dividing workloads for distributed training.\n\n The input nodes are stored as a vector of masks. The length of the vector is\n the same as the number of nodes in a graph; 1 indicates that the vertex in\n the corresponding location exists.\n\n There are two strategies to split the nodes. By default, it splits the nodes\n in a way to maximize data locality. That is, all nodes that belong to a process\n are returned. 
If ``force_even`` is set to true, the nodes are split evenly so\n that each process gets almost the same number of nodes.\n\n When ``force_even`` is True, the data locality is still preserved if a graph is partitioned\n with Metis and the node/edge IDs are shuffled.\n In this case, majority of the nodes returned for a process are the ones that\n belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.\n\n Parameters\n ----------\n nodes : 1D tensor or DistTensor\n A boolean mask vector that indicates input nodes.\n partition_book : GraphPartitionBook, optional\n The graph partition book\n ntype : str, optional\n The node type of the input nodes.\n rank : int, optional\n The rank of a process. If not given, the rank of the current process is used.\n force_even : bool, optional\n Force the nodes are split evenly.\n node_trainer_ids : 1D tensor or DistTensor, optional\n If not None, split the nodes to the trainers on the same machine according to\n trainer IDs assigned to each node. Otherwise, split randomly.\n\n Returns\n -------\n 1D-tensor\n The vector of node IDs that belong to the rank.\n '''\n if not isinstance(nodes, DistTensor):\n assert partition_book is not None, 'Regular tensor requires a partition book.'\n elif partition_book is None:\n partition_book = nodes.part_policy.partition_book\n\n assert len(nodes) == partition_book._num_nodes(ntype), \\\n 'The length of boolean mask vector should be the number of nodes in the graph.'\n if rank is None:\n rank = role.get_trainer_rank()\n if force_even:\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n assert num_clients % partition_book.num_partitions() == 0, \\\n 'The total number of clients should be multiple of the number of partitions.'\n part_nid = _split_even_to_part(partition_book, nodes)\n if num_client_per_part == 1:\n return part_nid\n elif node_trainer_ids is None:\n return _split_random_within_part(partition_book, rank, part_nid)\n else:\n trainer_id = node_trainer_ids[0:len(node_trainer_ids)]\n max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1\n\n if max_trainer_id > num_clients:\n # We hope the partition scheme with trainer_id could be used when the number of\n # trainers is less than the `num_trainers_per_machine` previously assigned during\n # partitioning.\n assert max_trainer_id % num_clients == 0\n trainer_id //= (max_trainer_id // num_clients)\n\n client_id_in_part = rank % num_client_per_part\n return _split_by_trainer_id(partition_book, part_nid, trainer_id,\n num_client_per_part, client_id_in_part)\n else:\n # Get all nodes that belong to the rank.\n local_nids = partition_book.partid2nids(partition_book.partid)\n return _split_local(partition_book, rank, nodes, local_nids)\n\ndef edge_split(edges, partition_book=None, etype='_E', rank=None, force_even=True,\n edge_trainer_ids=None):\n ''' Split edges and return a subset for the local rank.\n\n This function splits the input edges based on the partition book and\n returns a subset of edges for the local rank. This method is used for\n dividing workloads for distributed training.\n\n The input edges can be stored as a vector of masks. The length of the vector is\n the same as the number of edges in a graph; 1 indicates that the edge in\n the corresponding location exists.\n\n There are two strategies to split the edges. By default, it splits the edges\n in a way to maximize data locality. That is, all edges that belong to a process\n are returned. 
If ``force_even`` is set to true, the edges are split evenly so\n that each process gets almost the same number of edges.\n\n When ``force_even`` is True, the data locality is still preserved if a graph is partitioned\n with Metis and the node/edge IDs are shuffled.\n In this case, majority of the nodes returned for a process are the ones that\n belong to the process. If node/edge IDs are not shuffled, data locality is not guaranteed.\n\n Parameters\n ----------\n edges : 1D tensor or DistTensor\n A boolean mask vector that indicates input edges.\n partition_book : GraphPartitionBook, optional\n The graph partition book\n etype : str, optional\n The edge type of the input edges.\n rank : int, optional\n The rank of a process. If not given, the rank of the current process is used.\n force_even : bool, optional\n Force the edges are split evenly.\n edge_trainer_ids : 1D tensor or DistTensor, optional\n If not None, split the edges to the trainers on the same machine according to\n trainer IDs assigned to each edge. Otherwise, split randomly.\n\n Returns\n -------\n 1D-tensor\n The vector of edge IDs that belong to the rank.\n '''\n if not isinstance(edges, DistTensor):\n assert partition_book is not None, 'Regular tensor requires a partition book.'\n elif partition_book is None:\n partition_book = edges.part_policy.partition_book\n assert len(edges) == partition_book._num_edges(etype), \\\n 'The length of boolean mask vector should be the number of edges in the graph.'\n if rank is None:\n rank = role.get_trainer_rank()\n if force_even:\n num_clients = role.get_num_trainers()\n num_client_per_part = num_clients // partition_book.num_partitions()\n assert num_clients % partition_book.num_partitions() == 0, \\\n 'The total number of clients should be multiple of the number of partitions.'\n part_eid = _split_even_to_part(partition_book, edges)\n if num_client_per_part == 1:\n return part_eid\n elif edge_trainer_ids is None:\n return _split_random_within_part(partition_book, rank, part_eid)\n else:\n trainer_id = edge_trainer_ids[0:len(edge_trainer_ids)]\n max_trainer_id = F.as_scalar(F.reduce_max(trainer_id)) + 1\n\n if max_trainer_id > num_clients:\n # We hope the partition scheme with trainer_id could be used when the number of\n # trainers is less than the `num_trainers_per_machine` previously assigned during\n # partitioning.\n assert max_trainer_id % num_clients == 0\n trainer_id //= (max_trainer_id // num_clients)\n\n client_id_in_part = rank % num_client_per_part\n return _split_by_trainer_id(partition_book, part_eid, trainer_id,\n num_client_per_part, client_id_in_part)\n else:\n # Get all edges that belong to the rank.\n local_eids = partition_book.partid2eids(partition_book.partid)\n return _split_local(partition_book, rank, edges, local_eids)\n\nrpc.register_service(INIT_GRAPH, InitGraphRequest, InitGraphResponse)\n"
] | [
[
"numpy.cumsum",
"numpy.random.seed",
"numpy.array",
"numpy.concatenate",
"numpy.nonzero"
]
] |
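
A minimal, self-contained sketch (not part of either dataset row) of how the numpy calls listed in the `apis` column above — `numpy.array`, `numpy.cumsum`, `numpy.nonzero`, `numpy.concatenate`, `numpy.random.seed` — are combined by the `_even_offset` and `_split_random_within_part` helpers in the `dist_graph.py` source shown in the `code` column. The names `even_offset` and `split_within_part`, the seed value, and the use of `np.sort` (standing in for the backend `F.sort_1d` call in the original) are illustrative assumptions, not taken from the source.

```python
import numpy as np

def even_offset(n, k):
    # Split n elements into k segments whose lengths differ by at most 1 and
    # return the k + 1 cumulative segment offsets (mirrors _even_offset above).
    eles_per_part = n // k
    offset = np.array([0] + [eles_per_part] * k, dtype=int)
    offset[1 : n - eles_per_part * k + 1] += 1
    return np.cumsum(offset)

def split_within_part(part_eles, num_clients, client_id, seed=0):
    # Every client permutes the partition's elements with the same seed, so the
    # per-client slices are disjoint (mirrors _split_random_within_part above).
    np.random.seed(seed)
    rand_idx = np.random.permutation(len(part_eles))
    offsets = even_offset(len(part_eles), num_clients)
    chosen = rand_idx[offsets[client_id]:offsets[client_id + 1]]
    return part_eles[np.sort(chosen)]

mask = np.array([1, 0, 1, 1, 0, 1, 1, 1])   # boolean mask over 8 nodes
node_ids = np.nonzero(mask)[0]              # IDs selected by the mask
print(even_offset(len(node_ids), 3))        # [0 2 4 6]
print(np.concatenate([split_within_part(node_ids, 2, c) for c in range(2)]))
```

Running the sketch prints the segment offsets for three clients and the concatenation of two clients' disjoint node-ID subsets, which together cover every masked node exactly once.
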
minrk/discretisedfield | [
"251584f8d976a7fafdff5402d16327489407c4dd"
] | [
"discretisedfield/field.py"
] | [
"import pyvtk\nimport struct\nimport matplotlib\nimport numpy as np\nimport mpl_toolkits.axes_grid1\nimport discretisedfield as df\nimport ubermagutil.typesystem as ts\nimport discretisedfield.util as dfu\nimport matplotlib.pyplot as plt\n\n\[email protected](mesh=ts.Typed(expected_type=df.Mesh),\n dim=ts.Scalar(expected_type=int, unsigned=True, const=True),\n name=ts.Name(const=True))\nclass Field:\n \"\"\"Finite difference field.\n\n This class defines a finite difference field and enables certain\n operations for its analysis and visualisation. The field is\n defined on a finite difference mesh (`discretisedfield.Mesh`).\n\n Parameters\n ----------\n mesh : discretisedfield.Mesh\n Finite difference rectangular mesh.\n dim : int, optional\n Dimension of the field value. For instance, if `dim=3` the\n field is a three-dimensional vector field and for `dim=1`\n the field is a scalar field. Defaults to `dim=3`.\n value : array_like, callable, optional\n Please refer to the `value` property:\n :py:func:`~discretisedfield.Field.value`. Defaults to 0,\n meaning that if the value is not provided in the\n initialisation process, \"zero-field\" will be defined.\n norm : numbers.Real, callable, optional\n Please refer to the `norm` property:\n :py:func:`~discretisedfield.Field.norm`. Defaults to `None`\n (`norm=None` defines no norm).\n name : str, optional\n Field name (defaults to `'field'`). The field name must be a\n valid Python variable name string. More specifically, it must\n not contain spaces, or start with underscore or numeric\n character.\n\n Examples\n --------\n 1. Creating a uniform three-dimensional vector field on a\n nano-sized thin film.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50e-9, -25e-9, 0)\n >>> p2 = (50e-9, 25e-9, 5e-9)\n >>> cell = (1e-9, 1e-9, 0.1e-9)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n ...\n >>> dim = 3\n >>> value = (0, 0, 1)\n >>> field = df.Field(mesh=mesh, dim=dim, value=value)\n >>> field\n Field(mesh=...)\n\n 2. Creating a scalar field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-10, -10, -10)\n >>> p2 = (10, 10, 10)\n >>> n = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> dim = 1\n >>> value = 3.14\n >>> field = df.Field(mesh=mesh, dim=dim, value=value)\n >>> field\n Field(mesh=...)\n\n .. seealso:: :py:func:`~discretisedfield.Mesh`\n\n \"\"\"\n def __init__(self, mesh, dim=3, value=0, norm=None, name='field'):\n self.mesh = mesh\n self.dim = dim\n self.value = value\n self.norm = norm\n self.name = name\n\n @property\n def value(self):\n \"\"\"Field value representation.\n\n This propertry returns a representation of the field value if\n it exists. Otherwise, the `numpy.ndarray` containing all\n values from the field is returned.\n\n Parameters\n ----------\n value : 0, array_like, callable\n For scalar fields (`dim=1`) `numbers.Real` values are\n allowed. In the case of vector fields, \"array_like\" (list,\n tuple, numpy.ndarray) value with length equal to `dim`\n should be used. Finally, the value can also be a callable\n (e.g. Python function or another field), which for every\n coordinate in the mesh returns a valid value. If\n `value=0`, all values in the field will be set to zero\n independent of the field dimension.\n\n Returns\n -------\n array_like, callable, numbers.Real\n The value used (representation) for setting the field is\n returned. 
However, if the actual value of the field does\n not correspond to the initially used value anymore, a\n `numpy.ndarray` is returned containing all field values.\n\n Raises\n ------\n ValueError\n If unsupported type is passed\n\n Examples\n --------\n 1. Different ways of setting and getting the field value.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> value = (0, 0, 1)\n >>> # if value is not specified, zero-field is defined\n >>> field = df.Field(mesh=mesh, dim=3)\n >>> field.value\n 0\n >>> field.value = (0, 0, 1)\n >>> field.value\n (0, 0, 1)\n >>> # Setting the field value using a Python function (callable).\n >>> def value_function(pos):\n ... x, y, z = pos\n ... if x <= 1:\n ... return (0, 0, 1)\n ... else:\n ... return (0, 0, -1)\n >>> field.value = value_function\n >>> field.value\n <function value_function at ...>\n >>> # We now change the value of a single cell so that the\n >>> # representation used for initialising field is not valid\n >>> # anymore.\n >>> field.array[0, 0, 0, :] = (0, 0, 0)\n >>> field.value\n array(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.array`\n\n \"\"\"\n value_array = dfu.as_array(self.mesh, self.dim, self._value)\n if np.array_equal(self.array, value_array):\n return self._value\n else:\n return self.array\n\n @value.setter\n def value(self, val):\n self._value = val\n self.array = dfu.as_array(self.mesh, self.dim, val)\n\n @property\n def array(self):\n \"\"\"Numpy array of a field value.\n\n `array` has shape of `(self.mesh.n[0], self.mesh.n[1],\n self.mesh.n[2], dim)`.\n\n Parameters\n ----------\n array : numpy.ndarray\n Numpy array with dimensions `(self.mesh.n[0],\n self.mesh.n[1], self.mesh.n[2], dim)`\n\n Returns\n -------\n numpy.ndarray\n Field values array.\n\n Raises\n ------\n ValueError\n If setting the array with wrong type, shape, or value.\n\n Examples\n --------\n 1. Accessing and setting the field array.\n\n >>> import discretisedfield as df\n >>> import numpy as np\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (1, 1, 1)\n >>> cell = (0.5, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> value = (0, 0, 1)\n >>> field = df.Field(mesh=mesh, dim=3, value=value)\n >>> field.array\n array(...)\n >>> field.array.shape\n (2, 1, 1, 3)\n >>> field.array = np.ones(field.array.shape)\n >>> field.array\n array(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.value`\n\n \"\"\"\n return self._array\n\n @array.setter\n def array(self, val):\n if isinstance(val, np.ndarray) and \\\n val.shape == self.mesh.n + (self.dim,):\n self._array = val\n else:\n msg = (f'Unsupported type(val)={type(val)} '\n 'or invalid value dimensions.')\n raise ValueError(msg)\n\n @property\n def norm(self):\n \"\"\"Norm of a field.\n\n This property computes the norm of the field and returns it as\n a `discretisedfield.Field` object with `dim=1`. Norm of a\n scalar field cannot be set and `ValueError` is raised.\n\n Parameters\n ----------\n numbers.Real, numpy.ndarray\n Norm value\n\n Returns\n -------\n discretisedfield.Field\n Scalar field with norm values.\n\n Raises\n ------\n ValueError\n If setting the norm with wrong type, shape, or value. In\n addition, if the field is scalar (dim=1) or it contains\n zero vector values.\n\n Examples\n --------\n 1. 
Manipulating the field norm\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (1, 1, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field.norm\n Field(...)\n >>> field.norm = 2\n >>> field.norm\n Field(...)\n >>> field.value = (1, 0, 0)\n >>> field.norm.array\n array([[[[1.]]]])\n\n \"\"\"\n current_norm = np.linalg.norm(self.array, axis=-1)[..., None]\n return Field(self.mesh, dim=1, value=current_norm, name='norm')\n\n @norm.setter\n def norm(self, val):\n if val is not None:\n if self.dim == 1:\n msg = f'Cannot set norm for field with dim={self.dim}.'\n raise ValueError(msg)\n\n if not np.all(self.norm.array):\n msg = 'Cannot normalise field with zero values.'\n raise ValueError(msg)\n\n self.array /= self.norm.array # normalise to 1\n self.array *= dfu.as_array(self.mesh, dim=1, val=val)\n\n @property\n def average(self):\n \"\"\"Field average.\n\n It computes the average of the field over the entire volume of\n the mesh.\n\n Returns\n -------\n tuple\n Field average tuple whose length equals to the field's\n dimension.\n\n Examples\n --------\n 1. Computing the vector field average.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (5, 5, 5)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field1 = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field1.average\n (0.0, 0.0, 1.0)\n >>> field2 = df.Field(mesh=mesh, dim=1, value=55)\n >>> field2.average\n (55.0,)\n\n \"\"\"\n return tuple(self.array.mean(axis=(0, 1, 2)))\n\n def __repr__(self):\n \"\"\"Field representation string.\n\n This method returns the string that can ideally be copied in\n another Python script so that exactly the same field object\n could be defined. However, this is usually not the case due to\n complex values used.\n\n Returns\n -------\n str\n Field representation string.\n\n Example\n -------\n 1. Getting field representation string.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=1, value=1)\n >>> repr(field)\n \"Field(mesh=...)\"\n\n \"\"\"\n return (f'Field(mesh={repr(self.mesh)}, '\n f'dim={self.dim}, name=\\'{self.name}\\')')\n\n def __call__(self, point):\n \"\"\"Sample the field at `point`.\n\n It returns the value of the discreatisation cell `point`\n belongs to. It always returns a tuple, whose length is the\n same as the dimension of the field.\n\n Parameters\n ----------\n point : (3,) array_like\n The mesh point coordinate :math:`(p_{x}, p_{y}, p_{z})`.\n\n Returns\n -------\n tuple\n A tuple, whose length is the same as the dimension of the\n field.\n\n Example\n -------\n 1. Sampling the field value\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (20, 20, 20)\n >>> n = (20, 20, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 3, 4))\n >>> point = (10, 2, 3)\n >>> field(point)\n (1.0, 3.0, 4.0)\n\n \"\"\"\n value = self.array[self.mesh.point2index(point)]\n if self.dim > 1:\n value = tuple(value)\n return value\n\n def __getattr__(self, name):\n \"\"\"Extracting the component of the vector field.\n\n If `'x'`, `'y'`, or `'z'` is accessed, a new scalar field of\n that component will be returned. 
This method is effective for\n vector fields with dimension 2 or 3.\n\n Returns\n -------\n discretisedfield.Field\n Scalar field with vector field component values.\n\n Examples\n --------\n 1. Accessing the vector field components.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))\n >>> field.x\n Field(...)\n >>> field.y\n Field(...)\n >>> field.z\n Field(...)\n >>> field.z.dim\n 1\n\n \"\"\"\n if name in list(dfu.axesdict.keys())[:self.dim] and 1 < self.dim <= 3:\n # Components x, y, and z make sense only for vector fields\n # with typical dimensions 2 and 3.\n component_array = self.array[..., dfu.axesdict[name]][..., None]\n fieldname = f'{self.name}-{name}'.format(self.name, name)\n return Field(mesh=self.mesh, dim=1,\n value=component_array, name=fieldname)\n else:\n msg = f'{type(self).__name__} object has no attribute {name}.'\n raise AttributeError(msg.format(type(self).__name__, name))\n\n def __dir__(self):\n \"\"\"Extension of the tab-completion list.\n\n Adds `'x'`, `'y'`, and `'z'`, depending on the dimension of\n the field, to the tab-completion list. This is effective in\n IPython or Jupyter notebook environment.\n\n \"\"\"\n if 1 < self.dim <= 3:\n extension = list(dfu.axesdict.keys())[:self.dim]\n else:\n extension = []\n return list(self.__dict__.keys()) + extension\n\n def __iter__(self):\n \"\"\"Generator yielding coordinates and values of all field cells.\n\n The discretisation cell coordinate corresponds to the cell\n centre point.\n\n Yields\n ------\n tuple (2,)\n The first value is the mesh cell coordinates (`px`, `py`,\n `pz`), whereas the second one is the field value.\n\n Examples\n --------\n 1. Iterating through the field coordinates and values\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 1)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=3, value=(0, 0, 1))\n >>> for coord, value in field:\n ... print (coord, value)\n (0.5, 0.5, 0.5) (0.0, 0.0, 1.0)\n (1.5, 0.5, 0.5) (0.0, 0.0, 1.0)\n (0.5, 1.5, 0.5) (0.0, 0.0, 1.0)\n (1.5, 1.5, 0.5) (0.0, 0.0, 1.0)\n\n .. seealso:: :py:func:`~discretisedfield.Mesh.indices`\n\n \"\"\"\n for point in self.mesh.coordinates:\n yield point, self.__call__(point)\n\n def line(self, p1, p2, n=100):\n \"\"\"Sampling the field along the line.\n\n Given two points :math:`p_{1}` and :math:`p_{2}`, :math:`n`\n position coordinates are generated and the corresponding field\n values.\n\n .. math::\n\n \\\\mathbf{r}_{i} = i\\\\frac{\\\\mathbf{p}_{2} -\n \\\\mathbf{p}_{1}}{n-1}\n\n Parameters\n ----------\n p1, p2 : (3,) array_like\n Two points between which the line is generated.\n n : int\n Number of points on the line.\n\n Yields\n ------\n tuple\n The first element is the coordinate of the point on the\n line, whereas the second one is the value of the field.\n\n Raises\n ------\n ValueError\n If `p1` or `p2` is outside the mesh domain.\n\n Examples\n --------\n 1. Sampling the field along the line.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=2, value=(0, 3))\n >>> for coord, value in field.line(p1=(0, 0, 0), p2=(2, 0, 0), n=3):\n ... 
print(coord, value)\n (0.0, 0.0, 0.0) (0.0, 3.0)\n (1.0, 0.0, 0.0) (0.0, 3.0)\n (2.0, 0.0, 0.0) (0.0, 3.0)\n\n \"\"\"\n for point in self.mesh.line(p1=p1, p2=p2, n=n):\n yield point, self.__call__(point)\n\n def plane(self, *args, n=None, **kwargs):\n \"\"\"Slices the field with a plane.\n\n If one of the axes (`'x'`, `'y'`, or `'z'`) is passed as a\n string, a plane perpendicular to that axis is generated which\n intersects the field at its centre. Alternatively, if a keyword\n argument is passed (e.g. `x=1`), a plane perpendicular to the\n x-axis and intersecting it at x=1 is generated. The number of\n points in two dimensions on the plane can be defined using `n`\n (e.g. `n=(10, 15)`). Using the generated plane, a new\n \"two-dimensional\" field is created and returned.\n\n Parameters\n ----------\n n : tuple of length 2\n The number of points on the plane in two dimensions\n\n Returns\n ------\n discretisedfield.Field\n A field obtained as an intersection of mesh and the plane.\n\n Example\n -------\n 1. Intersecting the field with a plane.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (2, 2, 2)\n >>> cell = (1, 1, 1)\n >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n >>> field = df.Field(mesh, dim=3)\n >>> field.plane(y=1)\n Field(mesh=...)\n\n \"\"\"\n plane_mesh = self.mesh.plane(*args, n=n, **kwargs)\n return self.__class__(plane_mesh, dim=self.dim, value=self)\n\n def write(self, filename, representation='txt', extend_scalar=False):\n \"\"\"Write the field in .ovf, .omf, .ohf, or vtk format.\n\n If the extension of the `filename` is `.vtk`, a VTK file is\n written\n (:py:func:`~discretisedfield.Field._writevtk`). Otherwise, for\n `.ovf`, `.omf`, or `.ohf` extensions, an OOMMF file is written\n (:py:func:`~discretisedfield.Field._writeovf`). The\n representation (`bin4`, 'bin8', or 'txt') is passed using\n `representation` argument.\n\n Parameters\n ----------\n filename : str\n Name of the file written. It depends on its extension the\n format it is going to be written as.\n representation : str\n In the case of OOMMF files (`.ovf`, `.omf`, or `.ohf`),\n representation can be specified (`bin4`, `bin8`, or\n `txt`). Defaults to 'txt'.\n extend_scalar : bool\n If True, a scalar field will be saved as a vector\n field. More precisely, if the value at a cell is 3, that\n cell will be saved as (3, 0, 0). This is valid only for\n the OVF file formats.\n\n Example\n -------\n 1. Write an .omf file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.omf'\n >>> field.write(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n .. seealso:: :py:func:`~discretisedfield.Field.fromfile`\n\n \"\"\"\n if any([filename.endswith(ext) for ext in ['.omf', '.ovf', '.ohf']]):\n self._writeovf(filename, representation=representation,\n extend_scalar=extend_scalar)\n elif filename.endswith('.vtk'):\n self._writevtk(filename)\n else:\n msg = ('Allowed extensions for writing the field are '\n '.omf, .ovf, .ohf, and .vtk.')\n raise ValueError(msg)\n\n def _writeovf(self, filename, representation='txt', extend_scalar=False):\n \"\"\"Write the field in .ovf, .omf, or .ohf format.\n\n The extension of the `filename` should be `.ovf`, `.omf`, or\n `.ohf`. 
The representation (`bin4`, 'bin8', or 'txt') is\n passed using `representation` argument.\n\n Parameters\n ----------\n filename : str\n Name of the file written.\n representation : str\n Representation of the file (`bin4`, `bin8`, or\n `txt`). Defaults to 'txt'.\n extend_scalar : bool\n If True, a scalar field will be saved as a vector\n field. More precisely, if the value at a cell is 3, that\n cell will be saved as (3, 0, 0).\n\n Example\n -------\n 1. Write an .omf file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.omf'\n >>> field._writeovf(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n \"\"\"\n if extend_scalar and self.dim == 1:\n write_dim = 3\n else:\n write_dim = self.dim\n header = ['OOMMF OVF 2.0',\n '',\n 'Segment count: 1',\n '',\n 'Begin: Segment',\n 'Begin: Header',\n '',\n 'Title: Field generated omf file',\n 'Desc: File generated by Field class',\n 'meshunit: m',\n 'meshtype: rectangular',\n f'xbase: {self.mesh.pmin[0] + self.mesh.cell[0]/2}',\n f'ybase: {self.mesh.pmin[1] + self.mesh.cell[1]/2}',\n f'zbase: {self.mesh.pmin[2] + self.mesh.cell[2]/2}',\n f'xnodes: {self.mesh.n[0]}',\n f'ynodes: {self.mesh.n[1]}',\n f'znodes: {self.mesh.n[2]}',\n f'xstepsize: {self.mesh.cell[0]}',\n f'ystepsize: {self.mesh.cell[1]}',\n f'zstepsize: {self.mesh.cell[2]}',\n f'xmin: {self.mesh.pmin[0]}',\n f'ymin: {self.mesh.pmin[1]}',\n f'zmin: {self.mesh.pmin[2]}',\n f'xmax: {self.mesh.pmax[0]}',\n f'ymax: {self.mesh.pmax[1]}',\n f'zmax: {self.mesh.pmax[2]}',\n f'valuedim: {write_dim}',\n f'valuelabels: {self.name}_x {self.name}_y {self.name}_z',\n 'valueunits: A/m A/m A/m',\n '',\n 'End: Header',\n '']\n\n if representation == 'bin4':\n header.append('Begin: Data Binary 4')\n footer = ['End: Data Binary 4',\n 'End: Segment']\n elif representation == 'bin8':\n header.append('Begin: Data Binary 8')\n footer = ['End: Data Binary 8',\n 'End: Segment']\n elif representation == 'txt':\n header.append('Begin: Data Text')\n footer = ['End: Data Text',\n 'End: Segment']\n\n # Write header lines to the ovf file.\n f = open(filename, 'w')\n f.write(''.join(map(lambda line: f'# {line}\\n', header)))\n f.close()\n\n binary_reps = {'bin4': (1234567.0, 'f'),\n 'bin8': (123456789012345.0, 'd')}\n\n if representation in binary_reps:\n # Reopen the file with binary write, appending to the end\n # of the file.\n f = open(filename, 'ab')\n\n # Add the 8 bit binary check value that OOMMF uses.\n packarray = [binary_reps[representation][0]]\n\n # Write data to the ovf file.\n for i in self.mesh.indices:\n for vi in self.array[i]:\n packarray.append(vi)\n\n v_bin = struct.pack(binary_reps[representation][1]*len(packarray),\n *packarray)\n f.write(v_bin)\n f.close()\n\n else:\n # Reopen the file for txt representation, appending to the\n # file.\n f = open(filename, 'a')\n for i in self.mesh.indices:\n if self.dim == 3:\n v = [vi for vi in self.array[i]]\n elif self.dim == 1:\n if extend_scalar:\n v = [self.array[i][0], 0.0, 0.0]\n else:\n v = [self.array[i][0]]\n else:\n msg = (f'Cannot write dim={self.dim} field.')\n raise TypeError(msg)\n for vi in v:\n f.write(' ' + str(vi))\n f.write('\\n')\n f.close()\n\n # Write footer lines to OOMMF file.\n f = open(filename, 'a')\n f.write(''.join(map(lambda line: f'# {line}\\n', footer)))\n f.close()\n\n def 
_writevtk(self, filename):\n \"\"\"Write the field in the VTK format.\n\n The extension of the `filename` should be `.vtk`.\n\n Parameters\n ----------\n filename : str\n Name of the file written.\n\n Example\n -------\n 1. Write a .vtk file and delete it from the disk\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, -5)\n >>> p2 = (5, 15, 15)\n >>> n = (5, 15, 20)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, value=(5, 6, 7))\n >>> filename = 'mytestfile.vtk'\n >>> field._writevtk(filename) # write the file\n >>> os.remove(filename) # delete the file\n\n \"\"\"\n grid = [pmini + np.linspace(0, li, ni+1) for pmini, li, ni in\n zip(self.mesh.pmin, self.mesh.l, self.mesh.n)]\n\n structure = pyvtk.RectilinearGrid(*grid)\n vtkdata = pyvtk.VtkData(structure)\n\n vectors = [self.__call__(coord) for coord in self.mesh.coordinates]\n vtkdata.cell_data.append(pyvtk.Vectors(vectors, self.name))\n for i, component in enumerate(dfu.axesdict.keys()):\n name = f'{self.name}_{component}'\n vtkdata.cell_data.append(pyvtk.Scalars(list(zip(*vectors))[i],\n name))\n\n vtkdata.tofile(filename)\n\n @classmethod\n def fromfile(cls, filename, norm=None, name='field'):\n \"\"\"Read the field from .ovf, .omf, or .ohf file.\n\n The extension of the `filename` should be `.ovf`, `.omf`, or\n `.ohf`. If the field should be normalised, `norm` argument can\n be passed. The `name` of the field defaults to `'field'`. This\n is a `classmethod` and should be called as\n `discretisedfield.Field.fromfile('myfile.omf')`.\n\n Parameters\n ----------\n filename : str\n Name of the file to be read.\n norm : numbers.Real, numpy.ndarray, callable\n For details, refer to :py:func:`~discretisedfield.Field.value`.\n name : str\n Name of the field read.\n\n Returns\n -------\n discretisedfield.Field\n\n Example\n -------\n 1. Read a field from the .ovf file\n\n >>> import os\n >>> import discretisedfield as df\n ...\n >>> ovffile = os.path.join(os.path.dirname(__file__),\n ... 'tests', 'test_sample',\n ... 'mumax-output-linux.ovf')\n >>> field = df.Field.fromfile(ovffile)\n >>> field\n Field(mesh=...)\n\n .. 
seealso:: :py:func:`~discretisedfield.Field.write`\n\n \"\"\"\n mdatalist = ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax',\n 'xstepsize', 'ystepsize', 'zstepsize', 'valuedim']\n mdatadict = dict()\n\n try:\n with open(filename, 'r', encoding='utf-8') as ovffile:\n f = ovffile.read()\n lines = f.split('\\n')\n\n mdatalines = filter(lambda s: s.startswith('#'), lines)\n datalines = np.loadtxt(filter(lambda s: not s.startswith('#'),\n lines))\n\n for line in mdatalines:\n for mdatum in mdatalist:\n if mdatum in line:\n mdatadict[mdatum] = float(line.split()[-1])\n break\n\n except UnicodeDecodeError:\n with open(filename, 'rb') as ovffile:\n f = ovffile.read()\n lines = f.split(b'\\n')\n\n mdatalines = filter(lambda s: s.startswith(bytes('#', 'utf-8')),\n lines)\n\n for line in mdatalines:\n for mdatum in mdatalist:\n if bytes(mdatum, 'utf-8') in line:\n mdatadict[mdatum] = float(line.split()[-1])\n break\n\n header = b'# Begin: Data Binary '\n data_start = f.find(header)\n header = f[data_start:data_start + len(header) + 1]\n\n data_start += len(b'# Begin: Data Binary 8')\n data_end = f.find(b'# End: Data Binary ')\n\n # ordered by length\n newlines = [b'\\n\\r', b'\\r\\n', b'\\n']\n for nl in newlines:\n if f.startswith(nl, data_start):\n data_start += len(nl)\n break\n\n if b'4' in header:\n formatstr = '@f'\n checkvalue = 1234567.0\n elif b'8' in header:\n formatstr = '@d'\n checkvalue = 123456789012345.0\n\n listdata = list(struct.iter_unpack(formatstr,\n f[data_start:data_end]))\n datalines = np.array(listdata)\n\n if datalines[0] != checkvalue:\n # These two lines cannot be accessed via\n # tests. Therefore, they are excluded from coverage.\n msg = 'Binary Data cannot be read.' # pragma: no cover\n raise AssertionError(msg) # pragma: no cover\n\n datalines = datalines[1:] # check value removal\n\n p1 = (mdatadict[key] for key in ['xmin', 'ymin', 'zmin'])\n p2 = (mdatadict[key] for key in ['xmax', 'ymax', 'zmax'])\n cell = (mdatadict[key] for key in ['xstepsize', 'ystepsize',\n 'zstepsize'])\n dim = int(mdatadict['valuedim'])\n\n mesh = df.Mesh(p1=p1, p2=p2, cell=cell)\n\n field = df.Field(mesh, dim=dim, name=name)\n\n r_tuple = tuple(reversed(field.mesh.n)) + (int(mdatadict['valuedim']),)\n t_tuple = tuple(reversed(range(3))) + (3,)\n field.array = datalines.reshape(r_tuple).transpose(t_tuple)\n field.norm = norm # Normalise if norm is passed\n\n return field\n\n def mpl(self, figsize=None):\n \"\"\"Plots a field plane using matplotlib.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`z`)`). Otherwise, ValueError is\n raised. For vector fields, this method plots both `quiver`\n (vector) and `imshow` (scalar) plots. The `imshow` plot\n represents the value of the out-of-plane vector component and\n the `quiver` plot is not coloured. On the other hand, only\n `imshow` is plotted for scalar fields. Where the norm of the\n field is zero, no vectors are shown and those `imshow` pixels\n are not coloured. 
In order to use this function inside Jupyter\n notebook `%matplotlib inline` must be activated after\n `discretisedfield` is imported.\n\n Parameters\n ----------\n figsize : tuple, optional\n Length-2 tuple passed to the `matplotlib.figure` function.\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.plane(z=50, n=(5, 5)).mpl()\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using mpl. '\n 'For instance, field.plane(\\'x\\').mpl().')\n raise ValueError(msg)\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n\n planeaxis = dfu.raxesdict[self.mesh.info['planeaxis']]\n\n if self.dim > 1:\n # Vector field has both quiver and imshow plots.\n self.quiver(ax=ax, headwidth=5)\n scfield = getattr(self, planeaxis)\n coloredplot = scfield.imshow(ax=ax, norm_field=self.norm)\n else:\n # Scalar field has only imshow.\n scfield = self\n coloredplot = scfield.imshow(ax=ax, norm_field=None)\n\n # Add colorbar to imshow plot.\n cbar = self.colorbar(ax, coloredplot)\n\n # Add labels.\n ax.set_xlabel(dfu.raxesdict[self.mesh.info['axis1']])\n ax.set_ylabel(dfu.raxesdict[self.mesh.info['axis2']])\n if self.dim > 1:\n cbar.ax.set_ylabel(planeaxis + ' component')\n\n def imshow(self, ax, norm_field=None, **kwargs):\n \"\"\"Plots a scalar field plane using `matplotlib.pyplot.imshow`.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`y`)`) and field must be of dimension\n 1 (scalar field). Otherwise, ValueError is raised. `imshow`\n adds the plot to `matplotlib.axes.Axes` passed via `ax`\n argument. If the scalar field plotted is extracted from a\n vector field, which has coordinates where the norm of the\n field is zero, the norm of that vector field can be passed\n using `norm_field` argument, so that pixels at those\n coordinates are not coloured. All other parameters accepted by\n `matplotlib.pyplot.imshow` can be passed. In order to use this\n function inside Jupyter notebook `%matplotlib inline` must be\n activated after `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the scalar plot will be added.\n norm_field : discretisedfield.Field, optional\n A (scalar) norm field used for determining whether certain\n pixels should be coloured.\n\n Returns\n -------\n matplotlib.image.AxesImage object\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane or its\n dimension is not 1.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=1, value=2)\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> field.plane('y').imshow(ax=ax)\n <matplotlib.image.AxesImage object at ...>\n\n .. seealso:: :py:func:`~discretisedfield.Field.quiver`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using imshow. '\n 'For instance, field.plane(\\'x\\').imshow(ax=ax).')\n raise ValueError(msg)\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. 
field.x.imshow(ax=ax) '\n 'or norm field.norm.imshow(ax=ax).')\n raise ValueError(msg)\n\n points, values = list(zip(*list(self)))\n\n # If norm_field is passed, set values where norm=0 to np.nan,\n # so that they are not plotted.\n if norm_field is not None:\n values = list(values) # make values mutable\n for i, point in enumerate(points):\n if norm_field(point) == 0:\n values[i] = np.nan\n\n # \"Unpack\" values inside arrays.\n values = [v[0] if not np.isnan(v) else v for v in values]\n else:\n # \"Unpack\" values inside arrays.\n values = list(zip(*values))\n\n points = list(zip(*points))\n\n extent = [self.mesh.pmin[self.mesh.info['axis1']],\n self.mesh.pmax[self.mesh.info['axis1']],\n self.mesh.pmin[self.mesh.info['axis2']],\n self.mesh.pmax[self.mesh.info['axis2']]]\n n = (self.mesh.n[self.mesh.info['axis2']],\n self.mesh.n[self.mesh.info['axis1']])\n\n imax = ax.imshow(np.array(values).reshape(n), origin='lower',\n extent=extent, **kwargs)\n\n return imax\n\n def quiver(self, ax=None, color_field=None, **kwargs):\n \"\"\"Plots a vector field plane using `matplotlib.pyplot.quiver`.\n\n Before the field can be plotted, it must be sliced with a\n plane (e.g. `field.plane(`y`)`) and field must be of dimension\n 3 (vector field). Otherwise, ValueError is raised. `quiver`\n adds the plot to `matplotlib.axes.Axes` passed via `ax`\n argument. If there are coordinates where the norm of the field\n is zero, vectors are not plotted at those coordinates. By\n default, plot is not coloured, but by passing a\n `discretisedfield.Field` object of dimension 1 as\n `color_field`, quiver plot will be coloured based on the\n values from the field. All other parameters accepted by\n `matplotlib.pyplot.quiver` can be passed. In order to use this\n function inside Jupyter notebook `%matplotlib inline` must be\n activated after `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the quiver plot will be added.\n color_field : discretisedfield.Field, optional\n A (scalar) field used for determining the colour of the\n quiver plot.\n\n Returns\n -------\n matplotlib.quiver.Quiver object\n\n Raises\n ------\n ValueError\n If the field has not been sliced with a plane or its\n dimension is not 3.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> field.plane(z=50).quiver(ax=ax, color_field=field.z)\n <matplotlib.quiver.Quiver object at ...>\n\n .. seealso:: :py:func:`~discretisedfield.Field.imshow`\n\n \"\"\"\n if not hasattr(self.mesh, 'info'):\n msg = ('Only sliced field can be plotted using quiver. '\n 'For instance, field.plane(\\'x\\').quiver(ax=ax).')\n raise ValueError(msg)\n if self.dim != 3:\n msg = 'Only three-dimensional (dim=3) fields can be plotted.'\n raise ValueError(msg)\n\n points, values = list(zip(*list(self)))\n\n # Remove values where norm is 0\n points, values = list(points), list(values) # make them mutable\n points = [p for p, v in zip(points, values)\n if not np.equal(v, 0).all()]\n values = [v for v in values if not np.equal(v, 0).all()]\n if color_field is not None:\n colors = [color_field(p) for p in points]\n colors = list(zip(*colors))\n\n # \"Unpack\" values inside arrays.\n points, values = list(zip(*points)), list(zip(*values))\n\n # Are there any vectors pointing out-of-plane? 
If yes, set the scale.\n if not any(values[self.mesh.info['axis1']] +\n values[self.mesh.info['axis2']]):\n kwargs['scale'] = 1\n\n kwargs['pivot'] = 'mid' # arrow at the centre of the cell\n\n if color_field is None:\n # quiver plot is not coloured.\n qvax = ax.quiver(points[self.mesh.info['axis1']],\n points[self.mesh.info['axis2']],\n values[self.mesh.info['axis1']],\n values[self.mesh.info['axis2']],\n **kwargs)\n\n else:\n # quiver plot is coloured.\n qvax = ax.quiver(points[self.mesh.info['axis1']],\n points[self.mesh.info['axis2']],\n values[self.mesh.info['axis1']],\n values[self.mesh.info['axis2']],\n colors,\n **kwargs)\n\n return qvax\n\n def colorbar(self, ax, coloredplot, cax=None, **kwargs):\n \"\"\"Adds a colorbar to the axes using `matplotlib.pyplot.colorbar`.\n\n Axes to which the colorbar should be added is passed via `ax`\n argument. If the colorbar axes are made before the method is\n called, they should be passed as `cax`. The plot to which the\n colorbar should correspond to is passed via `coloredplot`. All\n other parameters accepted by `matplotlib.pyplot.colorbar` can\n be passed. In order to use this function inside Jupyter\n notebook `%matplotlib inline` must be activated after\n `discretisedfield` is imported.\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object to which the colorbar will be added.\n coloredplot : matplotlib.quiver.Quiver, matplotlib.image.AxesImage\n A plot to which the colorbar should correspond\n cax : matplotlib.axes.Axes, optional\n Colorbar axes.\n\n Returns\n -------\n matplotlib.colorbar.Colorbar\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (0, 0, 0)\n >>> p2 = (100, 100, 100)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111)\n >>> coloredplot = field.plane(z=50).quiver(ax=ax, color_field=field.z)\n >>> field.colorbar(ax=ax, coloredplot=coloredplot)\n <matplotlib.colorbar.Colorbar object at ...>\n\n \"\"\"\n if cax is None:\n divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.1)\n\n cbar = plt.colorbar(coloredplot, cax=cax, **kwargs)\n\n return cbar\n\n def k3d_nonzero(self, color=dfu.colormap[0], plot=None, **kwargs):\n \"\"\"Plots the voxels where the value of a scalar field is nonzero.\n\n All mesh cells where the value of the field is not zero will\n be marked using the same color. Only scalar fields can be\n plotted. Otherwise, ValueError is raised. Different colour of\n voxels can be passed in the RGB format using `color`\n parameter. This function is often used to look at the defined\n sample in the finite difference mesh, by inspecting its norm\n (`field.norm.k3d_nonzero`). If `plot` is passed as a\n `k3d.plot.Plot`, plot is added to it. Otherwise, a new k3d\n plot is created. All arguments allowed in `k3d.voxels()` can\n be passed. This function is to be called in Jupyter notebook.\n\n Parameters\n ----------\n color : int/hex, optional\n Voxel color in hexadecimal format.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> def normfun(pos):\n ... x, y, z = pos\n ... if x**2 + y**2 < 30**2:\n ... 
return 1\n ... else:\n ... return 0\n >>> field.norm = normfun\n >>> field.norm.k3d_nonzero()\n Plot(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`\n \"\"\"\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. field.x.k3d_nonzero() '\n 'or norm field.norm.k3d_nonzero().')\n raise ValueError(msg)\n plot_array = np.copy(self.array) # make a deep copy\n plot_array = np.squeeze(plot_array) # remove an empty dimension\n plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)\n plot_array[plot_array != 0] = 1 # all cells have the same colour\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the plot extent to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n pmin = np.divide(self.mesh.pmin, 1e-9)\n pmax = np.divide(self.mesh.pmax, 1e-9)\n else:\n pmin = self.mesh.pmin\n pmax = self.mesh.pmax\n\n dfu.voxels(plot_array, pmin, pmax, colormap=color,\n plot=plot, **kwargs)\n\n def k3d_voxels(self, norm_field=None, plot=None, **kwargs):\n \"\"\"Plots the scalar field as a coloured `k3d.voxels()` plot.\n\n At all mesh cells, a voxel will be plotted anc coloured\n according to its value. If the scalar field plotted is\n extracted from a vector field, which has coordinates where the\n norm of the field is zero, the norm of that vector field can\n be passed using `norm_field` argument, so that voxels at those\n coordinates are not showed. Only scalar fields can be\n plotted. Otherwise, ValueError is raised. If `plot` is passed\n as a `k3d.plot.Plot`, plot is added to it. Otherwise, a new\n k3d plot is created. All arguments allowed in `k3d.voxels()`\n can be passed. This function is to be called in Jupyter\n notebook.\n\n Parameters\n ----------\n norm_field : discretisedfield.Field, optional\n A (scalar) norm field used for determining whether certain\n voxels should be plotted.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> def normfun(pos):\n ... x, y, z = pos\n ... if x**2 + y**2 < 30**2:\n ... return 1\n ... else:\n ... return 0\n >>> field.norm = normfun\n >>> field.x.k3d_voxels(norm_field=field.norm)\n Plot(...)\n\n .. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`\n\n \"\"\"\n if self.dim > 1:\n msg = ('Only scalar (dim=1) fields can be plotted. Consider '\n 'plotting one component, e.g. 
field.x.k3d_nonzero() '\n 'or norm field.norm.k3d_nonzero().')\n raise ValueError(msg)\n\n plot_array = np.copy(self.array) # make a deep copy\n plot_array = plot_array[..., 0] # remove an empty dimension\n\n plot_array -= plot_array.min()\n # In the case of uniform fields, division by zero can be\n # encountered.\n if plot_array.max() != 0:\n plot_array /= plot_array.max()\n plot_array *= 254\n plot_array += 1\n plot_array = plot_array.round()\n plot_array = plot_array.astype(int)\n\n if norm_field is not None:\n for index in self.mesh.indices:\n if norm_field(self.mesh.index2point(index)) == 0:\n plot_array[index] = 0\n\n plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)\n\n cmap = matplotlib.cm.get_cmap('viridis', 256)\n colormap = [dfu.num2hexcolor(i, cmap) for i in range(cmap.N)]\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the plot extent to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n pmin = np.divide(self.mesh.pmin, 1e-9)\n pmax = np.divide(self.mesh.pmax, 1e-9)\n else:\n pmin = self.mesh.pmin\n pmax = self.mesh.pmax\n\n dfu.voxels(plot_array, pmin, pmax, colormap=colormap,\n plot=plot, **kwargs)\n\n def k3d_vectors(self, color_field=None, points=True, plot=None, **kwargs):\n \"\"\"Plots the vector field as a `k3d.vectors()` plot.\n\n At all mesh cells, a vector will be plotted if its norm is not\n zero. Vectors can be coloured according to the values of the\n scalar field passed as `color_field`. Only vector fields can\n be plotted. Otherwise, ValueError is raised. Points at the\n discretisation cell centres can be added by setting\n `points=True`. If `plot` is passed as a `k3d.plot.Plot`, plot\n is added to it. Otherwise, a new k3d plot is created. All\n arguments allowed in `k3d.vectors()` can be passed. This\n function is to be called in Jupyter notebook.\n\n Parameters\n ----------\n color_field : discretisedfield.Field, optional\n A (scalar) field used for determining the colours of\n vectors.\n points : bool, optional\n If `True`, points will be added to the discretisation cell\n centres.\n plot : k3d.plot.Plot, optional\n If this argument is passed, plot is added to\n it. Otherwise, a new k3d plot is created.\n\n Example\n -------\n 1. Plotting an entire vector field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.k3d_vectors(color_field=field.x)\n Plot(...)\n\n 2. Plotting the slice of a vector field.\n\n >>> import discretisedfield as df\n ...\n >>> p1 = (-50, -50, -50)\n >>> p2 = (50, 50, 50)\n >>> n = (10, 10, 10)\n >>> mesh = df.Mesh(p1=p1, p2=p2, n=n)\n >>> field = df.Field(mesh, dim=3, value=(1, 2, 0))\n >>> field.plane('x').k3d_vectors(color_field=field.x)\n Plot(...)\n\n .. 
seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`\n\n \"\"\"\n if self.dim != 3:\n msg = 'Only three-dimensional (dim=3) fields can be plotted.'\n raise ValueError(msg)\n\n coordinates, vectors, color_values = [], [], []\n norm = self.norm # assigned to be computed only once\n for coord, value in self:\n if norm(coord) > 0:\n coordinates.append(coord)\n vectors.append(value)\n if color_field is not None:\n color_values.append(color_field(coord)[0])\n\n coordinates, vectors = np.array(coordinates), np.array(vectors)\n\n # In the case of nano-sized samples, fix the order of\n # magnitude of the coordinates to avoid freezing the k3d plot.\n if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):\n coordinates /= 1e-9\n cell = np.divide(self.mesh.cell, 1e-9)\n else:\n cell = self.mesh.cell\n\n # Scale the vectors to correspond to the size of cells.\n vectors /= vectors.max()\n vectors *= 0.8*np.array(cell)\n\n # Middle of the arrow is at the cell centre.\n coordinates -= 0.5 * vectors\n\n if color_field is not None:\n color_values = np.array(color_values)\n color_values -= color_values.min()\n # In the case of uniform fields, division by zero can be\n # encountered.\n if color_values.max() != 0:\n color_values /= color_values.max()\n color_values *= 256\n color_values = color_values.round()\n color_values = color_values.astype(int)\n\n cmap = matplotlib.cm.get_cmap('viridis', 256)\n colors = []\n for c in color_values:\n color = dfu.num2hexcolor(c, cmap)\n colors.append((color, color))\n else:\n colors = []\n\n plot = dfu.vectors(coordinates, vectors, colors=colors,\n plot=plot, **kwargs)\n\n if points:\n dfu.points(coordinates + 0.5 * vectors, plot=plot)\n"
] | [
[
"numpy.divide",
"numpy.squeeze",
"matplotlib.pyplot.figure",
"numpy.swapaxes",
"numpy.equal",
"numpy.copy",
"matplotlib.cm.get_cmap",
"numpy.all",
"numpy.array_equal",
"numpy.array",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.isnan"
]
] |
Fei-Wang/dl-pytorch | [
"a7672603e2de7824d0ff7e97b69dedad3fd9d476"
] | [
"test/test_models/test_palm.py"
] | [
"import torch\n\nfrom luffy.models.palm import *\n\n\ndef test_palm_tony():\n model = PaLMTony(num_tokens=20000)\n\n tokens = torch.randint(0, 20000, (1, 2048))\n feat = model(tokens)\n assert feat.shape == (1, 2048, 20000)\n"
] | [
[
"torch.randint"
]
] |
bestetc/batchflow | [
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06",
"d2a843640383fbe860654236881483f755227e06"
] | [
"batchflow/models/tf/nn/train.py",
"batchflow/models/metrics/loss.py",
"batchflow/batch_image.py"
] | [
"\"\"\" Helpers for training \"\"\"\nfrom math import pi\n\nimport tensorflow as tf\n\ndef piecewise_constant(global_step, *args, **kwargs):\n \"\"\" Constant learning rate decay (uses global_step param instead of x) \"\"\"\n return tf.train.piecewise_constant(global_step, *args, **kwargs)\n\ndef cyclic_learning_rate(learning_rate, global_step, max_lr, step_size=10,\n mode='tri', name='CyclicLearningRate'):\n \"\"\" This function varies the learning rate between the\n minimum (learning_rate) and the maximum (max_lr).\n It returns the decayed learning rate.\n\n Parameters\n ----------\n learning_rate : float or tf.Tensor\n The minimum learning rate boundary.\n global_step : int or tf.Tensor\n Global_step refers to the number of batches seen by the model.\n It is use for the cyclic computation. Must not be negative.\n max_lr : float or tf.Tensor\n The maximum learning rate boundary.\n step_size : int or tf.Tensor\n The number of iterations in half a cycle (the default is 10).\n mode : {'tri', 'sin', 'saw'}\n Set the learning rate change function.\n name : str\n Name of the operation (the default is 'CyclicLearningRate').\n\n Returns\n -------\n tf.Tensor\n\n Notes\n -----\n More detailed information about `mode`:\n\n If 'tri':\n Default, linearly increasing then linearly decreasing the\n learning rate at each cycle. Learning rate starting\n from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.\n See `Leslie N. Smith, Cyclical Learning Rates for Training Neural Networks\n <https://arxiv.org/abs/1506.01186>`_ for more information.\n\n It is computed as::\n\n decayed_learning_rate = abs(mod((global_step + step_size / 4) / step_size, 1) - 0.5) *\n 2 * (max_lr - learning_rate) +\n learning_rate\n\n\n If 'sin':\n Learning rate changes as a sine wave, starting\n from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.\n\n It is computed as::\n\n decayed_learning_rate = (learning_rate - max_lr) / 2 *\n sin(pi * global_step / step_size) +\n (max_lr + learning_rate) / 2\n\n\n If 'saw':\n Learning rate linearly increasing from `learning_rate` to `max_lr`\n and then sharply drops to `learning_rate` at each cycle.\n Learning rate starting from `learning_rate` then increasing.\n\n It is computed as::\n\n decayed_learning_rate = (max_lr - learning_rate) *\n (floor(global_step / step_size) - global_step / step_size) +\n learning_rate\n \"\"\"\n with tf.name_scope(name):\n learning_rate = tf.cast(learning_rate, dtype=tf.float32)\n global_step = tf.cast(global_step, dtype=tf.float32)\n step_size = tf.cast(step_size, dtype=tf.float32)\n max_lr = tf.cast(max_lr, dtype=tf.float32)\n\n if mode == 'tri':\n periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1)\n first_factor = tf.abs(periodic_comp - 0.5)\n second_factor = 2 * (max_lr - learning_rate)\n second_comp = learning_rate\n elif mode == 'sin':\n first_factor = (learning_rate - max_lr) / 2.\n second_factor = tf.sin((pi * global_step) / step_size)\n second_comp = (learning_rate + max_lr) / 2.\n elif mode == 'saw':\n first_factor = max_lr - learning_rate\n second_factor = tf.mod(global_step / step_size, 1)\n second_comp = learning_rate\n return first_factor * second_factor + second_comp\n",
"\"\"\" Loss as a Metrics to be used in research pipelines added with `run=True` \"\"\"\n\nimport numpy as np\n\nfrom .base import Metrics\n\n\nclass Loss(Metrics):\n \"\"\"\n This is a helper class to aggregate losses from pipelines\n that are used in Research objects with `run=True`,\n like test pipelines\n\n Parameters\n ----------\n loss : float\n loss value obtained from model\n \"\"\"\n\n def __init__(self, loss, batch_len):\n super().__init__()\n\n self.losses = [loss]\n self.batch_lengths = [batch_len]\n\n def agg_loss(args):\n losses, blens = args\n return np.sum(np.asarray(losses) * np.asarray(blens)) / np.sum(blens)\n\n self._agg_fn_dict.update(mean=agg_loss)\n\n def batchwise_loss(args):\n losses, _ = args\n return losses\n\n self._agg_fn_dict.update(batchwise=batchwise_loss)\n\n def append(self, metrics):\n \"\"\" Extend with data from another metrics. \"\"\"\n self.losses.extend(metrics.losses)\n self.batch_lengths.extend(metrics.batch_lengths)\n\n def loss(self):\n return self.losses, self.batch_lengths\n",
"\"\"\" Contains Batch classes for images \"\"\"\nimport os\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\nimport PIL\nimport PIL.ImageOps\nimport PIL.ImageChops\nimport PIL.ImageFilter\nimport PIL.ImageEnhance\n\nfrom .batch import Batch\nfrom .decorators import action, apply_parallel, inbatch_parallel\nfrom .dsindex import FilesIndex\n\n\nclass BaseImagesBatch(Batch):\n \"\"\" Batch class for 2D images.\n\n Note, that if any class method is wrapped with `@apply_parallel` decorator\n than for inner calls (i.e. from other class methods) should be used version\n of desired method with underscores. (For example, if there is a decorated\n `method` than you need to call `_method_` from inside of `other_method`).\n Same is applicable for all child classes of :class:`batch.Batch`.\n \"\"\"\n components = \"images\", \"labels\", \"masks\"\n # Class-specific defaults for :meth:`.Batch.apply_parallel`\n apply_defaults = dict(target='for',\n post='_assemble',\n src='images',\n dst='images',\n )\n\n def _make_path(self, ix, src=None):\n \"\"\" Compose path.\n\n Parameters\n ----------\n ix : str\n element's index (filename)\n src : str\n Path to folder with images. Used if `self.index` is not `FilesIndex`.\n\n Returns\n -------\n path : str\n Full path to an element.\n \"\"\"\n\n if isinstance(src, FilesIndex):\n path = src.get_fullpath(ix)\n elif isinstance(self.index, FilesIndex):\n path = self.index.get_fullpath(ix)\n else:\n path = os.path.join(src, str(ix))\n return path\n\n def _load_image(self, ix, src=None, fmt=None, dst=\"images\"):\n \"\"\" Loads image.\n\n .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str, dataset.FilesIndex, None\n path to the folder with an image. If src is None then it is determined from the index.\n dst : str\n Component to write images to.\n fmt : str\n Format of the an image\n\n Raises\n ------\n NotImplementedError\n If this method is not defined in a child class\n \"\"\"\n _ = self, ix, src, dst, fmt\n raise NotImplementedError(\"Must be implemented in a child class\")\n\n @action\n def load(self, *args, src=None, fmt=None, dst=None, **kwargs):\n \"\"\" Load data.\n\n .. note:: if `fmt='images'` than ``components`` must be a single component (str).\n .. note:: All parameters must be named only.\n\n Parameters\n ----------\n src : str, None\n Path to the folder with data. If src is None then path is determined from the index.\n fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}\n Format of the file to download.\n dst : str, sequence\n components to download.\n \"\"\"\n if fmt == 'image':\n return self._load_image(src, fmt=fmt, dst=dst)\n return super().load(src=src, fmt=fmt, dst=dst, *args, **kwargs)\n\n\n def _dump_image(self, ix, src='images', dst=None, fmt=None):\n \"\"\" Saves image to dst.\n\n .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str\n Component to get images from.\n dst : str\n Folder where to dump. If dst is None then it is determined from index.\n\n Raises\n ------\n NotImplementedError\n If this method is not defined in a child class\n \"\"\"\n _ = self, ix, src, dst, fmt\n raise NotImplementedError(\"Must be implemented in a child class\")\n\n @action\n def dump(self, *args, dst=None, fmt=None, components=\"images\", **kwargs):\n \"\"\" Dump data.\n\n .. note:: If `fmt='images'` than ``dst`` must be a single component (str).\n\n .. 
note:: All parameters must be named only.\n\n Parameters\n ----------\n dst : str, None\n Path to the folder where to dump. If dst is None then path is determined from the index.\n fmt : {'image', 'blosc', 'csv', 'hdf5', 'feather'}\n Format of the file to save.\n components : str, sequence\n Components to save.\n ext: str\n Format to save images to.\n\n Returns\n -------\n self\n \"\"\"\n if fmt == 'image':\n return self._dump_image(components, dst, fmt=kwargs.pop('ext'))\n return super().dump(dst=dst, fmt=fmt, components=components, *args, **kwargs)\n\n\nclass ImagesBatch(BaseImagesBatch):\n \"\"\" Batch class for 2D images.\n\n Images are stored as numpy arrays of PIL.Image.\n\n PIL.Image has the following system of coordinates::\n\n X\n 0 -------------- >\n |\n |\n | images's pixels\n |\n |\n Y v\n\n Pixel's position is defined as (x, y)\n\n Note, that if any class method is wrapped with `@apply_parallel` decorator\n than for inner calls (i.e. from other class methods) should be used version\n of desired method with underscores. (For example, if there is a decorated\n `method` than you need to call `_method_` from inside of `other_method`).\n Same is applicable for all child classes of :class:`batch.Batch`.\n \"\"\"\n\n @classmethod\n def _get_image_shape(cls, image):\n if isinstance(image, PIL.Image.Image):\n return image.size\n return image.shape[:2]\n\n @property\n def image_shape(self):\n \"\"\": tuple - shape of the image\"\"\"\n _, shapes_count = np.unique([image.size for image in self.images], return_counts=True, axis=0)\n if len(shapes_count) == 1:\n if isinstance(self.images[0], PIL.Image.Image):\n return (*self.images[0].size, len(self.images[0].getbands()))\n return self.images[0].shape\n raise RuntimeError('Images have different shapes')\n\n @inbatch_parallel(init='indices', post='_assemble')\n def _load_image(self, ix, src=None, fmt=None, dst=\"images\"):\n \"\"\" Loads image\n\n .. note:: Please note that ``dst`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str, dataset.FilesIndex, None\n Path to the folder with an image. If src is None then it is determined from the index.\n dst : str\n Component to write images to.\n fmt : str\n Format of an image.\n \"\"\"\n return PIL.Image.open(self._make_path(ix, src))\n\n @inbatch_parallel(init='indices')\n def _dump_image(self, ix, src='images', dst=None, fmt=None):\n \"\"\" Saves image to dst.\n\n .. note:: Please note that ``src`` must be ``str`` only, sequence is not allowed here.\n\n Parameters\n ----------\n src : str\n Component to get images from.\n dst : str\n Folder where to dump.\n fmt : str\n Format of saved image.\n \"\"\"\n if dst is None:\n raise RuntimeError('You must specify `dst`')\n image = self.get(ix, src)\n ix = str(ix) + '.' 
+ fmt if fmt is not None else str(ix)\n image.save(os.path.join(dst, ix))\n\n def _assemble_component(self, result, *args, component='images', **kwargs):\n \"\"\" Assemble one component after parallel execution.\n\n Parameters\n ----------\n result : sequence, array_like\n Results after inbatch_parallel.\n component : str\n component to assemble\n \"\"\"\n _ = args, kwargs\n if isinstance(result[0], PIL.Image.Image):\n setattr(self, component, np.asarray(result, dtype=object))\n else:\n try:\n setattr(self, component, np.stack(result))\n except ValueError:\n array_result = np.empty(len(result), dtype=object)\n array_result[:] = result\n setattr(self, component, array_result)\n\n @apply_parallel\n def to_pil(self, image, mode=None):\n \"\"\"converts images in Batch to PIL format\n\n Parameters\n ----------\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n \"\"\"\n if isinstance(image, PIL.Image.Image):\n return image\n\n if mode is None:\n if len(image.shape) == 2:\n mode = 'L'\n elif len(image.shape) == 3:\n if image.shape[-1] == 3:\n mode = 'RGB'\n elif image.shape[-1] == 1:\n mode = 'L'\n image = image[:, :, 0]\n elif image.shape[-1] == 2:\n mode = 'LA'\n elif image.shape[-1] == 4:\n mode = 'RGBA'\n else:\n raise ValueError('Unknown image type as image has', image.shape[-1], 'channels')\n elif mode == 'L' and len(image.shape) == 3:\n image = image[..., 0]\n return PIL.Image.fromarray(image, mode)\n\n def _calc_origin(self, image_shape, origin, background_shape):\n \"\"\" Calculate coordinate of the input image with respect to the background.\n\n Parameters\n ----------\n image_shape : sequence\n shape of the input image.\n origin : array_like, sequence, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}\n Position of the input image with respect to the background. Can be one of:\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. 
Position is sampled uniformly such that there is no need for cropping.\n - other - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position of\n the origin in a valid region of image.\n\n background_shape : sequence\n shape of the background image.\n\n Returns\n -------\n sequence : calculated origin in the form (column, row)\n \"\"\"\n if isinstance(origin, str):\n if origin == 'top_left':\n origin = 0, 0\n elif origin == 'top_right':\n origin = (background_shape[0]-image_shape[0]+1, 0)\n elif origin == 'bottom_left':\n origin = (0, background_shape[1]-image_shape[1]+1)\n elif origin == 'bottom_right':\n origin = (background_shape[0]-image_shape[0]+1,\n background_shape[1]-image_shape[1]+1)\n elif origin == 'center':\n origin = np.maximum(0, np.asarray(background_shape) - image_shape) // 2\n elif origin == 'random':\n origin = (np.random.randint(background_shape[0]-image_shape[0]+1),\n np.random.randint(background_shape[1]-image_shape[1]+1))\n else:\n raise ValueError(\"If string, origin should be one of ['center', 'top_left', 'top_right', \"\n \"'bottom_left', 'bottom_right', 'random']. Got '{}'.\".format(origin))\n elif all(0 <= elem < 1 for elem in origin):\n region = ((background_shape[0]-image_shape[0]+1),\n (background_shape[1]-image_shape[1]+1))\n origin = np.asarray(origin) * region\n elif not all(isinstance(elem, int) for elem in origin):\n raise ValueError('If not a string, origin should be either a sequence of ints or sequence of '\n 'floats in [0, 1) interval. Got {}'.format(origin))\n\n return np.asarray(origin, dtype=np.int)\n\n @apply_parallel\n def scale(self, image, factor, preserve_shape=False, origin='center', resample=0):\n \"\"\" Scale the content of each image in the batch.\n\n Resulting shape is obtained as original_shape * factor.\n\n Parameters\n -----------\n factor : float, sequence\n resulting shape is obtained as original_shape * factor\n\n - float - scale all axes with the given factor\n - sequence (factor_1, factort_2, ...) - scale each axis with the given factor separately\n\n preserve_shape : bool\n whether to preserve the shape of the image after scaling\n\n origin : array-like, {'center', 'top_left', 'top_right', 'bottom_left', 'bottom_right', 'random'}\n Relevant only if `preserve_shape` is True.\n If `scale` < 1, defines position of the scaled image with respect to the original one's shape.\n If `scale` > 1, defines position of cropping box.\n\n Can be one of:\n\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. 
Position is sampled uniformly such that there is no need for cropping.\n - array_like - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position\n of the origin in a valid region of image.\n\n resample: int\n Parameter passed to PIL.Image.resize. Interpolation order\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' option for origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n\n Returns\n -------\n self\n \"\"\"\n original_shape = self._get_image_shape(image)\n rescaled_shape = list(np.int32(np.ceil(np.asarray(original_shape)*factor)))\n rescaled_image = image.resize(rescaled_shape, resample=resample)\n if preserve_shape:\n rescaled_image = self._preserve_shape(original_shape, rescaled_image, origin)\n return rescaled_image\n\n @apply_parallel\n def crop(self, image, origin, shape, crop_boundaries=False):\n \"\"\" Crop an image.\n\n Extract image data from the window of the size given by `shape` and placed at `origin`.\n\n Parameters\n ----------\n origin : sequence, str\n Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.\n shape : sequence\n crop size in the form of (rows, columns)\n crop_boundaries : bool\n If `True` then crop is got only from image's area. Shape of the crop might diverge with the passed one\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n origin = self._calc_origin(shape, origin, image.size)\n right_bottom = origin + shape\n\n if crop_boundaries:\n out_of_boundaries = origin < 0\n origin[out_of_boundaries] = 0\n\n image_shape = np.asarray(image.size)\n out_of_boundaries = right_bottom > image_shape\n right_bottom[out_of_boundaries] = image_shape[out_of_boundaries]\n\n return image.crop((*origin, *right_bottom))\n\n @apply_parallel\n def put_on_background(self, image, background, origin, mask=None):\n \"\"\" Put an image on a background at given origin\n\n Parameters\n ----------\n background : PIL.Image, np.ndarray of np.uint8\n Blank background to put image on.\n origin : sequence, str\n Location of the cropping box. 
See :meth:`.ImagesBatch._calc_origin` for details.\n mask : None, PIL.Image, np.ndarray of np.uint8\n mask passed to PIL.Image.paste\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n if not isinstance(background, PIL.Image.Image):\n background = PIL.Image.fromarray(background)\n else:\n background = background.copy()\n\n if not isinstance(mask, PIL.Image.Image):\n mask = PIL.Image.fromarray(mask) if mask is not None else None\n\n origin = list(self._calc_origin(self._get_image_shape(image), origin,\n self._get_image_shape(background)))\n\n background.paste(image, origin, mask)\n\n return background\n\n def _preserve_shape(self, original_shape, transformed_image, origin='center'):\n \"\"\" Change the transformed image's shape by cropping and adding empty pixels to fit the shape of original image.\n\n Parameters\n ----------\n original_shape : sequence\n transformed_image : np.ndarray\n input_origin : array-like, {'center', 'top_left', 'random'}\n Position of the scaled image with respect to the original one's shape.\n - 'center' - place the center of the input image on the center of the background and crop\n the input image accordingly.\n - 'top_left' - place the upper-left corner of the input image on the upper-left of the background\n and crop the input image accordingly.\n - 'top_right' - crop an image such that upper-right corners of\n an image and the cropping box coincide\n - 'bottom_left' - crop an image such that lower-left corners of\n an image and the cropping box coincide\n - 'bottom_right' - crop an image such that lower-right corners of\n an image and the cropping box coincide\n - 'random' - place the upper-left corner of the input image on the randomly sampled position\n in the background. Position is sampled uniformly such that there is no need for cropping.\n - array_like - sequence of ints or sequence of floats in [0, 1) interval;\n place the upper-left corner of the input image on the given position in the background.\n If `origin` is a sequence of floats in [0, 1), it defines a relative position\n of the origin in a valid region of image.\n crop_origin: array-like, {'center', 'top_left', 'random'}\n Position of crop from transformed image.\n Has same values as `input_origin`.\n\n Returns\n -------\n np.ndarray : image after described actions\n \"\"\"\n transformed_shape = self._get_image_shape(transformed_image)\n if np.any(np.array(transformed_shape) < np.array(original_shape)):\n n_channels = len(transformed_image.getbands())\n if n_channels == 1:\n background = np.zeros(original_shape, dtype=np.uint8)\n else:\n background = np.zeros((*original_shape, n_channels), dtype=np.uint8)\n return self._put_on_background_(transformed_image, background, origin)\n return self._crop_(transformed_image, origin, original_shape, True)\n\n @apply_parallel\n def filter(self, image, mode, *args, **kwargs):\n \"\"\" Filters an image. Calls ``image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))``.\n\n For more details see `ImageFilter <http://pillow.readthedocs.io/en/stable/reference/ImageFilter.html>_`.\n\n Parameters\n ----------\n mode : str\n Name of the filter.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. 
Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.filter(getattr(PIL.ImageFilter, mode)(*args, **kwargs))\n\n @apply_parallel\n def transform(self, image, *args, **kwargs):\n \"\"\" Calls ``image.transform(*args, **kwargs)``.\n\n For more information see\n `<http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform>_`.\n\n Parameters\n ----------\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n size = kwargs.pop('size', self._get_image_shape(image))\n return image.transform(*args, size=size, **kwargs)\n\n @apply_parallel\n def resize(self, image, size, *args, **kwargs):\n \"\"\" Calls ``image.resize(*args, **kwargs)``.\n\n For more details see `<https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize>_`.\n\n Parameters\n ----------\n size : tuple\n the resulting size of the image. If one of the components of tuple is None,\n corresponding dimension will be proportionally resized.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if size[0] is None and size[1] is None:\n raise ValueError('At least one component of the parameter \"size\" must be a number.')\n if size[0] is None:\n new_size = (int(image.size[0] * size[1] / image.size[1]), size[1])\n elif size[1] is None:\n new_size = (size[0], int(image.size[1] * size[0] / image.size[0]))\n else:\n new_size = size\n\n return image.resize(new_size, *args, **kwargs)\n\n @apply_parallel\n def shift(self, image, offset, mode='const'):\n \"\"\" Shifts an image.\n\n Parameters\n ----------\n offset : (Number, Number)\n mode : {'const', 'wrap'}\n How to fill borders\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if mode == 'const':\n image = image.transform(size=image.size,\n method=PIL.Image.AFFINE,\n data=(1, 0, -offset[0], 0, 1, -offset[1]))\n elif mode == 'wrap':\n image = PIL.ImageChops.offset(image, *offset)\n else:\n raise ValueError(\"mode must be one of ['const', 'wrap']\")\n return image\n\n @apply_parallel\n def pad(self, image, *args, **kwargs):\n \"\"\" Calls ``PIL.ImageOps.expand``.\n\n For more details see `<http://pillow.readthedocs.io/en/stable/reference/ImageOps.html#PIL.ImageOps.expand>`_.\n\n Parameters\n ----------\n offset : sequence\n Size of the borders in pixels. The order is (left, top, right, bottom).\n mode : {'const', 'wrap'}\n Filling mode\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return PIL.ImageOps.expand(image, *args, **kwargs)\n\n @apply_parallel\n def rotate(self, image, *args, **kwargs):\n \"\"\" Rotates an image.\n\n kwargs are passed to PIL.Image.rotate\n\n Parameters\n ----------\n angle: Number\n In degrees counter clockwise.\n resample: int\n Interpolation order\n expand: bool\n Whether to expand the output to hold the whole image. Default is False.\n center: (Number, Number)\n Center of rotation. 
Default is the center of the image.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.rotate(*args, **kwargs)\n\n @apply_parallel\n def flip(self, image, mode='lr'):\n \"\"\" Flips image.\n\n Parameters\n ----------\n mode : {'lr', 'ud'}\n\n - 'lr' - apply the left/right flip\n - 'ud' - apply the upside/down flip\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if mode == 'lr':\n return PIL.ImageOps.mirror(image)\n return PIL.ImageOps.flip(image)\n\n @apply_parallel\n def invert(self, image, channels='all'):\n \"\"\" Invert givn channels.\n\n Parameters\n ----------\n channels : int, sequence\n Indices of the channels to invert.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if channels == 'all':\n image = PIL.ImageChops.invert(image)\n else:\n bands = list(image.split())\n channels = (channels,) if isinstance(channels, Number) else channels\n for channel in channels:\n bands[channel] = PIL.ImageChops.invert(bands[channel])\n image = PIL.Image.merge('RGB', bands)\n return image\n\n @apply_parallel\n def salt(self, image, p_noise=.015, color=255, size=(1, 1)):\n \"\"\" Set random pixel on image to givan value.\n\n Every pixel will be set to ``color`` value with probability ``p_noise``.\n\n Parameters\n ----------\n p_noise : float\n Probability of salting a pixel.\n color : float, int, sequence, callable\n Color's value.\n\n - int, float, sequence -- value of color\n - callable -- color is sampled for every chosen pixel (rules are the same as for int, float and sequence)\n size : int, sequence of int, callable\n Size of salt\n\n - int -- square salt with side ``size``\n - sequence -- recangular salt in the form (row, columns)\n - callable -- size is sampled for every chosen pixel (rules are the same as for int and sequence)\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n mask_size = np.asarray(self._get_image_shape(image))\n mask_salt = np.random.binomial(1, p_noise, size=mask_size).astype(bool)\n image = np.array(image)\n if isinstance(size, (tuple, int)) and size in [1, (1, 1)] and not callable(color):\n image[mask_salt] = color\n else:\n size_lambda = size if callable(size) else lambda: size\n color_lambda = color if callable(color) else lambda: color\n mask_salt = np.where(mask_salt)\n for i in range(len(mask_salt[0])):\n current_size = size_lambda()\n current_size = (current_size, current_size) if isinstance(current_size, Number) else current_size\n left_top = np.asarray((mask_salt[0][i], mask_salt[1][i]))\n right_bottom = np.minimum(left_top + current_size, self._get_image_shape(image))\n image[left_top[0]:right_bottom[0], left_top[1]:right_bottom[1]] = color_lambda()\n\n return PIL.Image.fromarray(image)\n\n @apply_parallel\n def clip(self, image, low=0, high=255):\n \"\"\" Truncate image's pixels.\n\n Parameters\n ----------\n low : int, float, sequence\n Actual pixel's value is equal max(value, low). 
If sequence is given, then its length must coincide\n with the number of channels in an image and each channel is thresholded separately\n high : int, float, sequence\n Actual pixel's value is equal min(value, high). If sequence is given, then its length must coincide\n with the number of channels in an image and each channel is thresholded separately\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n if isinstance(low, Number):\n low = tuple([low]*3)\n if isinstance(high, Number):\n high = tuple([high]*3)\n\n high = PIL.Image.new('RGB', image.size, high)\n low = PIL.Image.new('RGB', image.size, low)\n return PIL.ImageChops.lighter(PIL.ImageChops.darker(image, high), low)\n\n @apply_parallel\n def enhance(self, image, layout='hcbs', factor=(1, 1, 1, 1)):\n \"\"\" Apply enhancements from PIL.ImageEnhance to the image.\n\n Parameters\n ----------\n layout : str\n defines layout of operations, default is `hcbs`:\n h - color\n c - contrast\n b - brightness\n s - sharpness\n\n factor : float or tuple of float\n factor of enhancement for each operation listed in `layout`.\n \"\"\"\n enhancements = {\n 'h': 'Color',\n 'c': 'Contrast',\n 'b': 'Brightness',\n 's': 'Sharpness'\n }\n\n if isinstance(factor, float):\n factor = (factor,) * len(layout)\n if len(layout) != len(factor):\n raise ValueError(\"'layout' and 'factor' should be of same length!\")\n\n for alias, multiplier in zip(layout, factor):\n enhancement = enhancements.get(alias)\n if enhancement is None:\n raise ValueError('Unknown enhancement alias: ', alias)\n image = getattr(PIL.ImageEnhance, enhancement)(image).enhance(multiplier)\n\n return image\n\n @apply_parallel\n def multiply(self, image, multiplier=1., clip=False, preserve_type=False):\n \"\"\" Multiply each pixel by the given multiplier.\n\n Parameters\n ----------\n multiplier : float, sequence\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n multiplier = np.float32(multiplier)\n if isinstance(image, PIL.Image.Image):\n if preserve_type is False:\n warnings.warn(\"Note that some info might be lost during `multiply` transformation since PIL.image \"\n \"stores data as `np.uint8`. To suppress this warning, use `preserve_type=True` or \"\n \"consider using `to_array` action before multiplication.\")\n return PIL.Image.fromarray(np.clip(multiplier*np.asarray(image), 0, 255).astype(np.uint8))\n dtype = image.dtype if preserve_type else np.float\n if clip:\n image = np.clip(multiplier*image, 0, 255 if dtype == np.uint8 else 1.)\n else:\n image = multiplier * image\n return image.astype(dtype)\n\n @apply_parallel\n def add(self, image, term=1., clip=False, preserve_type=False):\n \"\"\" Add term to each pixel.\n\n Parameters\n ----------\n term : float, sequence\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. 
Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n term = np.float32(term)\n if isinstance(image, PIL.Image.Image):\n return PIL.Image.fromarray(np.clip(term+np.asarray(image), 0, 255).astype(np.uint8))\n dtype = image.dtype if preserve_type else np.float\n if clip:\n image = np.clip(term+image, 0, 255 if dtype == np.uint8 else 1.)\n else:\n image = term + image\n return image.astype(dtype)\n\n @apply_parallel\n def pil_convert(self, image, mode=\"L\"):\n \"\"\" Convert image. Actually calls ``image.convert(mode)``.\n\n Parameters\n ----------\n mode : str\n Pass 'L' to convert to grayscale\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return image.convert(mode)\n\n @apply_parallel\n def posterize(self, image, bits=4):\n \"\"\" Posterizes image.\n\n More concretely, it quantizes pixels' values so that they have``2^bits`` colors\n\n Parameters\n ----------\n bits : int\n Number of bits used to store a color's component.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n return PIL.ImageOps.posterize(image, bits)\n\n @apply_parallel\n def cutout(self, image, origin, shape, color):\n \"\"\" Fills given areas with color\n\n .. note:: It is assumed that ``origins``, ``shapes`` and ``colors`` have the same length.\n\n Parameters\n ----------\n origin : sequence, str\n Location of the cropping box. See :meth:`.ImagesBatch._calc_origin` for details.\n shape : sequence, int\n Shape of a filled box. Can be one of:\n - sequence - crop size in the form of (rows, columns)\n - int - shape has squared form\n\n color : sequence, number\n Color of a filled box. Can be one of:\n\n - sequence - (r,g,b) form\n - number - grayscale\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n\n Notes\n -----\n Using 'random' origin with `src` as list with multiple elements will not result in same crop for each\n element, as origin will be sampled independently for each `src` element.\n To randomly sample same origin for a number of components, use `R` named expression for `origin` argument.\n \"\"\"\n image = image.copy()\n shape = (shape, shape) if isinstance(shape, Number) else shape\n origin = self._calc_origin(shape, origin, self._get_image_shape(image))\n color = (color, color, color) if isinstance(color, Number) else color\n image.paste(PIL.Image.new('RGB', tuple(shape), tuple(color)), tuple(origin))\n return image\n\n def _assemble_patches(self, patches, *args, dst, **kwargs):\n \"\"\" Assembles patches after parallel execution.\n\n Parameters\n ----------\n patches : sequence\n Patches to gather. 
pathces.shape must be like (batch.size, patches_i, patch_height, patch_width, n_channels)\n dst : str\n Component to put patches in.\n \"\"\"\n _ = args, kwargs\n new_items = np.concatenate(patches)\n setattr(self, dst, new_items)\n\n @action\n @inbatch_parallel(init='indices', post='_assemble_patches')\n def split_to_patches(self, ix, patch_shape, stride=1, drop_last=False, src='images', dst=None):\n \"\"\" Splits image to patches.\n\n Small images with the same shape (``patch_shape``) are cropped from the original one with stride ``stride``.\n\n Parameters\n ----------\n patch_shape : int, sequence\n Patch's shape in the from (rows, columns). If int is given then patches have square shape.\n stride : int, square\n Step of the moving window from which patches are cropped. If int is given then the window has square shape.\n drop_last : bool\n Whether to drop patches whose window covers area out of the image.\n If False is passed then these patches are cropped from the edge of an image. See more in tutorials.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n _ = dst\n image = self.get(ix, src)\n image_shape = self._get_image_shape(image)\n image = np.array(image)\n stride = (stride, stride) if isinstance(stride, Number) else stride\n patch_shape = (patch_shape, patch_shape) if isinstance(patch_shape, Number) else patch_shape\n patches = []\n\n def _iterate_columns(row_from, row_to):\n column = 0\n while column < image_shape[1]-patch_shape[1]+1:\n patches.append(PIL.Image.fromarray(image[row_from:row_to, column:column+patch_shape[1]]))\n column += stride[1]\n if not drop_last and column + patch_shape[1] != image_shape[1]:\n patches.append(PIL.Image.fromarray(image[row_from:row_to,\n image_shape[1]-patch_shape[1]:image_shape[1]]))\n\n row = 0\n while row < image_shape[0]-patch_shape[0]+1:\n _iterate_columns(row, row+patch_shape[0])\n row += stride[0]\n if not drop_last and row + patch_shape[0] != image_shape[0]:\n _iterate_columns(image_shape[0]-patch_shape[0], image_shape[0])\n\n return np.array(patches, dtype=object)\n\n @apply_parallel\n def additive_noise(self, image, noise, clip=False, preserve_type=False):\n \"\"\" Add additive noise to an image.\n\n Parameters\n ----------\n noise : callable\n Distribution. Must have ``size`` parameter.\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. Default is 'images'.\n dst : str\n Component to write images to. Default is 'images'.\n p : float\n Probability of applying the transform. Default is 1.\n \"\"\"\n noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)\n return self._add_(image, noise, clip, preserve_type)\n\n @apply_parallel\n def multiplicative_noise(self, image, noise, clip=False, preserve_type=False):\n \"\"\" Add multiplicative noise to an image.\n\n Parameters\n ----------\n noise : callable\n Distribution. Must have ``size`` parameter.\n clip : bool\n whether to force image's pixels to be in [0, 255] or [0, 1.]\n preserve_type : bool\n Whether to preserve ``dtype`` of transformed images.\n If ``False`` is given then the resulting type will be ``np.float``.\n src : str\n Component to get images from. 
Default is 'images'.\n        dst : str\n            Component to write images to. Default is 'images'.\n        p : float\n            Probability of applying the transform. Default is 1.\n        \"\"\"\n        noise = noise(size=(*image.size, len(image.getbands())) if isinstance(image, PIL.Image.Image) else image.shape)\n        return self._multiply_(image, noise, clip, preserve_type)\n\n    @apply_parallel\n    def elastic_transform(self, image, alpha, sigma, **kwargs):\n        \"\"\" Deformation of images as described by Simard, Steinkraus and Platt, `Best Practices for Convolutional\n        Neural Networks applied to Visual Document Analysis <http://cognitivemedium.com/assets/rmnist/Simard.pdf>`_.\n\n        Code slightly differs from `<https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`_.\n\n        Parameters\n        ----------\n        alpha : number\n            maximum of vectors' norms.\n        sigma : number\n            Smooth factor.\n        src : str\n            Component to get images from. Default is 'images'.\n        dst : str\n            Component to write images to. Default is 'images'.\n        p : float\n            Probability of applying the transform. Default is 1.\n        \"\"\"\n        image = np.array(image)\n        # full shape is needed\n        shape = image.shape\n        if len(shape) == 2:\n            image = image[..., None]\n            shape = image.shape\n\n        kwargs.setdefault('mode', 'constant')\n        kwargs.setdefault('cval', 0)\n\n        column_shift = self._sp_gaussian_filter_(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha\n        row_shift = self._sp_gaussian_filter_(np.random.uniform(-1, 1, size=shape), sigma, **kwargs) * alpha\n\n        row, column, channel = np.meshgrid(range(shape[0]), range(shape[1]), range(shape[2]))\n\n        indices = (column + column_shift, row + row_shift, channel)\n\n        distorted_image = self._sp_map_coordinates_(image, indices, order=1, mode='reflect')\n\n        if shape[-1] == 1:\n            return PIL.Image.fromarray(np.uint8(distorted_image.reshape(image.shape))[..., 0])\n        return PIL.Image.fromarray(np.uint8(distorted_image.reshape(image.shape)))\n"
] | [
[
"tensorflow.train.piecewise_constant",
"tensorflow.mod",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.abs",
"tensorflow.sin"
],
[
"numpy.sum",
"numpy.asarray"
],
[
"numpy.random.binomial",
"numpy.random.uniform",
"numpy.zeros",
"numpy.stack",
"numpy.float32",
"numpy.asarray",
"numpy.where",
"numpy.clip",
"numpy.array",
"numpy.concatenate",
"numpy.random.randint",
"numpy.unique"
]
] |
sayanmondal2098/pandas | [
"a1fee9199eba7ebf423880243936b9f1501d3d3a"
] | [
"pandas/tests/io/parser/test_parse_dates.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nTests date parsing functionality for all of the\nparsers defined in parsers.py\n\"\"\"\n\nfrom datetime import date, datetime\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslib import Timestamp\nfrom pandas._libs.tslibs import parsing\nfrom pandas.compat import lrange, parse_date\nfrom pandas.compat.numpy import np_array_datetime64_compat\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, Index, MultiIndex\nfrom pandas.core.indexes.datetimes import date_range\nimport pandas.util.testing as tm\n\nimport pandas.io.date_converters as conv\nimport pandas.io.parsers as parsers\n\n\ndef test_separator_date_conflict(all_parsers):\n # Regression test for gh-4678\n #\n # Make sure thousands separator and\n # date parsing do not conflict.\n parser = all_parsers\n data = \"06-02-2013;13:00;1-000.215\"\n expected = DataFrame([[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],\n columns=[\"Date\", 2])\n\n df = parser.read_csv(StringIO(data), sep=\";\", thousands=\"-\",\n parse_dates={\"Date\": [0, 1]}, header=None)\n tm.assert_frame_equal(df, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col_custom(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n\n def date_parser(*date_cols):\n \"\"\"\n Test date parser.\n\n Parameters\n ----------\n date_cols : args\n The list of data columns to parse.\n\n Returns\n -------\n parsed : Series\n \"\"\"\n return parsing.try_parse_dates(parsers._concat_date_cols(date_cols))\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=date_parser, prefix=\"X\",\n parse_dates={\"actual\": [1, 2],\n \"nominal\": [1, 3]},\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are 
entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"keep_date_col\", [True, False])\ndef test_multiple_date_col(all_parsers, keep_date_col):\n data = \"\"\"\\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None,\n prefix=\"X\", parse_dates=[[1, 2], [1, 3]],\n keep_date_col=keep_date_col)\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", \"19990127\", \" 19:00:00\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", \"19990127\", \" 20:00:00\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", \"19990127\", \" 21:00:00\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", \"19990127\", \" 22:00:00\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", \"19990127\", \" 23:00:00\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"X1_X2\", \"X1_X3\", \"X0\", \"X1\", \"X2\",\n \"X3\", \"X4\", \"X5\", \"X6\", \"X7\", \"X8\"])\n\n if not keep_date_col:\n expected = expected.drop([\"X1\", \"X2\", \"X3\"], axis=1)\n elif parser.engine == \"python\":\n expected[\"X1\"] = expected[\"X1\"].astype(np.int64)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_col_as_index_col(all_parsers):\n data = \"\"\"\\\nKORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=None, prefix=\"X\",\n parse_dates=[1], index_col=1)\n\n index = Index([datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 22, 0)], name=\"X1\")\n expected = DataFrame([\n [\"KORD\", \" 18:56:00\", 0.81, 2.81, 7.2, 0.0, 280.0],\n [\"KORD\", \" 19:56:00\", 0.01, 2.21, 7.2, 0.0, 260.0],\n [\"KORD\", \" 20:56:00\", -0.59, 2.21, 5.7, 0.0, 280.0],\n [\"KORD\", \" 21:18:00\", -0.99, 2.01, 3.6, 0.0, 270.0],\n [\"KORD\", \" 21:56:00\", -0.59, 1.71, 5.1, 0.0, 290.0],\n ], columns=[\"X0\", \"X2\", \"X3\", \"X4\", \"X5\", \"X6\", \"X7\"], index=index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_int_cast(all_parsers):\n data = (\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n 
\"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\")\n parse_dates = {\"actual\": [1, 2], \"nominal\": [1, 3]}\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), header=None,\n date_parser=conv.parse_date_time,\n parse_dates=parse_dates, prefix=\"X\")\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59],\n ], columns=[\"actual\", \"nominal\", \"X0\", \"X4\"])\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_col_timestamp_parse(all_parsers):\n parser = all_parsers\n data = \"\"\"05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25\n05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates=[[0, 1]],\n header=None, date_parser=Timestamp)\n expected = DataFrame([\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 1, \"E\", 0, np.nan, 1306.25],\n [Timestamp(\"05/31/2012, 15:30:00.029\"),\n 1306.25, 8, \"E\", 0, np.nan, 1306.25]\n ], columns=[\"0_1\", 2, 3, 4, 5, 6, 7])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_with_header(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\"\"\"\n\n result = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]})\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,parse_dates,msg\", [\n (\"\"\"\\\ndate_NominalTime,date,NominalTime\nKORD1,19990127, 19:00:00\nKORD2,19990127, 20:00:00\"\"\", [[1, 2]], (\"New date column already \"\n \"in dict date_NominalTime\")),\n (\"\"\"\\\nID,date,nominalTime\nKORD,19990127, 19:00:00\nKORD,19990127, 20:00:00\"\"\", 
dict(ID=[1, 2]), \"Date column ID already in dict\")\n])\ndef test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=parse_dates)\n\n\ndef test_date_parser_int_bug(all_parsers):\n # see gh-3071\n parser = all_parsers\n data = (\"posix_timestamp,elapsed,sys,user,queries,query_time,rows,\"\n \"accountid,userid,contactid,level,silo,method\\n\"\n \"1343103150,0.062353,0,4,6,0.01690,3,\"\n \"12345,1,-1,3,invoice_InvoiceResource,search\\n\")\n\n result = parser.read_csv(\n StringIO(data), index_col=0, parse_dates=[0],\n date_parser=lambda x: datetime.utcfromtimestamp(int(x)))\n expected = DataFrame([[0.062353, 0, 4, 6, 0.01690, 3, 12345, 1, -1,\n 3, \"invoice_InvoiceResource\", \"search\"]],\n columns=[\"elapsed\", \"sys\", \"user\", \"queries\",\n \"query_time\", \"rows\", \"accountid\",\n \"userid\", \"contactid\", \"level\",\n \"silo\", \"method\"],\n index=Index([Timestamp(\"2012-07-24 04:12:30\")],\n name=\"posix_timestamp\"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nat_parse(all_parsers):\n # see gh-3062\n parser = all_parsers\n df = DataFrame(dict({\"A\": np.asarray(lrange(10), dtype=\"float64\"),\n \"B\": pd.Timestamp(\"20010101\")}))\n df.iloc[3:6, :] = np.nan\n\n with tm.ensure_clean(\"__nat_parse_.csv\") as path:\n df.to_csv(path)\n\n result = parser.read_csv(path, index_col=0, parse_dates=[\"B\"])\n tm.assert_frame_equal(result, df)\n\n\ndef test_csv_custom_parser(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(\n StringIO(data),\n date_parser=lambda x: datetime.strptime(x, \"%Y%m%d\"))\n expected = parser.read_csv(StringIO(data), parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_implicit_first_col(all_parsers):\n data = \"\"\"A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), parse_dates=True)\n\n expected = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_string(all_parsers):\n data = \"\"\"date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=\"date\",\n parse_dates=[\"date\"])\n index = date_range(\"1/1/2009\", periods=3)\n index.name = \"date\"\n\n expected = DataFrame({\"A\": [\"a\", \"b\", \"c\"], \"B\": [1, 3, 4],\n \"C\": [2, 4, 5]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\n# Bug in https://github.com/dateutil/dateutil/issues/217\n# has been addressed, but we just don't pass in the `yearfirst`\[email protected](reason=\"yearfirst is not surfaced in read_*\")\[email protected](\"parse_dates\", [\n [[\"date\", \"time\"]],\n [[0, 1]]\n])\ndef test_yy_format_with_year_first(all_parsers, parse_dates):\n data = \"\"\"date,time,B,C\n090131,0010,1,2\n090228,1020,3,4\n090331,0830,5,6\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=parse_dates)\n index = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0)],\n dtype=object, name=\"date_time\")\n expected = DataFrame({\"B\": [1, 3, 5], \"C\": [2, 4, 6]}, index=index)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"parse_dates\", [[0, 2], [\"a\", \"c\"]])\ndef 
test_parse_dates_column_list(all_parsers, parse_dates):\n data = \"a,b,c\\n01/01/2010,1,15/02/2010\"\n parser = all_parsers\n\n expected = DataFrame({\"a\": [datetime(2010, 1, 1)], \"b\": [1],\n \"c\": [datetime(2010, 2, 15)]})\n expected = expected.set_index([\"a\", \"b\"])\n\n result = parser.read_csv(StringIO(data), index_col=[0, 1],\n parse_dates=parse_dates, dayfirst=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"index_col\", [[0, 1], [1, 0]])\ndef test_multi_index_parse_dates(all_parsers, index_col):\n data = \"\"\"index1,index2,A,B,C\n20090101,one,a,1,2\n20090101,two,b,3,4\n20090101,three,c,4,5\n20090102,one,a,1,2\n20090102,two,b,3,4\n20090102,three,c,4,5\n20090103,one,a,1,2\n20090103,two,b,3,4\n20090103,three,c,4,5\n\"\"\"\n parser = all_parsers\n index = MultiIndex.from_product([\n (datetime(2009, 1, 1), datetime(2009, 1, 2),\n datetime(2009, 1, 3)), (\"one\", \"two\", \"three\")],\n names=[\"index1\", \"index2\"])\n\n # Out of order.\n if index_col == [1, 0]:\n index = index.swaplevel(0, 1)\n\n expected = DataFrame([[\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5],\n [\"a\", 1, 2], [\"b\", 3, 4], [\"c\", 4, 5]],\n columns=[\"A\", \"B\", \"C\"], index=index)\n result = parser.read_csv(StringIO(data), index_col=index_col,\n parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [\n dict(dayfirst=True), dict(day_first=True)\n])\ndef test_parse_dates_custom_euro_format(all_parsers, kwargs):\n parser = all_parsers\n data = \"\"\"foo,bar,baz\n31/01/2010,1,2\n01/02/2010,1,NA\n02/02/2010,1,2\n\"\"\"\n if \"dayfirst\" in kwargs:\n df = parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n header=0, index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),\n datetime(2010, 2, 2)], name=\"time\")\n expected = DataFrame({\"Q\": [1, 1, 1], \"NTU\": [2, np.nan, 2]},\n index=exp_index, columns=[\"Q\", \"NTU\"])\n tm.assert_frame_equal(df, expected)\n else:\n msg = \"got an unexpected keyword argument 'day_first'\"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), names=[\"time\", \"Q\", \"NTU\"],\n date_parser=lambda d: parse_date(d, **kwargs),\n skiprows=[0], index_col=0, parse_dates=True,\n na_values=[\"NA\"])\n\n\ndef test_parse_tz_aware(all_parsers):\n # See gh-1693\n parser = all_parsers\n data = \"Date,x\\n2012-06-13T01:39:00Z,0.5\"\n\n result = parser.read_csv(StringIO(data), index_col=0,\n parse_dates=True)\n expected = DataFrame({\"x\": [0.5]}, index=Index([Timestamp(\n \"2012-06-13 01:39:00+00:00\")], name=\"Date\"))\n tm.assert_frame_equal(result, expected)\n assert result.index.tz is pytz.utc\n\n\[email protected](\"parse_dates,index_col\", [\n ({\"nominal\": [1, 2]}, \"nominal\"),\n ({\"nominal\": [1, 2]}, 0),\n ([[1, 2]], 0),\n])\ndef test_multiple_date_cols_index(all_parsers, parse_dates, index_col):\n parser = all_parsers\n data = \"\"\"\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, 
-0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD1\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD2\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD3\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD4\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD5\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD6\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"ActualTime\", \"TDew\",\n \"TAir\", \"Windspeed\", \"Precip\", \"WindDir\"])\n expected = expected.set_index(\"nominal\")\n\n if not isinstance(parse_dates, dict):\n expected.index.name = \"date_NominalTime\"\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates,\n index_col=index_col)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_chunked(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n expected = DataFrame([\n [datetime(1999, 1, 27, 19, 0), \"KORD\", \" 18:56:00\",\n 0.81, 2.81, 7.2, 0.0, 280.0],\n [datetime(1999, 1, 27, 20, 0), \"KORD\", \" 19:56:00\",\n 0.01, 2.21, 7.2, 0.0, 260.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 20:56:00\",\n -0.59, 2.21, 5.7, 0.0, 280.0],\n [datetime(1999, 1, 27, 21, 0), \"KORD\", \" 21:18:00\",\n -0.99, 2.01, 3.6, 0.0, 270.0],\n [datetime(1999, 1, 27, 22, 0), \"KORD\", \" 21:56:00\",\n -0.59, 1.71, 5.1, 0.0, 290.0],\n [datetime(1999, 1, 27, 23, 0), \"KORD\", \" 22:56:00\",\n -0.59, 1.71, 4.6, 0.0, 280.0],\n ], columns=[\"nominal\", \"ID\", \"actualTime\", \"A\", \"B\", \"C\", \"D\", \"E\"])\n expected = expected.set_index(\"nominal\")\n\n reader = parser.read_csv(StringIO(data), parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\", chunksize=2)\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_multiple_date_col_named_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n\n with_indices = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]},\n index_col=\"nominal\")\n with_names = parser.read_csv(StringIO(data), index_col=\"nominal\",\n parse_dates={\"nominal\": [\n \"date\", \"nominalTime\"]})\n tm.assert_frame_equal(with_indices, with_names)\n\n\ndef 
test_multiple_date_col_multiple_index_compat(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n\"\"\"\n result = parser.read_csv(StringIO(data), index_col=[\"nominal\", \"ID\"],\n parse_dates={\"nominal\": [1, 2]})\n expected = parser.read_csv(StringIO(data),\n parse_dates={\"nominal\": [1, 2]})\n\n expected = expected.set_index([\"nominal\", \"ID\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"kwargs\", [dict(), dict(index_col=\"C\")])\ndef test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):\n # see gh-5636\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=\"C\", **kwargs)\n\n\[email protected](\"parse_dates\", [\n (1,), np.array([4, 5]), {1, 3, 3}\n])\ndef test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):\n parser = all_parsers\n msg = (\"Only booleans, lists, and dictionaries \"\n \"are accepted for the 'parse_dates' parameter\")\n data = \"\"\"A,B,C\n 1,2,2003-11-1\"\"\"\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=(1,))\n\n\ndef test_parse_dates_empty_string(all_parsers):\n # see gh-2263\n parser = all_parsers\n data = \"Date,test\\n2012-01-01,1\\n,2\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"Date\"],\n na_filter=False)\n\n expected = DataFrame([[datetime(2012, 1, 1), 1], [pd.NaT, 2]],\n columns=[\"Date\", \"test\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"a\\n04.15.2016\", dict(parse_dates=[\"a\"]),\n DataFrame([datetime(2016, 4, 15)], columns=[\"a\"])),\n (\"a\\n04.15.2016\", dict(parse_dates=True, index_col=0),\n DataFrame(index=DatetimeIndex([\"2016-04-15\"], name=\"a\"))),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=[\"a\", \"b\"]),\n DataFrame([[datetime(2016, 4, 15), datetime(2013, 9, 16)]],\n columns=[\"a\", \"b\"])),\n (\"a,b\\n04.15.2016,09.16.2013\", dict(parse_dates=True, index_col=[0, 1]),\n DataFrame(index=MultiIndex.from_tuples(\n [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=[\"a\", \"b\"]))),\n])\ndef test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):\n # see gh-14066\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), thousands=\".\", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_time_multi_level_column_name(all_parsers):\n data = \"\"\"\\\nD,T,A,B\ndate, time,a,b\n2001-01-05, 09:00:00, 0.0, 10.\n2001-01-06, 00:00:00, 1.0, 11.\n\"\"\"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), header=[0, 1],\n parse_dates={\"date_time\": [0, 1]},\n date_parser=conv.parse_date_time)\n\n expected_data = [[datetime(2001, 1, 5, 9, 0, 0), 0., 10.],\n [datetime(2001, 1, 6, 0, 0, 0), 1., 11.]]\n expected = DataFrame(expected_data,\n columns=[\"date_time\", (\"A\", \"a\"), (\"B\", \"b\")])\n 
tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,kwargs,expected\", [\n (\"\"\"\\\ndate,time,a,b\n2001-01-05, 10:00:00, 0.0, 10.\n2001-01-05, 00:00:00, 1., 11.\n\"\"\", dict(header=0, parse_dates={\"date_time\": [0, 1]}),\n DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],\n [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0]],\n columns=[\"date_time\", \"a\", \"b\"])),\n ((\"KORD,19990127, 19:00:00, 18:56:00, 0.8100\\n\"\n \"KORD,19990127, 20:00:00, 19:56:00, 0.0100\\n\"\n \"KORD,19990127, 21:00:00, 20:56:00, -0.5900\\n\"\n \"KORD,19990127, 21:00:00, 21:18:00, -0.9900\\n\"\n \"KORD,19990127, 22:00:00, 21:56:00, -0.5900\\n\"\n \"KORD,19990127, 23:00:00, 22:56:00, -0.5900\"),\n dict(header=None, parse_dates={\"actual\": [1, 2], \"nominal\": [1, 3]}),\n DataFrame([\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56),\n \"KORD\", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56),\n \"KORD\", 0.01],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 20, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 21, 0), datetime(1999, 1, 27, 21, 18),\n \"KORD\", -0.99],\n [datetime(1999, 1, 27, 22, 0), datetime(1999, 1, 27, 21, 56),\n \"KORD\", -0.59],\n [datetime(1999, 1, 27, 23, 0), datetime(1999, 1, 27, 22, 56),\n \"KORD\", -0.59]], columns=[\"actual\", \"nominal\", 0, 4])),\n])\ndef test_parse_date_time(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time,\n **kwargs)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_fields(all_parsers):\n parser = all_parsers\n data = (\"year,month,day,a\\n2001,01,10,10.\\n\"\n \"2001,02,1,11.\")\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ymd\": [0, 1, 2]},\n date_parser=conv.parse_date_fields)\n\n expected = DataFrame([[datetime(2001, 1, 10), 10.],\n [datetime(2001, 2, 1), 11.]], columns=[\"ymd\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_all_fields(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0,0.0,10.\n2001,01,5,10,0,00,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_datetime_fractional_seconds(all_parsers):\n parser = all_parsers\n data = \"\"\"\\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0.123456,0.0,10.\n2001,01,5,10,0,0.500000,1.,11.\n\"\"\"\n result = parser.read_csv(StringIO(data), header=0,\n date_parser=conv.parse_all_fields,\n parse_dates={\"ymdHMS\": [0, 1, 2, 3, 4, 5]})\n expected = DataFrame([[datetime(2001, 1, 5, 10, 0, 0,\n microsecond=123456), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0,\n microsecond=500000), 1.0, 11.0]],\n columns=[\"ymdHMS\", \"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_generic(all_parsers):\n parser = all_parsers\n data = \"year,month,day,a\\n2001,01,10,10.\\n2001,02,1,11.\"\n\n result = parser.read_csv(StringIO(data), header=0,\n parse_dates={\"ym\": [0, 1]},\n date_parser=lambda y, m: date(year=int(y),\n month=int(m),\n day=1))\n expected = 
DataFrame([[date(2001, 1, 1), 10, 10.],\n [date(2001, 2, 1), 1, 11.]],\n columns=[\"ym\", \"day\", \"a\"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_parser_resolution_if_not_ns(all_parsers):\n # see gh-10245\n parser = all_parsers\n data = \"\"\"\\\ndate,time,prn,rxstatus\n2013-11-03,19:00:00,126,00E80000\n2013-11-03,19:00:00,23,00E80000\n2013-11-03,19:00:00,13,00E80000\n\"\"\"\n\n def date_parser(dt, time):\n return np_array_datetime64_compat(dt + \"T\" + time + \"Z\",\n dtype=\"datetime64[s]\")\n\n result = parser.read_csv(StringIO(data), date_parser=date_parser,\n parse_dates={\"datetime\": [\"date\", \"time\"]},\n index_col=[\"datetime\", \"prn\"])\n\n datetimes = np_array_datetime64_compat([\"2013-11-03T19:00:00Z\"] * 3,\n dtype=\"datetime64[s]\")\n expected = DataFrame(data={\"rxstatus\": [\"00E80000\"] * 3},\n index=MultiIndex.from_tuples(\n [(datetimes[0], 126), (datetimes[1], 23),\n (datetimes[2], 13)], names=[\"datetime\", \"prn\"]))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_column_with_empty_string(all_parsers):\n # see gh-6428\n parser = all_parsers\n data = \"case,opdate\\n7,10/18/2006\\n7,10/18/2008\\n621, \"\n result = parser.read_csv(StringIO(data), parse_dates=[\"opdate\"])\n\n expected_data = [[7, \"10/18/2006\"],\n [7, \"10/18/2008\"],\n [621, \" \"]]\n expected = DataFrame(expected_data, columns=[\"case\", \"opdate\"])\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"data,expected\", [\n (\"a\\n135217135789158401\\n1352171357E+5\",\n DataFrame({\"a\": [135217135789158401,\n 135217135700000]}, dtype=\"float64\")),\n (\"a\\n99999999999\\n123456789012345\\n1234E+0\",\n DataFrame({\"a\": [99999999999,\n 123456789012345,\n 1234]}, dtype=\"float64\"))\n])\[email protected](\"parse_dates\", [True, False])\ndef test_parse_date_float(all_parsers, data, expected, parse_dates):\n # see gh-2697\n #\n # Date parsing should fail, so we leave the data untouched\n # (i.e. float precision should remain unchanged).\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = \"\"\"dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400\"\"\"\n result = parser.read_csv(StringIO(data), parse_dates=[\"dt\"])\n\n dti = pd.date_range(start=\"2018-01-04 09:01:00\",\n end=\"2018-01-04 09:05:00\", freq=\"1min\",\n tz=pytz.FixedOffset(540))\n expected_data = {\"dt\": dti, \"val\": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.util.testing.ensure_clean",
"pandas.compat.parse_date",
"pandas.DatetimeIndex",
"pandas.core.indexes.datetimes.date_range",
"pandas.DataFrame",
"pandas._libs.tslib.Timestamp",
"pandas.io.parsers._concat_date_cols",
"pandas.MultiIndex.from_tuples",
"pandas.compat.numpy.np_array_datetime64_compat",
"pandas.compat.lrange",
"numpy.array",
"pandas.util.testing.assert_frame_equal",
"pandas.Timestamp"
]
] |
sambuddinc/DLTK | [
"9511b0b9860118a9285c2fe730ea49dfe247cab6"
] | [
"data/IXI_HH/download_IXI_HH.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Download and extract the IXI Hammersmith Hospital 3T dataset\n\nurl: http://brain-development.org/ixi-dataset/\nref: IXI – Information eXtraction from Images (EPSRC GR/S21533/02)\n\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future.standard_library import install_aliases # py 2/3 compatability\ninstall_aliases()\n\nfrom urllib.request import FancyURLopener\n\nimport os.path\nimport tarfile\nimport pandas as pd\nimport glob\nimport SimpleITK as sitk\nimport numpy as np\n\nDOWNLOAD_IMAGES = True\nEXTRACT_IMAGES = True\nPROCESS_OTHER = True\nRESAMPLE_IMAGES = True\nCLEAN_UP = True\n\n\ndef resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):\n original_spacing = itk_image.GetSpacing()\n original_size = itk_image.GetSize()\n\n out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),\n int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),\n int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]\n\n resample = sitk.ResampleImageFilter()\n resample.SetOutputSpacing(out_spacing)\n resample.SetSize(out_size)\n resample.SetOutputDirection(itk_image.GetDirection())\n resample.SetOutputOrigin(itk_image.GetOrigin())\n resample.SetTransform(sitk.Transform())\n resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())\n\n if is_label:\n resample.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample.SetInterpolator(sitk.sitkBSpline)\n\n return resample.Execute(itk_image)\n\n\ndef reslice_image(itk_image, itk_ref, is_label=False):\n resample = sitk.ResampleImageFilter()\n resample.SetReferenceImage(itk_ref)\n\n if is_label:\n resample.SetInterpolator(sitk.sitkNearestNeighbor)\n else:\n resample.SetInterpolator(sitk.sitkBSpline)\n\n return resample.Execute(itk_image)\n\n\nurls = {}\nurls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'\nurls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'\nurls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'\nurls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'\nurls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'\n\nfnames = {}\nfnames['t1'] = 't1.tar'\nfnames['t2'] = 't2.tar'\nfnames['pd'] = 'pd.tar'\nfnames['mra'] = 'mra.tar'\nfnames['demographic'] = 'demographic.xls'\n\n\nif DOWNLOAD_IMAGES:\n # Download all IXI data\n for key, url in urls.items():\n\n if not os.path.isfile(fnames[key]):\n print('Downloading {} from {}'.format(fnames[key], url))\n curr_file = FancyURLopener()\n curr_file.retrieve(url, fnames[key])\n else:\n print('File {} already exists. 
Skipping download.'.format(\n fnames[key]))\n\nif EXTRACT_IMAGES:\n # Extract the HH subset of IXI\n for key, fname in fnames.items():\n\n if (fname.endswith('.tar')):\n print('Extracting IXI HH data from {}.'.format(fnames[key]))\n output_dir = os.path.join('./orig/', key)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n t = tarfile.open(fname, 'r')\n for member in t.getmembers():\n if '-HH-' in member.name:\n t.extract(member, output_dir)\n\n\nif PROCESS_OTHER:\n # Process the demographic xls data and save to csv\n xls = pd.ExcelFile('demographic.xls')\n print(xls.sheet_names)\n\n df = xls.parse('Table')\n for index, row in df.iterrows():\n IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])\n df.loc[index, 'IXI_ID'] = IXI_id\n\n t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))\n t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))\n pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))\n mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))\n\n # Check if each entry is complete and drop if not\n # if not t1_exists and not t2_exists and not pd_exists and not mra\n # exists:\n if not (t1_exists and t2_exists and pd_exists and mra_exists):\n df.drop(index, inplace=True)\n\n # Write to csv file\n df.to_csv('demographic_HH.csv', index=False)\n\nif RESAMPLE_IMAGES:\n # Resample the IXI HH T2 images to 1mm isotropic and reslice all\n # others to it\n df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,\n na_values=[]).as_matrix()\n\n for i in df:\n IXI_id = i[0]\n print('Resampling {}'.format(IXI_id))\n\n t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]\n t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]\n pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]\n mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]\n\n t1 = sitk.ReadImage(t1_fn)\n t2 = sitk.ReadImage(t2_fn)\n pd = sitk.ReadImage(pd_fn)\n mra = sitk.ReadImage(mra_fn)\n\n # Resample to 1mm isotropic resolution\n t2_1mm = resample_image(t2)\n t1_1mm = reslice_image(t1, t2_1mm)\n pd_1mm = reslice_image(pd, t2_1mm)\n mra_1mm = reslice_image(mra, t2_1mm)\n\n output_dir = os.path.join('./1mm/', IXI_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))\n print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))\n print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))\n print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))\n\n sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))\n sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))\n sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))\n sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))\n\n # Resample to 2mm isotropic resolution\n t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])\n t1_2mm = reslice_image(t1, t2_2mm)\n pd_2mm = reslice_image(pd, t2_2mm)\n mra_2mm = reslice_image(mra, t2_2mm)\n\n output_dir = os.path.join('./2mm/', IXI_id)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))\n print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))\n print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))\n print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))\n\n sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))\n sitk.WriteImage(t2_2mm, 
os.path.join(output_dir, 'T2_2mm.nii.gz'))\n sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))\n sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))\n\n\nif CLEAN_UP:\n # Remove the .tar files\n for key, fname in fnames.items():\n if (fname.endswith('.tar')):\n os.remove(fname)\n\n # Remove all data in original resolution\n os.system('rm -rf orig')\n"
] | [
[
"numpy.round"
]
] |
klarman-cell-observatory/scCloud.py | [
"5a04a2f22574db044d018656ac4705ec83840226"
] | [
"sccloud/misc/misc.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom typing import List\nfrom anndata import AnnData\n\nfrom sccloud.io import read_input\n\n\ndef search_genes(\n data: AnnData,\n gene_list: List[str],\n rec_key: str = \"de_res\",\n measure: str = \"percentage\",\n) -> pd.DataFrame:\n \"\"\"Extract and display gene expressions for each cluster from an `anndata` object.\n\n This function helps to see marker expressions in clusters via the interactive python environment.\n\n Parameters\n ----------\n\n data: ``anndata.AnnData``\n Annotated data matrix containing the expression matrix and differential expression results.\n\n gene_list: ``List[str]``\n A list of gene symbols.\n\n rec_key: ``str``, optional, default: ``\"de_res\"``\n Keyword of DE analysis result stored in ``data.varm``.\n\n measure : ``str``, optional, default: ``\"percentage\"``\n Can be either ``\"percentage\"`` or ``\"mean_logExpr\"``:\n * ``percentage`` shows the percentage of cells expressed the genes;\n * ``mean_logExpr`` shows the mean log expression.\n\n Returns\n -------\n ``pandas.DataFrame``\n A data frame containing marker expressions in each cluster.\n\n Examples\n --------\n >>> results = scc.search_genes(adata, ['CD3E', 'CD4', 'CD8'])\n \"\"\"\n\n columns = [x for x in data.varm[rec_key].dtype.names if x.startswith(measure + \":\")]\n df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)\n return df.reindex(index=gene_list)\n\n\ndef search_de_genes(\n data: AnnData,\n gene_list: List[str],\n rec_key: str = \"de_res\",\n de_test: str = \"fisher\",\n de_alpha: float = 0.05,\n thre: float = 1.5,\n) -> pd.DataFrame:\n \"\"\"Extract and display differential expression analysis results of markers for each cluster.\n\n This function helps to see if markers are up or down regulated in each cluster via the interactive python environment: \n * ``++`` indicates up-regulated and fold change >= threshold;\n * ``+`` indicates up-regulated but fold change < threshold;\n * ``--`` indicates down-regulated and fold change <= 1 / threshold; \n * ``-`` indicates down-regulated but fold change > 1 / threshold;\n * ``?`` indicates not differentially expressed.\n\n Parameters\n ----------\n data: ``anndata.Anndata``\n Annotated data matrix containing the expression matrix and differential expression results.\n\n gene_list: ``List[str]``\n A list of gene symbols.\n\n rec_key: ``str``, optional, default: ``\"de_res\"``\n Keyword of DE analysis result stored in ``data.varm``.\n\n de_test : ``str``, optional, default: ``\"fisher\"``\n Differential expression test to look at, could be either ``t``, ``fisher`` or ``mwu``.\n\n de_alpha : ``float``, optional, default: ``0.05``\n False discovery rate.\n\n thre : ``float``, optional, default: ``1.5``\n Fold change threshold to determine if the marker is a strong DE (``++`` or ``--``) or weak DE (``+`` or ``-``).\n\n Returns\n -------\n ``pandas.DataFrame``\n A data frame containing marker differential expression results for each cluster.\n\n Examples\n --------\n >>> df = sccloud.misc.search_de_genes(adata, ['CD3E', 'CD4', 'CD8'], thre = 2.0)\n \"\"\"\n\n columns = [\n x for x in data.varm[rec_key].dtype.names if x.startswith(de_test + \"_qval:\")\n ]\n df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)\n df_de = df_de.reindex(index=gene_list)\n\n columns = [\n x\n for x in data.varm[rec_key].dtype.names\n if (\n x.startswith(\"percentage_fold_change:\")\n if de_test == \"fisher\"\n else x.startswith(\"log_fold_change:\")\n )\n ]\n df_fc = 
pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)\n df_fc = df_fc.reindex(index=gene_list)\n if de_test != \"fisher\":\n df_fc = np.exp(df_fc)\n\n results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype(\"U4\"))\n results[:] = \"?\"\n results[np.isnan(df_de)] = \"NaN\"\n results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = \"+\"\n results[(df_de <= de_alpha).values & (df_fc >= thre).values] = \"++\"\n results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = \"-\"\n results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = \"--\"\n\n clusts = [x.rpartition(\":\")[2] for x in columns]\n df = pd.DataFrame(data=results, index=gene_list, columns=clusts)\n return df\n\n\ndef show_attributes(\n input_file: str,\n show_attributes: bool,\n show_gene_attributes: bool,\n show_values_for_attributes: str,\n) -> None:\n \"\"\" Show data attributes. For command line use.\n \"\"\"\n\n data = read_input(input_file, h5ad_mode=\"r\")\n if show_attributes:\n print(\n \"Available sample attributes in input dataset: {0}\".format(\n \", \".join(data.obs.columns.values)\n )\n )\n if show_gene_attributes:\n print(\n \"Available gene attributes in input dataset: {0}\".format(\n \", \".join(data.var.columns.values)\n )\n )\n if not show_values_for_attributes is None:\n for attr in show_values_for_attributes.split(\",\"):\n print(\n \"Available values for attribute {0}: {1}.\".format(\n attr, \", \".join(np.unique(data.obs[attr]))\n )\n )\n\n\ndef perform_oneway_anova(\n data: AnnData,\n glist: List[str],\n restriction_vec: List[str],\n group_str: str,\n fdr_alpha: float = 0.05,\n res_key: str = None,\n) -> pd.DataFrame:\n \"\"\"Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.\n Parameters\n ----------\n\n data : `anndata` object\n An `anndata` object containing the expression matrix.\n glist : `list[str]`\n A list of gene symbols.\n restriction_vec : `list[str]`\n A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value\n group_str : `str`\n How to group selected cells for ANOVA analysis. If group_str is for pseudotime, it has two formats. 
1) 'pseudotime:time:n', which divides cells by equal pseudotime invertal; 2) 'pseudotime:size:n' divides cells by equal number of cells.\n fdr_alpha : `float`, optional (default: 0.05)\n False discovery rate.\n res_key : `str`, optional (default: None)\n Store results into data using res_key, the grouping information is stored in obs and the results is stored in uns.\n\n Returns\n -------\n `pandas.DataFrame`\n Results for genes that pass FDR control.\n\n Examples\n --------\n >>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')\n \"\"\"\n\n from scipy.stats import f_oneway\n from statsmodels.stats.multitest import fdrcorrection as fdr\n\n selected = np.ones(data.shape[0], dtype=bool)\n for rest_str in restriction_vec:\n attr, value_str = rest_str.split(\":\")\n values = value_str.split(\",\")\n selected = selected & np.isin(data.obs[attr], values)\n\n gene_list = np.array(glist)\n gene_list = gene_list[np.isin(gene_list, data.var_names)]\n ngene = gene_list.size\n\n newdat = data[selected, :][:, gene_list].copy()\n newdat.X = newdat.X.toarray()\n\n group_values = group_str.split(\":\")\n group_names = []\n col_names = []\n\n ngr = 0\n group_idx = None\n\n if group_values[0] == \"pseudotime\":\n assert len(group_values) == 3\n div_by = group_values[1]\n ngr = int(group_values[2])\n\n group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)\n pseudotimes = newdat.obs[\"pseudotime\"].values\n\n min_t = pseudotimes.min()\n max_t = pseudotimes.max()\n\n if div_by == \"time\":\n interval = (max_t - min_t) / ngr\n left = min_t - 1e-5\n for i in range(ngr):\n right = min_t + interval * (i + 1)\n name = \"({:.2f}, {:.2f}]\".format(left if left >= 0 else 0.0, right)\n group_names.append(name)\n group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)\n left = right\n else:\n assert div_by == \"size\"\n ords = np.argsort(pseudotimes)\n quotient = ords.size // ngr\n residule = ords.size % ngr\n\n fr = 0\n for i in range(ngr):\n to = fr + quotient + (i < residule)\n name = \"[{:.2f}, {:.2f}]\".format(\n pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]\n )\n group_names.append(name)\n group_idx[i][ords[fr:to]] = True\n fr = to\n\n else:\n assert len(group_values) == 2\n group_attr = group_values[0]\n tmp_str = group_values[1]\n groups_str = tmp_str.split(\";\")\n\n ngr = len(groups_str)\n group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)\n\n for i, gstr in enumerate(groups_str):\n name, values = gstr.split(\"~\")\n group_names.append(name)\n group_idx[i] = np.isin(newdat.obs[group_attr], values.split(\",\"))\n\n for i in range(ngr):\n print(\"Group {} has {} cells.\".format(group_names[i], group_idx[i].sum()))\n\n np.warnings.filterwarnings(\"ignore\")\n stats = np.zeros((ngene, 3 + ngr * 2))\n for i in range(ngene):\n arr_list = []\n for j in range(ngr):\n arr = newdat.X[group_idx[j], i]\n stats[i, 3 + j * 2] = arr.mean()\n stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size\n arr_list.append(arr)\n stats[i, 0], stats[i, 1] = f_oneway(*arr_list)\n if np.isnan(stats[i, 0]):\n stats[i, 0] = 0.0\n stats[i, 1] = 1.0\n passed, stats[:, 2] = fdr(stats[:, 1])\n\n cols = [\"fstat\", \"pval\", \"qval\"]\n for i in range(ngr):\n cols.extend([group_names[i] + \"_mean\", group_names[i] + \"_percent\"])\n raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)\n\n results = raw_results[raw_results[\"qval\"] <= fdr_alpha]\n results = results.sort_values(\"qval\")\n\n if res_key is not None:\n data.uns[res_key] = raw_results\n data.obs[res_key] 
= \"background\"\n for i in range(ngr):\n idx = np.zeros(data.shape[0], dtype=bool)\n idx[selected] = group_idx[i]\n data.obs.loc[idx, res_key] = group_names[i]\n\n return results\n"
] | [
[
"numpy.ones",
"numpy.zeros",
"numpy.dtype",
"pandas.DataFrame",
"numpy.argsort",
"numpy.exp",
"numpy.isin",
"scipy.stats.f_oneway",
"numpy.isnan",
"numpy.array",
"numpy.warnings.filterwarnings",
"numpy.unique"
]
] |
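The search_genes routine in the row above selects columns of a structured array stored in data.varm by the prefix "measure:". A minimal sketch of that selection idiom, using a hypothetical stand-in for the "de_res" record (all field names and values below are invented for illustration):

import numpy as np
import pandas as pd

# Hypothetical stand-in for data.varm["de_res"]: a structured array whose field
# names follow the "measure:cluster" convention that search_genes relies on.
de_res = np.zeros(3, dtype=[("percentage:0", "f8"), ("percentage:1", "f8"),
                            ("mean_logExpr:0", "f8"), ("mean_logExpr:1", "f8")])
de_res["percentage:0"] = [80.0, 5.0, 40.0]
de_res["percentage:1"] = [10.0, 90.0, 35.0]
var_names = ["CD3E", "CD4", "CD8A"]

# Same selection idiom as search_genes: keep only the fields of the chosen measure.
measure = "percentage"
columns = [x for x in de_res.dtype.names if x.startswith(measure + ":")]
df = pd.DataFrame({c: de_res[c] for c in columns}, index=var_names)
print(df.reindex(index=["CD3E", "CD8A"]))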
waldo2590/thunder | [
"967ff8f3e7c2fabe1705743d95eb2746d4329786"
] | [
"test/test_series_io.py"
] | [
"import pytest\nimport os\nimport glob\nimport json\nfrom numpy import arange, array, allclose, save, savetxt\n\nfrom bolt import array as barray\nfrom thunder.series.readers import fromarray, fromtext, frombinary, fromexample\n\npytestmark = pytest.mark.usefixtures(\"eng\")\n\n\ndef test_from_array(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_bolt(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n if eng is not None:\n b = barray(a, context=eng)\n else:\n b = barray(a)\n data = fromarray(b, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_vector(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, engine=eng)\n assert data.shape == (4, 2)\n assert data.dtype == 'int16'\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_array_index(eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n data = fromarray(a, index=[2, 3], engine=eng)\n assert allclose(data.index, [2, 3])\n\n\ndef test_from_text(tmpdir, eng):\n v = [[0, i] for i in range(10)]\n f = os.path.join(str(tmpdir), 'data.txt')\n savetxt(f, v, fmt='%.02g')\n data = fromtext(f, engine=eng)\n assert allclose(data.shape, (10, 2))\n assert data.dtype == 'float64'\n assert allclose(data.toarray(), v)\n\n\ndef test_from_text_skip(tmpdir):\n k = [[i] for i in range(10)]\n v = [[0, i] for i in range(10)]\n a = [kv[0] + kv[1] for kv in zip(k, v)]\n f = os.path.join(str(tmpdir), 'data.txt')\n savetxt(f, a, fmt='%.02g')\n data = fromtext(f, skip=1)\n assert allclose(data.shape, (10, 2))\n assert data.dtype == 'float64'\n assert allclose(data.toarray(), v)\n\n\ndef test_from_binary(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = os.path.join(str(tmpdir), 'data.bin')\n a.tofile(p)\n data = frombinary(p, shape=[4, 2], dtype='int16', engine=eng)\n assert allclose(data.shape, (4, 2))\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), a)\n\n\ndef test_from_binary_skip(tmpdir, eng):\n k = [[i] for i in range(10)]\n v = [[0, i] for i in range(10)]\n a = array([kv[0] + kv[1] for kv in zip(k, v)], dtype='int16')\n p = os.path.join(str(tmpdir), 'data.bin')\n a.tofile(p)\n data = frombinary(p, shape=[10, 2], dtype='int16', skip=1, engine=eng)\n assert allclose(data.shape, (10, 2))\n assert allclose(data.index, [0, 1])\n assert allclose(data.toarray(), v)\n\n\ndef test_to_binary(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n fromarray(a, npartitions=1, engine=eng).tobinary(p)\n files = [os.path.basename(f) for f in glob.glob(str(tmpdir) + '/data/*')]\n assert sorted(files) == ['SUCCESS', 'conf.json', 'series-00000.bin']\n with open(str(tmpdir) + '/data/conf.json', 'r') as f:\n conf = json.load(f)\n assert conf['shape'] == [4, 2]\n assert conf['dtype'] == 'int16'\n\n\ndef test_to_binary_roundtrip(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n data = fromarray(a, npartitions=1, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_to_binary_roundtrip_partitioned(tmpdir, eng):\n a = arange(8, dtype='int16').reshape((4, 2))\n p = str(tmpdir) + '/data'\n data = fromarray([a, a], 
npartitions=4, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_to_binary_roundtrip_3d(tmpdir, eng):\n a = arange(16, dtype='int16').reshape((4, 2, 2))\n p = str(tmpdir) + '/data'\n data = fromarray(a, npartitions=1, engine=eng)\n data.tobinary(p)\n loaded = frombinary(p, engine=eng)\n assert allclose(data.toarray(), loaded.toarray())\n\n\ndef test_from_example(eng):\n return\n data = fromexample('fish', engine=eng)\n assert allclose(data.toarray().shape, (76, 87, 2, 20))\n data = fromexample('mouse', engine=eng)\n assert allclose(data.toarray().shape, (64, 64, 20))\n data = fromexample('iris', engine=eng)\n assert allclose(data.toarray().shape, (150, 4))\n"
] | [
[
"numpy.arange",
"numpy.allclose",
"numpy.savetxt"
]
] |
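The frombinary/tobinary tests in the row above rely on a flat C-ordered buffer plus externally supplied shape and dtype metadata. A minimal sketch of that on-disk layout with plain numpy (no thunder involved; the temporary path is only illustrative):

import os
import tempfile
import numpy as np

# Write an int16 array as raw bytes, exactly the buffer format the tests create,
# then read it back by supplying shape and dtype out of band.
a = np.arange(8, dtype='int16').reshape((4, 2))
path = os.path.join(tempfile.mkdtemp(), 'data.bin')
a.tofile(path)

b = np.fromfile(path, dtype='int16').reshape((4, 2))
assert np.allclose(a, b)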
hx-Tang/GANet | [
"8935c9d3d82189fa6f940c2a877534a398a041e4"
] | [
"libs/sync_bn/src/__init__.py"
] | [
"import os\nimport torch\nfrom torch.utils.cpp_extension import load\n\ncwd = os.path.dirname(os.path.realpath(__file__))\ncpu_path = os.path.join(cwd, 'cpu')\ngpu_path = os.path.join(cwd, 'gpu')\n\ncpu = load('sync_bn_cpu', [\n os.path.join(cpu_path, 'operator.cpp'),\n os.path.join(cpu_path, 'sync_bn.cpp'),\n], build_directory=cpu_path, verbose=False)\n\nif torch.cuda.is_available():\n gpu = load('sync_bn_gpu', [\n os.path.join(gpu_path, 'operator.cpp'),\n os.path.join(gpu_path, 'sync_bn_cuda.cu'),\n ], build_directory=gpu_path, verbose=False)\n"
] | [
[
"torch.cuda.is_available"
]
] |
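The row above JIT-compiles the sync_bn extension from C++/CUDA sources with torch.utils.cpp_extension.load. A hedged sketch of the same idea using load_inline, which compiles from an in-memory string instead of the cpu/ and gpu/ source files; like load(), it assumes a working C++ toolchain is available at runtime, and the extension below is a toy example, not part of sync_bn:

import torch
from torch.utils.cpp_extension import load_inline

# Toy C++ function compiled and bound on the fly; load_inline generates the
# pybind11 bindings for the names listed in `functions`.
cpp_source = """
at::Tensor add_one(at::Tensor x) {
  return x + 1;
}
"""
ext = load_inline(name="toy_inline_ext", cpp_sources=[cpp_source], functions=["add_one"])
print(ext.add_one(torch.zeros(3)))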
scottfredericks/PyXtal_Old | [
"3fa39b2f188197b42576087c6f4c3bca14b2e8f3"
] | [
"examples/LJ_38_Oh.py"
] | [
"from pyxtal.crystal import random_cluster\nfrom copy import deepcopy\nfrom optparse import OptionParser\nfrom random import randint, choice\nfrom scipy.optimize import minimize\nfrom scipy.spatial.distance import pdist, cdist\nfrom pyxtal.molecule import PointGroupAnalyzer\nfrom pymatgen import Molecule\nfrom pyxtal.database.collection import Collection\nfrom time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nplt.style.use(\"bmh\")\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\"\nThis is a script to \n1, generate random clusters\n2, perform optimization\n\"\"\"\ndef LJ(pos, dim, mu=0.1):\n \"\"\"\n Calculate the total energy\n Args:\n pos: 1D array with N*dim numbers representing the atomic positions\n dim: dimension of the hyper/normal space\n output\n E: the total energy with punishing function\n \"\"\"\n N_atom = int(len(pos)/dim)\n pos = np.reshape(pos, (N_atom, dim))\n \n distance = pdist(pos) \n r6 = np.power(distance, 6)\n r12 = np.multiply(r6, r6)\n Eng = np.sum(4*(1/r12 - 1/r6))\n\n if dim > 3:\n norm = 0\n for i in range(3,dim):\n #diff = pos[:, i] - np.mean(pos[:, i])\n diff = pos[:, i] \n norm += np.sum(np.multiply(diff, diff))\n Eng += 0.5*mu*norm\n return Eng\n\ndef LJ_force(pos, dim, mu=0.1):\n N_atom = int(len(pos)/dim)\n pos = np.reshape(pos,[N_atom, dim])\n force = np.zeros([N_atom, dim])\n for i, pos0 in enumerate(pos):\n pos1 = deepcopy(pos)\n pos1 = np.delete(pos1, i, 0)\n distance = cdist([pos0], pos1)\n r = pos1 - pos0\n r2 = np.power(distance, 2)\n r6 = np.power(r2, 3)\n r12 = np.power(r6, 2)\n force[i] = np.dot((48/r12-24/r6)/r2, r)\n # force from the punish function mu*sum([x-mean(x)]^2)\n if dim > 3:\n for j in range(3,dim):\n #force[i, j] += mu*(pos[i, j] - np.mean(pos[:, j]))\n force[i, j] += mu*pos[i, j] #- np.mean(pos[:, j]))\n return force.flatten()\n\ndef single_optimize(pos, dim=3, kt=0.5, mu=0.1):\n \"\"\"\n perform optimization for a given cluster\n Args: \n pos: N*dim0 array representing the atomic positions\n dim: dimension of the hyper/normal space\n kt: perturbation factors\n\n output:\n energy: optmized energy\n pos: optimized positions\n \"\"\"\n N_atom = len(pos)\n diff = dim - np.shape(pos)[1]\n # if the input pos has less dimensions, we insert a random array for the extra dimension\n # if the input pos has more dimensions, we delete the array for the extra dimension\n if diff > 0:\n pos = np.hstack((pos, 0.5*(np.random.random([N_atom, diff])-0.5) ))\n elif diff < 0:\n pos = pos[:, :dim]\n\n pos = pos.flatten()\n res = minimize(LJ, pos, args=(dim, mu), jac=LJ_force, method='CG', tol=1e-3)\n pos = np.reshape(res.x, (N_atom, dim))\n energy = res.fun\n return energy, pos\n\n\ndef parse_symmetry(pos):\n mol = Molecule(['C']*len(pos), pos)\n try:\n symbol = PointGroupAnalyzer(mol, tolerance=0.1).sch_symbol\n except:\n symbol = 'N/A'\n return symbol\n\n\nclass LJ_prediction():\n \"\"\"\n A class to perform global optimization on LJ clusters\n Args:\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, numIons):\n self.numIons = numIons\n ref = Collection('clusters')[str(numIons)]\n print('\\nReference for LJ {0:3d} is {1:12.3f} eV, PG: {2:4s}'.\\\n format(numIons, ref['energy'], ref['pointgroup']))\n self.reference = ref\n self.time0 = time()\n\n def generate_cluster(self, pgs = range(2, 33)):\n run = True\n while run:\n pg = choice(pgs)\n cluster = random_cluster(pg, ['Mo'], [self.numIons], 1.0)\n if cluster.valid:\n run = False\n return cluster.cart_coords\n \n def predict(self, dim=3, maxN=100, ncpu=2, pgs=range(2, 
33)):\n\n print('\\nPerforming random search at {0:d}D space\\n'.format(dim))\n cycle = range(maxN)\n if ncpu > 1:\n from multiprocessing import Pool\n from functools import partial\n\n with Pool(ncpu) as p:\n func = partial(self.relaxation, dim, pgs)\n res = p.map(func, cycle)\n p.close()\n p.join()\n else:\n res=[]\n for i in cycle:\n res.append(self.relaxation(dim, pgs, i))\n \n N_success = 0\n for dct in res:\n if dct['ground']:\n N_success +=1\n print('\\nHit the ground state {0:4d} times out of {1:4d} attempts\\n'.\\\n format(N_success, maxN))\n return res\n\n def relaxation(self, dim, pgs, ind):\n pos = self.generate_cluster(pgs)\n pg1 = parse_symmetry(pos)\n if dim == 3:\n [energy, pos] = single_optimize(pos, 3)\n else:\n do = True\n while do:\n [energy1, pos1] = single_optimize(pos, 3)\n [energy2, pos2] = single_optimize(pos1, dim)\n [energy3, pos3] = single_optimize(pos2, 3)\n #print(energy1, energy2, energy3)\n if abs(energy3-energy1) < 1e-3 or energy3 > energy1:\n pos = pos1\n energy = energy1\n do = False\n #print('stop')\n else:\n pos = pos3\n if abs(energy-self.reference['energy']) <1e-3:\n ground = True\n elif energy < self.reference['energy']:\n ground = True\n print(\" --- ENERGY LOWER THAN REFERENCE FOUND ---\")\n else:\n ground = False\n\n pg2 = parse_symmetry(pos)\n res = {'pos': pos,\n 'energy': energy,\n 'pg_init': pg1,\n 'pg_finial': pg2,\n 'ground': ground,\n 'id': ind,\n }\n if ground:\n print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} ++++++'.\\\n format(ind, pg1, pg2, energy, (time()-self.time0)/60))\n elif ind%10 == 0:\n print('ID: {0:4d} PG initial: {1:4s} relaxed: {2:4s} Energy: {3:12.3f} Time: {4:6.1f} '.\\\n format(ind, pg1, pg2, energy, (time()-self.time0)/60))\n return res\n\nif __name__ == \"__main__\":\n #-------------------------------- Options -------------------------\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dimension\", dest=\"dim\", metavar='dim', default=3, type=int,\n help=\"dimension, 3 or higher\")\n parser.add_option(\"-n\", \"--numIons\", dest=\"numIons\", default=16, type=int,\n help=\"desired numbers of atoms: 16\")\n parser.add_option(\"-m\", \"--max\", dest=\"max\", default=100, type=int,\n help=\"maximum number of attempts\")\n parser.add_option(\"-p\", \"--proc\", dest=\"proc\", default=1, type=int,\n help=\"number of processors, default 1\")\n\n (options, args) = parser.parse_args()\n\n N = options.numIons #38\n maxN = options.max #1000\n dim = options.dim #4\n ncpu = options.proc\n\n lj_run = LJ_prediction(N)\n eng_min = lj_run.reference['energy']\n t0 = time()\n print(\"---No symmetry---\")\n results1 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[1])\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Random symmetry---\")\n results2 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 33))\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Oh only---\")\n results3 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=[32])\n print('time: {0:6.2f} seconds'.format(time()-t0))\n\n print(\"---Random symmetry (not Oh)---\")\n results4 = lj_run.predict(dim=dim, maxN=maxN, ncpu=ncpu, pgs=range(2, 32))\n print('time: {0:6.2f} seconds'.format(time()-t0))\n eng1 = []\n eng2 = []\n eng3 = []\n eng4 = []\n ground1 = 0\n ground2 = 0\n ground3 = 0\n ground4 = 0\n for dct in results1:\n if dct['ground']:\n ground1 += 1\n eng1.append(dct['energy']) \n for dct in results2:\n if dct['ground']:\n ground2 += 1\n eng2.append(dct['energy']) \n for dct in 
results3:\n if dct['ground']:\n ground3 += 1\n eng3.append(dct['energy']) \n for dct in results4:\n if dct['ground']:\n ground4 += 1\n eng4.append(dct['energy']) \n eng1 = np.array(eng1)\n eng2 = np.array(eng2)\n eng3 = np.array(eng3)\n eng4 = np.array(eng4)\n\n eng_max = max([max(eng1), max(eng2)])\n bins = np.linspace(eng_min-0.1, 0.1, 100)\n plt.hist(eng1, bins, alpha=0.5, label='no symmetry: ' + str(ground1) + '/' + str(len(eng1)))\n plt.hist(eng2, bins, alpha=0.5, label='random point groups: ' + str(ground2) + '/' + str(len(eng2)))\n plt.xlabel('Energy (eV)')\n plt.ylabel('Counts')\n plt.legend(loc=1)\n plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))\n plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'.pdf')\n plt.close()\n\n eng_max = max([max(eng3), max(eng4)])\n bins = np.linspace(eng_min-0.1, 0.1, 100)\n plt.hist(eng3, bins, alpha=0.5, label='Oh only: ' + str(ground3) + '/' + str(len(eng3)))\n plt.hist(eng4, bins, alpha=0.5, label='random point groups (excluding Oh): ' + str(ground4) + '/' + str(len(eng4)))\n plt.xlabel('Energy (eV)')\n plt.ylabel('Counts')\n plt.legend(loc=1)\n plt.title('LJ cluster: ' + str(N) + ' Ground state: ' + str(eng_min))\n plt.savefig(str(N)+'-'+str(maxN)+'-'+str(dim)+'_single.pdf')\n plt.close()\n"
] | [
[
"scipy.spatial.distance.pdist",
"numpy.multiply",
"numpy.sum",
"scipy.spatial.distance.cdist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use",
"numpy.reshape",
"numpy.delete",
"numpy.linspace",
"numpy.zeros",
"scipy.optimize.minimize",
"numpy.power",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.random.random",
"numpy.shape",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.xlabel"
]
] |
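The LJ() energy in the row above is a pairwise sum over pdist distances (plus a penalty term in the hyper-dimensional case). A standalone check of the 3-D part on a near-equilateral trimer with toy coordinates:

import numpy as np
from scipy.spatial.distance import pdist

# Three atoms roughly 2**(1/6) apart, near the pair-potential minimum.
pos = np.array([[0.00, 0.00, 0.0],
                [1.12, 0.00, 0.0],
                [0.56, 0.97, 0.0]])
r = pdist(pos)                       # the three pairwise distances
r6 = r**6
energy = np.sum(4.0 * (1.0 / (r6 * r6) - 1.0 / r6))
print(energy)                        # close to -3 for this geometry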
quantapix/qnarre.com | [
"f51d5945c20ef8182c4aa11f1b407d064c190c70"
] | [
"qnarre/models/ibert_quant_modules.py"
] | [
"import decimal\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\n\nfrom ...utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass QuantEmbedding(qc.Module):\n def __init__(\n self,\n num_embeddings,\n embedding_dim,\n padding_idx=None,\n max_norm=None,\n norm_type=2.0,\n scale_grad_by_freq=False,\n sparse=False,\n _weight=None,\n weight_bit=8,\n momentum=0.95,\n quant_mode=False,\n ):\n super().__init__()\n self.num_ = num_embeddings\n self.dim = embedding_dim\n self.padding_idx = padding_idx\n self.max_norm = max_norm\n self.norm_type = norm_type\n self.scale_grad_by_freq = scale_grad_by_freq\n self.sparse = sparse\n\n self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))\n self.register_buffer(\"weight_scaling_factor\", torch.zeros(1))\n self.register_buffer(\"weight_integer\", torch.zeros_like(self.weight))\n\n self.weight_bit = weight_bit\n self.momentum = momentum\n self.quant_mode = quant_mode\n self.percentile_mode = False\n self.weight_function = SymmetricQuantFunction.apply\n\n def forward(self, x, positions=None, incremental_state=None):\n if not self.quant_mode:\n return (\n F.embedding(\n x,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n ),\n None,\n )\n\n w = self.weight\n w_transform = w.data.detach()\n w_min = w_transform.min().expand(1)\n w_max = w_transform.max().expand(1)\n\n self.weight_scaling_factor = symmetric_linear_quantization_params(\n self.weight_bit, w_min, w_max, False\n )\n self.weight_integer = self.weight_function(\n self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor\n )\n\n emb_int = F.embedding(\n x,\n self.weight_integer,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n return emb_int * self.weight_scaling_factor, self.weight_scaling_factor\n\n\nclass QuantAct(qc.Module):\n def __init__(\n self,\n activation_bit,\n act_range_momentum=0.95,\n per_channel=False,\n channel_len=None,\n quant_mode=False,\n ):\n super().__init__()\n\n self.activation_bit = activation_bit\n self.act_range_momentum = act_range_momentum\n self.quant_mode = quant_mode\n self.per_channel = per_channel\n self.percentile = False\n self.act_function = SymmetricQuantFunction.apply\n\n if not self.per_channel:\n self.register_buffer(\"x_min\", torch.zeros(1))\n self.register_buffer(\"x_max\", torch.zeros(1))\n self.register_buffer(\"act_scaling_factor\", torch.zeros(1))\n self.x_min -= 1e-5\n self.x_max += 1e-5\n else:\n raise NotImplementedError(\"per-channel mode is not currently supported for activation.\")\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(activation_bit={self.activation_bit}, \"\n f\"quant_mode: {self.activation_bit}, Act_min: {self.x_min.item():.2f}, \"\n f\"Act_max: {self.x_max.item():.2f})\"\n )\n\n def forward(\n self,\n x,\n pre_act_scaling_factor=None,\n identity=None,\n identity_scaling_factor=None,\n specified_min=None,\n specified_max=None,\n ):\n\n x_act = x if identity is None else identity + x\n # collect running stats if training\n if self.training:\n assert not self.percentile, \"percentile mode is not currently supported for activation.\"\n assert (\n not self.per_channel\n ), \"per-channel mode is not currently supported for activation.\"\n x_min = x_act.data.min()\n x_max = x_act.data.max()\n\n assert (\n x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0\n ), \"NaN detected when computing min/max of 
the activation\"\n\n # Initialization\n if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:\n self.x_min = self.x_min + x_min\n self.x_max = self.x_max + x_max\n\n # exponential moving average (EMA)\n # use momentum to prevent the quantized values change greatly every iteration\n elif self.act_range_momentum == -1:\n self.x_min = torch.min(self.x_min, x_min)\n self.x_max = torch.max(self.x_max, x_max)\n else:\n self.x_min = self.x_min * self.act_range_momentum + x_min * (\n 1 - self.act_range_momentum\n )\n self.x_max = self.x_max * self.act_range_momentum + x_max * (\n 1 - self.act_range_momentum\n )\n\n if not self.quant_mode:\n return x_act, None\n\n x_min = self.x_min if specified_min is None else specified_min\n x_max = self.x_max if specified_max is None else specified_max\n\n self.act_scaling_factor = symmetric_linear_quantization_params(\n self.activation_bit, x_min, x_max, per_channel=self.per_channel\n )\n\n if pre_act_scaling_factor is None:\n # this is for the input quantization\n quant_act_int = self.act_function(\n x, self.activation_bit, self.percentile, self.act_scaling_factor\n )\n else:\n quant_act_int = FixedPointMul.apply(\n x,\n pre_act_scaling_factor,\n self.activation_bit,\n self.act_scaling_factor,\n identity,\n identity_scaling_factor,\n )\n\n correct_output_scale = self.act_scaling_factor.view(-1)\n\n return quant_act_int * correct_output_scale, self.act_scaling_factor\n\n\nclass QuantLinear(qc.Module):\n def __init__(\n self,\n in_features,\n out_features,\n bias=True,\n weight_bit=8,\n bias_bit=32,\n per_channel=False,\n quant_mode=False,\n ):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n\n self.weight = nn.Parameter(torch.zeros([out_features, in_features]))\n self.register_buffer(\"weight_integer\", torch.zeros_like(self.weight))\n self.register_buffer(\"fc_scaling_factor\", torch.zeros(self.out_features))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_features))\n self.register_buffer(\"bias_integer\", torch.zeros_like(self.bias))\n\n self.weight_bit = weight_bit\n self.quant_mode = quant_mode\n self.per_channel = per_channel\n self.bias_bit = bias_bit\n self.quant_mode = quant_mode\n self.percentile_mode = False\n self.weight_function = SymmetricQuantFunction.apply\n\n def __repr__(self):\n s = super().__repr__()\n s = f\"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})\"\n return s\n\n def forward(self, x, prev_act_scaling_factor=None):\n if not self.quant_mode:\n return F.linear(x, weight=self.weight, bias=self.bias), None\n\n # assert that prev_act_scaling_factor is a scalar tensor\n assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (\n \"Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. 
\"\n \"Please add a QuantAct layer with `per_channel = True` before this QuantAct layer\"\n )\n\n w = self.weight\n w_transform = w.data.detach()\n if self.per_channel:\n w_min, _ = torch.min(w_transform, dim=1, out=None)\n w_max, _ = torch.max(w_transform, dim=1, out=None)\n else:\n w_min = w_transform.min().expand(1)\n w_max = w_transform.max().expand(1)\n\n self.fc_scaling_factor = symmetric_linear_quantization_params(\n self.weight_bit, w_min, w_max, self.per_channel\n )\n self.weight_integer = self.weight_function(\n self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor\n )\n\n bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor\n\n if self.bias is not None:\n self.bias_integer = self.weight_function(\n self.bias, self.bias_bit, False, bias_scaling_factor\n )\n\n prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)\n x_int = x / prev_act_scaling_factor\n\n return (\n F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)\n * bias_scaling_factor,\n bias_scaling_factor,\n )\n\n\nclass IntGELU(qc.Module):\n def __init__(self, quant_mode=True, force_dequant=\"none\"):\n super().__init__()\n self.quant_mode = quant_mode\n\n if force_dequant in [\"nonlinear\", \"gelu\"]:\n logger.info(\"Force dequantize gelu\")\n self.quant_mode = False\n\n if not self.quant_mode:\n self.activation_fn = nn.GELU()\n\n self.k = 1.4142\n self.const = 14 # dummy integer constant\n self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c\n self.coeff[2] /= self.coeff[0]\n\n def int_erf(self, x_int, scaling_factor):\n b_int = torch.floor(self.coeff[1] / scaling_factor)\n c_int = torch.floor(self.coeff[2] / scaling_factor**2)\n sign = torch.sign(x_int)\n\n abs_int = torch.min(torch.abs(x_int), -b_int)\n y_int = sign * ((abs_int + b_int) ** 2 + c_int)\n scaling_factor = scaling_factor**2 * self.coeff[0]\n\n # avoid overflow\n y_int = floor_ste.apply(y_int / 2**self.const)\n scaling_factor = scaling_factor * 2**self.const\n\n return y_int, scaling_factor\n\n def forward(self, x, scaling_factor=None):\n if not self.quant_mode:\n return self.activation_fn(x), None\n\n x_int = x / scaling_factor\n sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)\n\n shift_int = 1.0 // sigmoid_scaling_factor\n\n x_int = x_int * (sigmoid_int + shift_int)\n scaling_factor = scaling_factor * sigmoid_scaling_factor / 2\n\n return x_int * scaling_factor, scaling_factor\n\n\nclass IntSoftmax(qc.Module):\n def __init__(self, output_bit, quant_mode=False, force_dequant=\"none\"):\n super().__init__()\n self.output_bit = output_bit\n self.max_bit = 32\n self.quant_mode = quant_mode\n\n if force_dequant in [\"nonlinear\", \"softmax\"]:\n logger.info(\"Force dequantize softmax\")\n self.quant_mode = False\n\n self.act = QuantAct(16, quant_mode=self.quant_mode)\n self.x0 = -0.6931 # -ln2\n self.const = 30 # dummy integer constant\n self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c\n self.coef[1] /= self.coef[0]\n self.coef[2] /= self.coef[0]\n\n def int_polynomial(self, x_int, scaling_factor):\n with torch.no_grad():\n b_int = torch.floor(self.coef[1] / scaling_factor)\n c_int = torch.floor(self.coef[2] / scaling_factor**2)\n z = (x_int + b_int) * x_int + c_int\n scaling_factor = self.coef[0] * scaling_factor**2\n return z, scaling_factor\n\n def int_exp(self, x_int, scaling_factor):\n with torch.no_grad():\n x0_int = torch.floor(self.x0 / scaling_factor)\n x_int = torch.max(x_int, self.const * x0_int)\n\n q = floor_ste.apply(x_int / x0_int)\n r = 
x_int - x0_int * q\n exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)\n exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)\n scaling_factor = exp_scaling_factor / 2**self.const\n return exp_int, scaling_factor\n\n def forward(self, x, scaling_factor):\n if not self.quant_mode:\n return F.softmax(x, dim=-1), None\n\n x_int = x / scaling_factor\n\n x_int_max, _ = x_int.max(dim=-1, keepdim=True)\n x_int = x_int - x_int_max\n exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)\n\n # Avoid overflow\n exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)\n exp_int = exp / exp_scaling_factor\n\n exp_int_sum = exp_int.sum(dim=-1, keepdim=True)\n factor = floor_ste.apply(2**self.max_bit / exp_int_sum)\n exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))\n scaling_factor = 1 / 2**self.output_bit\n return exp_int * scaling_factor, scaling_factor\n\n\nclass IntLayerNorm(qc.Module):\n def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant=\"none\"):\n super().__init__()\n self.normalized_shape = normalized_shape\n self.eps = eps\n\n self.weight = nn.Parameter(torch.zeros(normalized_shape))\n self.bias = nn.Parameter(torch.zeros(normalized_shape))\n\n self.quant_mode = quant_mode\n if force_dequant in [\"nonlinear\", \"layernorm\"]:\n logger.info(\"Force dequantize layernorm\")\n self.quant_mode = False\n\n self.register_buffer(\"shift\", torch.zeros(1))\n self.output_bit = output_bit\n self.max_bit = 32\n self.dim_sqrt = None\n self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)\n\n def set_shift(self, y_int):\n with torch.no_grad():\n y_sq_int = y_int**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()\n shift_old = self.shift\n self.shift = torch.max(self.shift, shift)\n logger.info(f\"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}\")\n\n def overflow_fallback(self, y_int):\n self.set_shift(y_int) # adjusts `self.shift`\n y_int_shifted = floor_ste.apply(y_int / 2**self.shift)\n y_sq_int = y_int_shifted**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n return var_int\n\n def forward(self, x, scaling_factor=None):\n if not self.quant_mode:\n mean = x.mean(axis=2, keepdim=True)\n y = x - mean\n var = torch.mean(y**2, axis=2, keepdim=True)\n x = y / torch.sqrt(self.eps + var)\n x = x * self.weight + self.bias\n return x, None\n\n # compute sqrt of the feature dimension if it is the first run\n if self.dim_sqrt is None:\n n = torch.tensor(x.shape[2], dtype=torch.float)\n self.dim_sqrt = torch.sqrt(n).to(x.device)\n\n # Normalization: computes mean and variance(std)\n x_int = x / scaling_factor\n mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))\n y_int = x_int - mean_int\n y_int_shifted = floor_ste.apply(y_int / 2**self.shift)\n y_sq_int = y_int_shifted**2\n var_int = torch.sum(y_sq_int, axis=2, keepdim=True)\n\n # overflow handling in training time\n if self.training:\n # if overflow is detected\n if var_int.max() >= 2**self.max_bit:\n var_int = self.overflow_fallback(y_int)\n assert var_int.max() < 2**self.max_bit + 0.1, (\n \"Error detected in overflow handling: \"\n \"`var_int` exceeds `self.max_bit` (the maximum possible bit width)\"\n )\n\n # To be replaced with integer-sqrt kernel that produces the same output\n std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift\n factor = floor_ste.apply(2**31 / std_int)\n y_int = 
floor_ste.apply(y_int * factor / 2)\n scaling_factor = self.dim_sqrt / 2**30\n\n # scaling and shifting\n bias = self.bias.data.detach() / (self.weight.data.detach())\n bias_int = floor_ste.apply(bias / scaling_factor)\n\n y_int = y_int + bias_int\n scaling_factor = scaling_factor * self.weight\n x = y_int * scaling_factor\n\n return x, scaling_factor\n\n\ndef get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):\n input_length = input.shape[0]\n\n lower_index = round(input_length * (1 - lower_percentile * 0.01))\n upper_index = round(input_length * upper_percentile * 0.01)\n\n upper_bound = torch.kthvalue(input, k=upper_index).values\n\n if lower_percentile == 0:\n lower_bound = upper_bound * 0\n # lower_index += 1\n else:\n lower_bound = -torch.kthvalue(-input, k=lower_index).values\n\n if not output_tensor:\n lower_bound = lower_bound.item()\n upper_bound = upper_bound.item()\n return lower_bound, upper_bound\n\n\ndef linear_quantize(input, scale, zero_point, inplace=False):\n if len(input.shape) == 4:\n scale = scale.view(-1, 1, 1, 1)\n zero_point = zero_point.view(-1, 1, 1, 1)\n # reshape scale and zeropoint for linear weights\n elif len(input.shape) == 2:\n scale = scale.view(-1, 1)\n zero_point = zero_point.view(-1, 1)\n else:\n scale = scale.view(-1)\n zero_point = zero_point.view(-1)\n # quantized = float / scale + zero_point\n if inplace:\n input.mul_(1.0 / scale).add_(zero_point).round_()\n return input\n return torch.round(1.0 / scale * input + zero_point)\n\n\ndef symmetric_linear_quantization_params(\n num_bits, saturation_min, saturation_max, per_channel=False\n):\n with torch.no_grad():\n n = 2 ** (num_bits - 1) - 1\n\n if per_channel:\n scale, _ = torch.max(\n torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1\n )\n scale = torch.clamp(scale, min=1e-8) / n\n\n else:\n scale = max(saturation_min.abs(), saturation_max.abs())\n scale = torch.clamp(scale, min=1e-8) / n\n\n return scale\n\n\nclass SymmetricQuantFunction(Function):\n @staticmethod\n def forward(ctx, x, k, percentile_mode, scale):\n zero_point = torch.tensor(0.0).to(scale.device)\n\n n = 2 ** (k - 1) - 1\n new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)\n new_quant_x = torch.clamp(new_quant_x, -n, n - 1)\n\n ctx.scale = scale\n return new_quant_x\n\n @staticmethod\n def backward(ctx, grad_output):\n\n scale = ctx.scale\n if len(grad_output.shape) == 4:\n scale = scale.view(-1, 1, 1, 1)\n # reshape scale and zeropoint for linear weights\n elif len(grad_output.shape) == 2:\n scale = scale.view(-1, 1)\n else:\n scale = scale.view(-1)\n\n return grad_output.clone() / scale, None, None, None, None\n\n\nclass floor_ste(Function):\n @staticmethod\n def forward(ctx, x):\n return torch.floor(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output.clone()\n\n\nclass round_ste(Function):\n @staticmethod\n def forward(ctx, x):\n return torch.round(x)\n\n @staticmethod\n def backward(ctx, grad_output):\n return grad_output.clone()\n\n\ndef batch_frexp(inputs, max_bit=31):\n shape_of_input = inputs.size()\n\n # trans the input to be a 1-d tensor\n inputs = inputs.view(-1)\n\n output_m, output_e = np.frexp(inputs.cpu().numpy())\n tmp_m = []\n for m in output_m:\n int_m_shifted = int(\n decimal.Decimal(m * (2**max_bit)).quantize(\n decimal.Decimal(\"1\"), rounding=decimal.ROUND_HALF_UP\n )\n )\n tmp_m.append(int_m_shifted)\n output_m = np.array(tmp_m)\n\n output_e = float(max_bit) - output_e\n\n return (\n 
torch.from_numpy(output_m).to(inputs.device).view(shape_of_input),\n torch.from_numpy(output_e).to(inputs.device).view(shape_of_input),\n )\n\n\nclass FixedPointMul(Function):\n @staticmethod\n def forward(\n ctx,\n pre_act,\n pre_act_scaling_factor,\n bit_num,\n z_scaling_factor,\n identity=None,\n identity_scaling_factor=None,\n ):\n\n if len(pre_act_scaling_factor.shape) == 3:\n reshape = lambda x: x # noqa: E731\n else:\n reshape = lambda x: x.view(1, 1, -1) # noqa: E731\n ctx.identity = identity\n\n n = 2 ** (bit_num - 1) - 1\n\n with torch.no_grad():\n pre_act_scaling_factor = reshape(pre_act_scaling_factor)\n if identity is not None:\n identity_scaling_factor = reshape(identity_scaling_factor)\n\n ctx.z_scaling_factor = z_scaling_factor\n\n z_int = torch.round(pre_act / pre_act_scaling_factor)\n _A = pre_act_scaling_factor.type(torch.double)\n _B = (z_scaling_factor.type(torch.float)).type(torch.double)\n new_scale = _A / _B\n new_scale = reshape(new_scale)\n\n m, e = batch_frexp(new_scale)\n\n output = z_int.type(torch.double) * m.type(torch.double)\n output = torch.round(output / (2.0**e))\n\n if identity is not None:\n # needs addition of identity activation\n wx_int = torch.round(identity / identity_scaling_factor)\n\n _A = identity_scaling_factor.type(torch.double)\n _B = (z_scaling_factor.type(torch.float)).type(torch.double)\n new_scale = _A / _B\n new_scale = reshape(new_scale)\n\n m1, e1 = batch_frexp(new_scale)\n output1 = wx_int.type(torch.double) * m1.type(torch.double)\n output1 = torch.round(output1 / (2.0**e1))\n\n output = output1 + output\n\n return torch.clamp(output.type(torch.float), -n - 1, n)\n\n @staticmethod\n def backward(ctx, grad_output):\n identity_grad = None\n if ctx.identity is not None:\n identity_grad = grad_output.clone() / ctx.z_scaling_factor\n return (\n grad_output.clone() / ctx.z_scaling_factor,\n None,\n None,\n None,\n None,\n identity_grad,\n None,\n )\n"
] | [
[
"torch.sum",
"torch.min",
"torch.round",
"torch.mean",
"torch.zeros_like",
"torch.no_grad",
"torch.zeros",
"torch.tensor",
"torch.sqrt",
"torch.nn.GELU",
"torch.sign",
"torch.from_numpy",
"torch.abs",
"torch.max",
"numpy.array",
"torch.kthvalue",
"torch.clamp",
"torch.floor"
]
] |
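The quantization helpers in the row above derive a symmetric scale from the absolute range of the tensor and then round and clamp. A minimal sketch of that scheme on a small tensor (the bit width and input values are arbitrary):

import torch

# Symmetric linear quantization: scale = max|x| / (2**(b-1) - 1), then round
# and clamp to the signed integer range, mirroring SymmetricQuantFunction.
num_bits = 8
x = torch.tensor([-1.5, -0.2, 0.0, 0.7, 2.3])

n = 2 ** (num_bits - 1) - 1                       # 127 for 8 bits
scale = torch.clamp(x.abs().max(), min=1e-8) / n
x_int = torch.clamp(torch.round(x / scale), -n, n - 1)
x_deq = x_int * scale                             # dequantized approximation
print(scale.item(), x_int.tolist(), x_deq.tolist())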
frezaeix/evaluating_bdl | [
"bd0a464981c18de8479b6be2d91867527016c8d3"
] | [
"toyClassification/MC-Dropout-MAP-01-Adam/eval.py"
] | [
"# code-checked\n# server-checked\n\nfrom model import ToyNet\n\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport pickle\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cv2\n\nbatch_size = 32\n\nM = 4\n\nx_min = -6.0\nx_max = 6.0\nnum_points = 60\n\nnetwork = ToyNet(\"Farzaneh_eval_MC-Dropout-MAP-01-Adam_1_M10_0\", project_dir=\"../\").cuda()\nnetwork.load_state_dict(torch.load(\"../training_logs/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_0/checkpoints/model_Farzaneh_MC-Dropout-MAP-01-Adam_1_M10_epoch_300.pth\"))\n\nM_float = float(M)\nprint (M_float)\n\nnetwork.eval()\n\nfalse_prob_values = np.zeros((num_points, num_points))\nx_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)\nfor x_1_i, x_1_value in enumerate(x_values):\n for x_2_i, x_2_value in enumerate(x_values):\n x = torch.from_numpy(np.array([x_1_value, x_2_value])).unsqueeze(0).cuda() # (shape: (1, 2))\n\n mean_prob_vector = np.zeros((2, ))\n for i in range(M):\n logits = network(x) # (shape: (1, num_classes)) (num_classes==2)\n prob_vector = F.softmax(logits, dim=1) # (shape: (1, num_classes))\n\n prob_vector = prob_vector.data.cpu().numpy()[0] # (shape: (2, ))\n\n mean_prob_vector += prob_vector/M_float\n\n false_prob_values[x_2_i, x_1_i] = mean_prob_vector[0]\n\nplt.figure(1)\nx_1, x_2 = np.meshgrid(x_values, x_values)\nplt.pcolormesh(x_1, x_2, false_prob_values, cmap=\"RdBu\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density.png\" % network.model_dir)\nplt.close(1)\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values, cmap=\"binary\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_gray.png\" % network.model_dir)\nplt.close(1)\n\nx_values = np.linspace(x_min, x_max, 1000, dtype=np.float32)\nx_1, x_2 = np.meshgrid(x_values, x_values)\ndist = np.sqrt(x_1**2 + x_2**2)\nfalse_prob_values_GT = np.zeros(dist.shape)\nfalse_prob_values_GT[dist < 2.4] = 1.0\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap=\"RdBu\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density - Ground Truth\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_GT.png\" % network.model_dir)\nplt.close(1)\nplt.figure(1)\nplt.pcolormesh(x_1, x_2, false_prob_values_GT, cmap=\"binary\")\nplt.xlabel(\"x_1\")\nplt.ylabel(\"x_2\")\nplt.title(\"Predictive Density - Ground Truth\")\nplt.colorbar()\nplt.savefig(\"%s/predictive_density_gray_GT.png\" % network.model_dir)\nplt.close(1)\n\nwith open(\"../HMC/false_prob_values.pkl\", \"rb\") as file: # (needed for python3)\n false_prob_values_HMC = pickle.load(file) # (shape: (60, 60))\nx_values = np.linspace(x_min, x_max, num_points, dtype=np.float32)\nx_1, x_2 = np.meshgrid(x_values, x_values)\nx_values_GT = np.linspace(x_min, x_max, 1000, dtype=np.float32)\nx_1_GT, x_2_GT = np.meshgrid(x_values_GT, x_values_GT)\nfig, axes = plt.subplots(nrows=1, ncols=2, constrained_layout=True, sharex=True, sharey=True, figsize=(11.0, 5.0))\nim = axes.flat[0].pcolormesh(x_1, x_2, false_prob_values_HMC, cmap=\"RdBu\", vmin=0, vmax=1)\nim = axes.flat[1].pcolormesh(x_1, x_2, false_prob_values, cmap=\"RdBu\", vmin=0, vmax=1)\nfig.colorbar(im, ax=axes.flat)\nplt.savefig(\"%s/predictive_density_comparison.png\" % 
network.model_dir)\nplt.close()\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.pcolormesh",
"torch.load",
"numpy.zeros",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"torch.nn.functional.softmax",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.use",
"matplotlib.pyplot.colorbar",
"numpy.meshgrid",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
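The eval script in the row above evaluates the network on a 2-D grid and renders the predictive density with pcolormesh. The same grid-and-plot pattern, with the network replaced by a toy closed-form probability (an assumption made only so the sketch is self-contained):

import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

# Evaluate a toy "false class" probability on the same kind of grid.
x_values = np.linspace(-6.0, 6.0, 60, dtype=np.float32)
x_1, x_2 = np.meshgrid(x_values, x_values)
false_prob = 1.0 / (1.0 + np.exp(np.sqrt(x_1**2 + x_2**2) - 2.4))

plt.figure(1)
plt.pcolormesh(x_1, x_2, false_prob, cmap="RdBu")
plt.colorbar()
plt.savefig("predictive_density_toy.png")
plt.close(1)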
drunkpig/rlcard | [
"db8a410bbfefb7f9fd958239aae8d79a8bfb29d3"
] | [
"examples/uno_single.py"
] | [
"''' A toy example of training single-agent algorithm on Leduc Hold'em\n The environment can be treated as normal OpenAI gym style single-agent environment\n'''\n\nimport tensorflow as tf\nimport os\nimport numpy as np\n\nimport rlcard\nfrom rlcard.agents.dqn_agent import DQNAgent\nfrom rlcard.agents.random_agent import RandomAgent\nfrom rlcard.utils.utils import set_global_seed, tournament\nfrom rlcard.utils.logger import Logger\n\n# Make environment\nenv = rlcard.make('uno', config={'single_agent_mode':True})\neval_env = rlcard.make('uno', config={'single_agent_mode':True})\n\n# Set the iterations numbers and how frequently we evaluate the performance\nevaluate_every = 1000\nevaluate_num = 10000\ntimesteps = 100000\n\n# The intial memory size\nmemory_init_size = 1000\n\n# Train the agent every X steps\ntrain_every = 1\n\n# The paths for saving the logs and learning curves\nlog_dir = './experiments/uno_single_dqn_result/'\n\n# Set a global seed\nset_global_seed(0)\n\nwith tf.Session() as sess:\n\n # Initialize a global step\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # Set up the agents\n agent = DQNAgent(sess,\n scope='dqn',\n action_num=env.action_num,\n replay_memory_init_size=memory_init_size,\n train_every=train_every,\n state_shape=env.state_shape,\n mlp_layers=[128,128])\n # Initialize global variables\n sess.run(tf.global_variables_initializer())\n\n # Init a Logger to plot the learning curve\n logger = Logger(log_dir)\n\n state = env.reset()\n\n for timestep in range(timesteps):\n action = agent.step(state)\n next_state, reward, done = env.step(action)\n ts = (state, action, reward, next_state, done)\n agent.feed(ts)\n\n if timestep % evaluate_every == 0:\n rewards = []\n state = eval_env.reset()\n for _ in range(evaluate_num):\n action, _ = agent.eval_step(state)\n _, reward, done = env.step(action)\n if done:\n rewards.append(reward)\n logger.log_performance(env.timestep, np.mean(rewards))\n\n # Close files in the logger\n logger.close_files()\n\n # Plot the learning curve\n logger.plot('DQN')\n \n # Save model\n save_dir = 'models/uno_single_dqn'\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n saver = tf.train.Saver()\n saver.save(sess, os.path.join(save_dir, 'model'))\n \n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.train.Saver",
"tensorflow.Session",
"tensorflow.Variable",
"numpy.mean"
]
] |
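The evaluation loop in the row above interacts with a gym-style single-agent environment that returns (state, reward, done). A sketch of that loop against a toy stand-in environment (none of the names below are rlcard APIs), restarting the episode whenever done is reached:

import numpy as np

class ToyEnv:
    # Stand-in environment: 5-step episodes, reward 1 for action 0 at the end.
    def reset(self):
        self.t = 0
        return np.zeros(4)
    def step(self, action):
        self.t += 1
        done = self.t >= 5
        return np.zeros(4), float(action == 0), done

env = ToyEnv()
rewards = []
state = env.reset()
for _ in range(100):
    action = np.random.randint(2)      # stand-in for agent.eval_step(state)
    state, reward, done = env.step(action)
    if done:
        rewards.append(reward)
        state = env.reset()            # restart the episode before continuing
print(np.mean(rewards))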
noashin/local_global_attention_model | [
"531e6a4cc1dc364a6a4168de1b9f972727a8aeb1"
] | [
"src/LocalChoiceModel/vel_param.py"
] | [
"import sys\n\nimport numpy as np\nfrom scipy.stats import multivariate_normal\n\nsys.path.append('./../../')\nfrom src.HMC.hmcparameter import HMCParameter\n\nclass VelParam(HMCParameter):\n def __init__(self, init_val):\n super().__init__(np.array(init_val))\n dim = np.array(init_val).shape\n self.mu = np.zeros(dim)\n self.sigma = 1\n\n def gen_init_value(self):\n self.value = multivariate_normal.rvs(self.mu, self.sigma)\n\n def get_energy_grad(self):\n return self.value\n\n def get_energy(self):\n return np.dot(self.value, self.value) / 2\n\n def get_energy_for_value(self, value):\n return np.dot(value, value) / 2\n"
] | [
[
"numpy.array",
"numpy.dot",
"scipy.stats.multivariate_normal.rvs",
"numpy.zeros"
]
] |
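VelParam in the row above draws Gaussian momenta and uses the kinetic energy v·v/2, whose gradient is simply v. A standalone illustration of those two quantities:

import numpy as np
from scipy.stats import multivariate_normal

dim = 3
v = multivariate_normal.rvs(mean=np.zeros(dim), cov=1.0)   # fresh momentum draw
energy = np.dot(v, v) / 2.0                                # kinetic energy
grad = v                                                   # its gradient
print(energy, grad)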
ruriboshi/propnet | [
"770703fb4fc344f785f89c02f26b31ea5733d2bd"
] | [
"propnet/models/python/electromechanical_coupling.py"
] | [
"import numpy as np\n\n\ndef plug_in(symbol_values):\n\n req_symbols = [\"S\", \"e\", \"d\"]\n data = {}\n if all(s in symbol_values for s in req_symbols):\n e = symbol_values[\"e\"]\n S = symbol_values[\"S\"]\n d = symbol_values[\"d\"]\n\n data[\"k\"] = np.abs(d[2][2] / np.sqrt(e[2][2] * S[2][2]))\n\n return data\n\n\nDESCRIPTION = \"\"\"\nModel calculating the electromechanical coupling factor,\nwhich is the efficiency of converting eletrical energy\nto acoustic energy in a piezoeletric transducer or filter\n\"\"\"\n\ntest_data = [{\n \"inputs\": {\n \"S\": [[0.007482236755310126, -0.002827041595205337, -0.002827041595205337, 0.0, 0.0, 0.0],\n [-0.002827041595205337, 0.007482236755310125, -0.002827041595205337, 0.0, 0.0, 0.0],\n [-0.0028270415952053366, -0.002827041595205337, 0.007482236755310125, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.010309278350515464, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.010309278350515464, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 0.010309278350515464]],\n \"e\": [[18.65, 0.00, 0.00], [-0.00, 18.65, 0.00], [-0.00, 0.00, 7.88]],\n \"d\": [[-0.0412497, -0.28686697, 0.06342802], [0.05065159, 0.26064878, -0.04828778],\n [0.08828203, 0.5660897, -0.11520665], [-0.16218673, -0.92468949, 0.2109461],\n [0.02485558, 0.03232004, -0.02421919], [0.06636329, 0.46541895, -0.09526407]]\n },\n \"outputs\": {\n \"k\": 0.47445902984\n }\n}]\n\nconfig = {\n \"name\": \"electromechanical_coupling\",\n \"connections\": [{\n \"inputs\": [\"e\", \"S\", \"d\"],\n \"outputs\": [\"k\"]\n }],\n \"categories\": [\"mechanical\", \"electrical\"],\n \"variable_symbol_map\": {\n \"S\": \"compliance_tensor_voigt\",\n \"e\": \"dielectric_tensor\",\n \"d\": \"piezoelectric_tensor_converse\",\n \"k\": \"electromechanical_coupling\"\n },\n \"description\": DESCRIPTION,\n \"implemented_by\": [\"shyamd\"],\n \"references\": [],\n \"plug_in\": plug_in,\n \"test_data\": test_data\n}\n"
] | [
[
"numpy.sqrt"
]
] |
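The plug_in function in the row above computes the coupling factor as |d33 / sqrt(e33 * S33)| from the Voigt (2, 2) entries of the three tensors. A direct check against the row's own test_data values:

import numpy as np

S33 = 0.007482236755310125   # compliance_tensor_voigt[2][2]
e33 = 7.88                   # dielectric_tensor[2][2]
d33 = -0.11520665            # piezoelectric_tensor_converse[2][2]

k = np.abs(d33 / np.sqrt(e33 * S33))
print(round(k, 6))           # ~0.474459, matching test_data["outputs"]["k"]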
eduardojdiniz/Buzznauts | [
"8ac242a8d5309b4090a0f0b148ec275cac762bc0"
] | [
"analysis/baseline/s02_perform_encoding.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\nimport numpy as np\nimport os\nimport os.path as op\nimport argparse\nimport torch\nfrom Buzznauts.utils import load_dict, saveasnii, get_fmri, set_device\nfrom Buzznauts.analysis.baseline import get_activations, predict_fmri_fast\nfrom tqdm import tqdm\n\n\ndef main():\n description = 'Encoding model analysis for Algonauts 2021'\n parser = argparse.ArgumentParser(description=description)\n\n buzz_root = '/home/[email protected]/proj/Buzznauts'\n baseline = op.join(buzz_root, 'models/baseline')\n parser.add_argument('-rd', '--result_dir',\n help='saves predicted fMRI activity',\n default=op.join(baseline, 'results'),\n type=str)\n parser.add_argument('-ad', '--activations_dir',\n help='directory containing DNN activations',\n default=op.join(baseline, 'activations'),\n type=str)\n parser.add_argument('-model', '--model',\n help='model under which predicted fMRI will be saved',\n default='alexnet',\n type=str)\n _help = 'layer from which activations will be used to train & predict fMRI'\n parser.add_argument('-l', '--layer',\n help=_help,\n default='layer_5',\n type=str)\n parser.add_argument(\n '-sub', '--sub',\n help='subject number from which fMRI data will be used',\n default='sub04', type=str)\n parser.add_argument('-r', '--roi',\n help='brain region from which fMRI data will be used',\n default='EBA',\n type=str)\n _help = 'test or val, val returns mean correlation ' + \\\n 'by using 10% of training data for validation'\n parser.add_argument('-m', '--mode',\n help=_help,\n default='val',\n type=str)\n parser.add_argument('-fd', '--fmri_dir',\n help='directory containing fMRI activity',\n default=op.join(buzz_root, 'data/fmri'),\n type=str)\n parser.add_argument('-v', '--visualize',\n help='visualize whole brain in MNI space or not',\n default=True,\n type=bool)\n _help = 'number of voxel to fit at one time in case of memory constraints'\n parser.add_argument('-b', '--batch_size',\n help=_help,\n default=1000,\n type=int)\n args = vars(parser.parse_args())\n\n mode = args['mode']\n sub = args['sub']\n ROI = args['roi']\n model = args['model']\n layer = args['layer']\n visualize_results = args['visualize']\n batch_size = args['batch_size']\n\n device = set_device()\n\n if ROI == \"WB\":\n track = \"full_track\"\n else:\n track = \"mini_track\"\n\n activations_dir = op.join(args['activations_dir'], 'pca_100')\n fmri_dir = op.join(args['fmri_dir'], track)\n\n sub_fmri_dir = op.join(fmri_dir, sub)\n results_dir = op.join(args['result_dir'], model, layer, track, sub)\n if not op.exists(results_dir):\n os.makedirs(results_dir)\n\n print(\"ROi is : \", ROI)\n\n features_train, features_test = get_activations(activations_dir,\n layer)\n if track == \"full_track\":\n fmri_train_all, voxel_mask = get_fmri(sub_fmri_dir, ROI)\n else:\n fmri_train_all = get_fmri(sub_fmri_dir, ROI)\n num_voxels = fmri_train_all.shape[1]\n\n if mode == 'val':\n # Here as an example we use first 900 videos as training and rest of\n # the videos as validation\n features_test = features_train[900:, :]\n features_train = features_train[:900, :]\n fmri_train = fmri_train_all[:900, :]\n fmri_test = fmri_train_all[900:, :]\n pred_fmri = np.zeros_like(fmri_test)\n pred_fmri_save_path = op.join(results_dir, ROI + '_val.npy')\n else:\n fmri_train = fmri_train_all\n num_test_videos = 102\n pred_fmri = np.zeros((num_test_videos, num_voxels))\n pred_fmri_save_path = op.join(results_dir, ROI + '_test.npy')\n\n print(\"number of voxels is \", num_voxels)\n i = 0\n with tqdm(total=100) as 
pbar:\n while i < num_voxels - batch_size:\n j = i + batch_size\n pred_fmri[:, i:j] = predict_fmri_fast(features_train,\n features_test,\n fmri_train[:, i:j],\n device=device)\n i = j\n pbar.update((100*i) // num_voxels)\n pred_fmri[:, i:] = predict_fmri_fast(features_train,\n features_test,\n fmri_train[:, i:i + batch_size],\n device=device)\n\n if mode == 'val':\n score = vectorized_correlation(fmri_test, pred_fmri)\n print(\"Mean correlation for ROI : \", ROI, \"in \", sub, \" is :\",\n round(score.mean(), 6))\n\n # result visualization for whole brain (full_track)\n if track == \"full_track\" and visualize_results:\n brain_mask = op.join(buzz_root, 'data/fmri/example.nii')\n nii_save_path = op.join(results_dir, ROI + '_val.nii')\n\n view_args = {'brain_mask': brain_mask,\n 'nii_save_path': nii_save_path,\n 'score': score,\n 'voxel_mask': voxel_mask}\n\n view = visualize_activity_surf(sub, **view_args)\n view_save_path = op.join(results_dir, ROI + '_val.html')\n view.save_as_html(view_save_path)\n print(\"Results saved in this directory: \", results_dir)\n view.open_in_browser()\n\n np.save(pred_fmri_save_path, pred_fmri)\n\n print(\"ROI done : \", ROI)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.save",
"numpy.zeros_like",
"numpy.zeros"
]
] |
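The validation branch in the row above reports a voxel-wise correlation via a vectorized_correlation helper defined elsewhere in the package. One common way to compute that column-wise Pearson correlation, shown here only as an illustrative stand-in on random data:

import numpy as np

def columnwise_correlation(x, y, eps=1e-8):
    # Pearson correlation per column (voxel), computed without Python loops.
    x = x - x.mean(axis=0, keepdims=True)
    y = y - y.mean(axis=0, keepdims=True)
    num = (x * y).sum(axis=0)
    den = np.sqrt((x**2).sum(axis=0) * (y**2).sum(axis=0)) + eps
    return num / den

fmri_test = np.random.randn(100, 5)                    # (videos, voxels), toy data
pred_fmri = fmri_test + 0.5 * np.random.randn(100, 5)  # noisy "prediction"
score = columnwise_correlation(fmri_test, pred_fmri)
print(round(score.mean(), 6))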
Anders-Holst/Bonsai | [
"841aa4e12c8bea8945396bd232c2006260127507"
] | [
"datapreparation/analyze.py"
] | [
"#! /usr/bin/env python3\n\n\n\"\"\" -------------------------------\n\n analyse.py\n\n Copyright (C) 2018 RISE\n This code was produced by RISE\n The 2013-04-10 version\n\n bonsai/src_v02/analyze.py\n\n simple analysis of pandas dataframes data\n such as \n\n 1. find duplicated rows\n\n 2. number of unique values in a column\n\n 3. number of unique values in common \n between two columns in two different\n files\n \n 4. \n \n------------------------------------\"\"\"\n\n\nimport global_settings as gs\nimport numpy as np\nimport pandas as pd\nimport bonsai_io as bio\nimport common\nimport copy\n\ndef nr_of_unique_rows(df):\n d = df.drop_duplicates()\n return len(d)\n\ndef nr_of_unique_values_in_cols(df, cols):\n c = df.drop_duplicates(subset = cols)\n return len(c)\n\n\ndef nr_of_unique_values(df, col):\n c = df[col].dropna()\n c = c.drop_duplicates()\n return len(c)\n\n\"\"\"\ndef nr_of_unique_numeric_values(df, col):\n\n c = df[col].dropna()\n c = c.drop_duplicates()\n c = c.str.isnumeric() \n c = c[c].index.values\n\"\"\"\n\n\ndef nr_of_nonnan_values(df, col):\n\n c = df[col].dropna()\n return len(c)\n \ndef nr_of_unique_digital_values(df, col):\n\n c = df[col].dropna()\n c = c.drop_duplicates()\n c = c.str.isdigit() \n c = c[c].index.values\n # df = df.drop_duplicates(subset = col)\n # df = df[ df[col].dropna().str.isdigit() ]\n # df = df[ df[col].str.contains('\\d', regex=True) ]\n return len(c)\n\ndef duplicated_rows(df):\n df['dup'] = df.duplicated()\n df = df[df['dup'] == True]\n return df\n\ndef print_duplicated_rows(df, nr):\n dup = duplicated_rows(df)\n print('Nr of rows in total', len(df))\n print('Nr of duplicated rows', len(dup))\n nr = min( nr,len(dup) )\n if nr > 0:\n print('the first', nr,' of them')\n print(dup[0:nr])\n return dup\n\ndef unique_number_values(df, col):\n df = df.drop_duplicates(subset = col)\n df = df[ df[col].str.contains('\\d', regex=True) ]\n return df\n\n\ndef info(df, name = ''):\n print()\n if name != '':\n print()\n print('--------------------------------------------------')\n print()\n print('\\tInfo on the file\\n\\t' + name)\n print()\n print('--------------------------------------------------')\n print()\n df_unique_nr = nr_of_unique_rows(df)\n print(' shape', df.shape)\n print(' unique rows', df_unique_nr)\n\n for c in df.columns:\n print()\n print('\\tInfo on non-nan values of column', c)\n print()\n nonnan_nr = nr_of_nonnan_values(df, c)\n unique_nr = nr_of_unique_values(df, c)\n digital_nr = nr_of_unique_digital_values(df, c)\n # numeric_nr = nr_of_unique_numeric_values(df, c)\n print('non-nan values', nonnan_nr)\n print(' unique values', unique_nr)\n print('digital values', digital_nr)\n # print('numeric values', unique_nr)\n \n print()\n # return unique_number_values(df, 'ICD10')\n\n# df = df[ df[c].str.contains('\\d', regex=True) ]\n\n\n\ndef readall():\n dia = bio.read_generated_dia()\n dgr = bio.read_diagroups()\n per = bio.readperson()\n ctr = bio.readcontrol()\n inc = bio.readincare()\n nic = bio.readnicare()\n dru = bio.readdrug()\n dcl = bio.readdrugclasses()\n tre = bio.readtreatment()\n sur = bio.readsurgery()\n cau = bio.readcause()\n\n data = [\n dia, \n dgr, \n per,\n ctr, \n inc, \n nic, \n dru, \n dcl, \n tre,\n sur,\n cau\n]\n\n name = [\n 'diagnos ',\n 'diagnosgrupp ',\n 'person ',\n 'kontrollgrupp ',\n 'sluten v_rd ',\n '_ppen v_rd ',\n 'l_kemedel ',\n 'l_kemedelsgrupper',\n 'behandling ',\n 'kirurgi ',\n 'orsak ',\n ]\n\n return data, name\n\n\ndef info_on_all():\n\n data, name = readall()\n \n for i in range(0, 
len(name)):\n info(data[i], name[i])\n\n\ndef compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):\n\n xs = list(dfx['LopNr'].values)\n ys = list(dfy['LopNr'].values)\n\n sx = set(xs)\n sy = set(ys)\n cut = sx & sy\n ux = sx - sy\n uy = sy - sx\n\n print()\n # print('shape ' + namex + '\\t\\t', dfx.shape)\n # print('shape ' + namey + '\\t\\t', dfy.shape)\n # print('unique Lopnr ' + namex + '\\t', len(xs))\n # print('unique Lopnr ' + namey + '\\t', len(ys))\n\n print('common Lopnr\\t\\t\\t', len(cut))\n print('Lopnr in ' + namex + ' only\\t', len(ux))\n print('Lopnr in ' + namey + ' only\\t', len(uy))\n print()\n\n ux = list(ux)\n uy = list(uy)\n ux.sort\n uy.sort\n return ux, uy\n\n\ndef readlopnr():\n dia = bio.read_generated_dia()\n per = bio.readperson()\n ctr = bio.readcontrol()\n inc = bio.readincare()\n nic = bio.readnicare()\n dru = bio.readdrug()\n tre = bio.readtreatment()\n sur = bio.readsurgery()\n cau = bio.readcause()\n\n data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]\n\n name = [\n 'diagnos ',\n 'person ',\n 'kontrollgrupp',\n 'sluten v_rd ',\n '_ppen v_rd ',\n 'l_kemedel ',\n 'behandling ',\n 'kirurgi ',\n 'orsak ',\n ]\n\n return data, name\n\n\ndef pairwise_lopnr_comparisions():\n\n data, name = readlopnr()\n\n for i in range(0, len(name)):\n for j in range(i+1, len(name)):\n print()\n print('--------------------------------------------------')\n print()\n print('\\tComparing ' + name[i] + ' with ' + name[j])\n print()\n print('--------------------------------------------------')\n print()\n\n compare_lopnr(data[i], data[j], name[i], name[j])\n\n\n\n\n\n\"\"\" -------------------------------\n \n 4. count amd list various types of diagnosis\n codes in care data\n \n------------------------------------\"\"\"\n\n\"\"\"\ndef is_icd10_class(x):\n if not common.isstr(x):\n return False\n if common.is_icd10(x):\n return False\n if len(x) < 3:\n return False\n if not x[0].isupper():\n return False\n return x[1].isdigit() and x[2].isdigit()\n\"\"\"\n\n\ndef code_count(xs):\n if not isinstance(xs, str):\n return 0\n return len(xs.split())\n\ndef icd10_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if common.is_icd10(x):\n # print(x)\n count += 1\n return count\n\ndef not_icd10_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if not common.is_icd10(x):\n # print(x)\n count += 1\n return count\n\ndef icd10_class_count(xs):\n if not isinstance(xs, str):\n return 0\n count = 0\n for x in xs.split():\n if common.is_icd10_class(x):\n # print(x)\n count += 1\n return count\n\n\"\"\"\ndef code_list(xs):\n if not isinstance(xs, str):\n return 0\n return len(xs.split())\n\"\"\"\n\ndef count_and_print(df, table = False):\n dia = 'DIAGNOS'\n dfc = copy.copy(df)\n dfc['code_count'] = df[dia].apply(code_count)\n dfc['icd10_count'] = df[dia].apply(icd10_count)\n dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)\n dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)\n nr_of_codes = dfc['code_count'].sum()\n nr_of_icd10 = dfc['icd10_count'].sum()\n nr_of_not_icd10 = dfc['not_icd10_count'].sum()\n nr_of_class_codes = dfc['icd10_class_count'].sum()\n\n if table:\n print('nr_of_lines\\t', len(df))\n print('nr_of_codes\\t', nr_of_codes)\n print('nr_of_icd10\\t', nr_of_icd10)\n print('nr_of_not_icd10\\t', nr_of_not_icd10)\n print('nr_of_icd10_class_codes\\t', nr_of_class_codes)\n \n else:\n \n \n print(' nr_of_lines', len(df))\n print(' nr_of_codes', nr_of_codes)\n print(' nr_of_icd10', 
nr_of_icd10)\n print(' nr_of_not_icd10', nr_of_not_icd10)\n print(' nr_of_icd10_class_codes', nr_of_class_codes)\n\n\n \"\"\"\n for c in df1[dia].values:\n print('\\t', c)\n \"\"\"\n\n\ndef print_dates(df, table = False):\n date = 'INDATUM'\n\n if table:\n\n print('first date\\t', df[date].min())\n print('last date\\t', df[date].max())\n\n else:\n\n print(' first date', df[date].min())\n print(' last date', df[date].max())\n \n\ndef icd10_class_list(xs):\n if not isinstance(xs, str):\n return []\n codes = []\n for x in xs.split():\n if common.is_icd10_class(x):\n codes += [x]\n #print(codes)\n return codes\n\ndef flat(xs):\n ys = []\n for x in xs:\n ys += x\n return ys\n\n \n\ndef print_class_codes(df):\n dia = 'DIAGNOS'\n dfc = copy.copy(df)\n dfc['icd10_class'] = df[dia].apply(icd10_class_list)\n dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])\n dfc = dfc[dfc['is_class']]\n codes = np.unique(flat(list(dfc['icd10_class'].values)))\n for c in codes:\n print('\\t', c)\n \n\ndef diagnosis_code_count(df, print_class = False, table = False):\n \n date = 'INDATUM'\n nr = 'LopNr'\n icd10_start = np.datetime64('1998-01-01')\n\n \"\"\"\n size0 = len(df)\n df = df.dropna().reset_index(drop=True)\n print('nr of empty lines:', size0- len(df))\n \"\"\"\n \n df[date] = df[date].apply(bio.str2time)\n df = df.sort_values(date).dropna().reset_index(drop=True)\n\n df1 = df[df[date] < icd10_start] \n df2 = df[df[date] >= icd10_start]\n\n print() \n print('code counts before 1998_01_01:')\n print()\n \n print_dates(df1, table = table)\n count_and_print(df1, table = table)\n\n print() \n print('code counts from 1998_01_01')\n print()\n \n print_dates(df2, table = table)\n count_and_print(df2, table = table)\n if print_class:\n print()\n print(' all icd10_class_codes:')\n print_class_codes(df2)\n\n print()\n"
] | [
[
"numpy.datetime64"
]
] |
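The analyze.py record above counts unique rows/values with pandas and splits care data at the ICD-10 cutover date via numpy.datetime64. A minimal sketch of that pattern, using a small made-up DataFrame (column names borrowed from the record, data hypothetical):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'LopNr': [1, 1, 2, 3],
    'INDATUM': pd.to_datetime(['1997-05-01', '1999-02-10', '2000-01-01', '1997-12-31']),
})
icd10_start = np.datetime64('1998-01-01')

n_unique_rows = len(df.drop_duplicates())                     # unique rows
n_unique_lopnr = len(df['LopNr'].dropna().drop_duplicates())  # unique non-nan ids
before = df[df['INDATUM'] < icd10_start]                      # pre-ICD-10 records
after = df[df['INDATUM'] >= icd10_start]                      # ICD-10-era records
print(n_unique_rows, n_unique_lopnr, len(before), len(after))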
vinayak1998/Data_Driven_Astronomy | [
"1d0dd82b2e9066759c442807c30c70bef096d719"
] | [
"Week1/brightest_pixel_position_fits.py"
] | [
"import numpy as np\nimport time\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\n\ndef load_fits(filename):\n start = time.perf_counter()\n hdulist = fits.open(filename)\n data = hdulist[0].data\n result = np.where(data == np.amax(data))\n coornidates = list(zip(result[0],result[1]))\n end = time.perf_counter() - start\n return coornidates[0]\n \nif __name__ == '__main__':\n # Run your `load_fits` function with examples:\n bright = load_fits('image1.fits')\n print(bright)\n\n # You can also confirm your result visually:\n from astropy.io import fits\n import matplotlib.pyplot as plt\n\n hdulist = fits.open('image1.fits')\n data = hdulist[0].data\n\n # Plot the 2D image data\n plt.imshow(data.T, cmap=plt.cm.viridis)\n plt.colorbar()\n plt.show()"
] | [
[
"numpy.amax",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
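The brightest-pixel record above locates the maximum of a FITS image with np.where and np.amax. The same lookup, sketched on a synthetic array so it runs without astropy or the image1.fits file:

import numpy as np

data = np.zeros((5, 7))
data[2, 4] = 9.5                      # make one pixel the maximum
rows, cols = np.where(data == np.amax(data))
brightest = list(zip(rows, cols))[0]  # (row, column) of the first maximum
print(brightest)                      # -> (2, 4)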
anaikawadi/svbrdf-estimation | [
"6c169b12210d2a92495c1ab1218dd3e4da0314a5"
] | [
"development/multiImage_pytorch/persistence.py"
] | [
"import gc\nimport json\nimport pathlib\nimport torch\n\nclass Checkpoint:\n def __init__(self, checkpoint=None):\n self.checkpoint = checkpoint\n\n @staticmethod\n def get_checkpoint_path(checkpoint_dir):\n return checkpoint_dir.joinpath(\"checkpoint.tar\")\n\n @staticmethod\n def load_legacy(model_dir):\n model_path = model_dir.joinpath(\"model.data\")\n state_path = model_dir.joinpath(\"state.json\")\n if not model_path.exists():\n return None\n \n checkpoint = {\n 'model_state_dict' : torch.load(model_path),\n }\n print(\"Loaded legacy model state\")\n\n if state_path.exists():\n with open(state_path, 'r') as f:\n state = json.load(f)\n checkpoint['epoch'] = state['epoch']\n print(\"Loaded legacy training state\")\n\n return checkpoint \n\n @classmethod\n def load(cls, checkpoint_dir):\n if not isinstance(checkpoint_dir, pathlib.Path):\n checkpoint_dir = pathlib.Path(checkpoint_dir)\n \n checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)\n\n if not checkpoint_path.exists():\n # If there is no checkpoint file we try to perform a legacy load\n checkpoint = Checkpoint.load_legacy(checkpoint_dir)\n\n if checkpoint is None:\n print(\"No checkpoint found in directory '{}'\".format(checkpoint_dir))\n\n return cls(checkpoint)\n\n return cls(torch.load(checkpoint_path))\n\n @staticmethod\n def save(checkpoint_dir, args, model, optimizer, epoch):\n if not isinstance(checkpoint_dir, pathlib.Path):\n checkpoint_dir = pathlib.Path(checkpoint_dir)\n\n checkpoint_dir.mkdir(parents=True, exist_ok=True)\n\n checkpoint = {\n 'model_type' : args.model_type,\n 'use_coords' : True if args.use_coords else False,\n 'epoch' : epoch,\n 'model_state_dict': model.state_dict(),\n }\n\n if not args.omit_optimizer_state_save:\n checkpoint['optimizer_state_dict'] = optimizer.state_dict()\n\n torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))\n\n def purge(self):\n self.checkpoint = None\n gc.collect()\n\n def is_valid(self):\n return self.checkpoint is not None\n\n def restore_args(self, args):\n # Restore checkpoint relevant arguments\n\n if 'model_type' in self.checkpoint:\n args.model_type = self.checkpoint['model_type']\n print(\"Restored model type '{}'\".format(args.model_type))\n else:\n print(\"Failed to restore model type\")\n\n \n if 'use_coords' in self.checkpoint:\n args.use_coords = self.checkpoint['use_coords']\n print(\"Restored use coords flag '{}'\".format(args.use_coords))\n else:\n print(\"Failed to restore use coords flag\")\n\n return args\n\n def restore_model_state(self, model):\n if 'model_state_dict' in self.checkpoint:\n model.load_state_dict(self.checkpoint['model_state_dict'])\n print(\"Restored model state\")\n else:\n print(\"Failed to restore model state\")\n\n return model\n\n def restore_epoch(self, epoch):\n if 'epoch' in self.checkpoint:\n epoch = self.checkpoint['epoch']\n print(\"Restored epoch {}\".format(epoch))\n else:\n print(\"Failed to restore epoch\")\n \n return epoch\n\n def restore_optimizer_state(self, optimizer):\n if 'optimizer_state_dict' in self.checkpoint:\n optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])\n print(\"Restored optimizer state\")\n else:\n print(\"Failed to restore optimizer state\")\n\n return optimizer\n\n"
] | [
[
"torch.load"
]
] |
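The persistence.py record above round-trips a checkpoint dict through torch.save/torch.load. A hedged sketch of that round trip with the same dict keys ('epoch', 'model_state_dict'); the path and model here are made up for illustration:

import pathlib
import torch

model = torch.nn.Linear(4, 2)
ckpt_path = pathlib.Path("checkpoint.tar")          # hypothetical path
torch.save({'epoch': 3, 'model_state_dict': model.state_dict()}, ckpt_path)

checkpoint = torch.load(ckpt_path)                  # restore the dict
model.load_state_dict(checkpoint['model_state_dict'])
print("restored epoch", checkpoint['epoch'])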
kobakobashu/posenet-python | [
"52290733504fd0a130cc2301bad5db761c14a4e9"
] | [
"models/helper.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Models helper\n\nThese are helper functions for models.\n\n\"\"\"\n\nimport torch.optim as optim\nimport torch.nn as nn\n\nfrom configs.supported_info import SUPPORTED_OPTIMIZER, SUPPORTED_CRITERION\n\n\ndef get_optimizer(cfg: object, network: object) -> object:\n \"\"\"Get optimizer function\n\n This is function to get optimizer.\n\n Args:\n cfg: Config of optimizer.\n network: Network of model.\n\n Returns:\n Optimizer object.\n\n Raises:\n NotImplementedError: If the optimizer you want to use is not suppoeted.\n\n \"\"\"\n \n optimizer_name = cfg.name\n\n if not optimizer_name:\n return None\n\n if optimizer_name not in SUPPORTED_OPTIMIZER:\n raise NotImplementedError('The optimizer is not supported.')\n\n if optimizer_name == \"adam\":\n return optim.Adam(network.parameters(),\n lr=cfg.lr,\n weight_decay=cfg.decay)\n\n\ndef get_criterion(cfg: object) -> object:\n \"\"\"Get criterion function\n\n This is function to get criterion.\n\n Args:\n cfg: Config of criterion.\n\n Returns:\n Criterion object.\n\n Raises:\n NotImplementedError: If the criterion you want to use is not suppoeted.\n\n \"\"\"\n \n criterion_name = cfg.name\n\n if not criterion_name:\n return None\n\n if criterion_name not in SUPPORTED_CRITERION:\n raise NotImplementedError('The loss function is not supported.')\n\n if criterion_name == \"cross_entropy\":\n return nn.CrossEntropyLoss()\n\n elif criterion_name == \"nll_loss\":\n return nn.NLLLoss()"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.CrossEntropyLoss"
]
] |
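The models/helper.py record above maps config names to optimizer and criterion objects. A minimal sketch of what it returns for the "adam" and "cross_entropy" branches; the network and hyperparameter values are assumptions, not from the record:

import torch.nn as nn
import torch.optim as optim

network = nn.Linear(8, 3)
optimizer = optim.Adam(network.parameters(), lr=1e-3, weight_decay=1e-4)
criterion = nn.CrossEntropyLoss()
print(type(optimizer).__name__, type(criterion).__name__)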
zhangziyezzy/DeepLearningMugenKnock | [
"26830fe049c7da8001977ca0df12e946c0f030eb"
] | [
"Scripts_Model/scripts_pytorch/VGG19_pytorch.py"
] | [
"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom collections import OrderedDict\nfrom easydict import EasyDict\nfrom _main_base import main\nimport os\n\n#---\n# config\n#---\ncfg = EasyDict()\n\n# class\ncfg.CLASS_LABEL = ['akahara', 'madara']\ncfg.CLASS_NUM = len(cfg.CLASS_LABEL)\n\n# model\ncfg.INPUT_HEIGHT = 64\ncfg.INPUT_WIDTH = 64\ncfg.INPUT_CHANNEL = 3\n\ncfg.GPU = False\ncfg.DEVICE = torch.device(\"cuda\" if cfg.GPU and torch.cuda.is_available() else \"cpu\")\n\ncfg.MODEL_SAVE_PATH = 'models/VGG16_{}.pt'\ncfg.MODEL_SAVE_INTERVAL = 200\ncfg.ITERATION = 1000\ncfg.MINIBATCH = 8\ncfg.OPTIMIZER = torch.optim.SGD\ncfg.LEARNING_RATE = 0.1\ncfg.MOMENTUM = 0.9\ncfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()\n\ncfg.TRAIN = EasyDict()\ncfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50\n\ncfg.TRAIN.DATA_PATH = '../Dataset/train/images/'\ncfg.TRAIN.DATA_HORIZONTAL_FLIP = True\ncfg.TRAIN.DATA_VERTICAL_FLIP = True\ncfg.TRAIN.DATA_ROTATION = False\n\ncfg.TEST = EasyDict()\ncfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')\ncfg.TEST.DATA_PATH = '../Dataset/test/images/'\ncfg.TEST.MINIBATCH = 2\n\n# random seed\ntorch.manual_seed(0)\n\n\nclass VGG19(torch.nn.Module):\n def __init__(self):\n super(VGG19, self).__init__()\n\n self.conv1 = torch.nn.Sequential(OrderedDict({\n 'conv1_1' : torch.nn.Conv2d(cfg.INPUT_CHANNEL, 64, kernel_size=3, padding=1, stride=1),\n 'conv1_1_relu' : torch.nn.ReLU(),\n 'conv1_1_bn' : torch.nn.BatchNorm2d(64),\n 'conv1_2' : torch.nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=1),\n 'conv1_2_relu' : torch.nn.ReLU(),\n 'conv1_2_bn' : torch.nn.BatchNorm2d(64),\n }))\n\n self.conv2 = torch.nn.Sequential(OrderedDict({\n 'conv2_1' : torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1),\n 'conv2_1_relu' : torch.nn.ReLU(),\n 'conv2_1_bn' : torch.nn.BatchNorm2d(128),\n 'conv2_2' : torch.nn.Conv2d(128, 128, kernel_size=3, padding=1, stride=1),\n 'conv2_2_relu' : torch.nn.ReLU(),\n 'conv2_2_bn' : torch.nn.BatchNorm2d(128),\n }))\n\n self.conv3 = torch.nn.Sequential(OrderedDict({\n 'conv3_1' : torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_1_relu' : torch.nn.ReLU(),\n 'conv3_1_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_2' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_2_relu' : torch.nn.ReLU(),\n 'conv3_2_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_3' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_3_relu' : torch.nn.ReLU(),\n 'conv3_3_bn' : torch.nn.BatchNorm2d(256),\n 'conv3_4' : torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),\n 'conv3_4_relu' : torch.nn.ReLU(),\n 'conv3_4_bn' : torch.nn.BatchNorm2d(256),\n }))\n\n self.conv4 = torch.nn.Sequential(OrderedDict({\n 'conv4_1' : torch.nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_1_relu' : torch.nn.ReLU(),\n 'conv4_1_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_2_relu' : torch.nn.ReLU(),\n 'conv4_2_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_3_relu' : torch.nn.ReLU(),\n 'conv4_3_bn' : torch.nn.BatchNorm2d(512),\n 'conv4_4' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv4_4_relu' : torch.nn.ReLU(),\n 'conv4_4_bn' : torch.nn.BatchNorm2d(512),\n }))\n\n self.conv5 = torch.nn.Sequential(OrderedDict({\n 'conv5_1' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_1_relu' : torch.nn.ReLU(),\n 'conv5_1_bn' 
: torch.nn.BatchNorm2d(512),\n 'conv5_2' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_2_relu' : torch.nn.ReLU(),\n 'conv5_2_bn' : torch.nn.BatchNorm2d(512),\n 'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_3_relu' : torch.nn.ReLU(),\n 'conv5_3_bn' : torch.nn.BatchNorm2d(512),\n 'conv5_3' : torch.nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1),\n 'conv5_3_relu' : torch.nn.ReLU(),\n 'conv5_3_bn' : torch.nn.BatchNorm2d(512),\n }))\n \n self.top = torch.nn.Sequential(OrderedDict({\n 'Dense1' : torch.nn.Linear(512 * (cfg.INPUT_HEIGHT // 32) * (cfg.INPUT_WIDTH // 32), 256),\n 'Dense1_relu' : torch.nn.ReLU(),\n 'Dense1_dropout' : torch.nn.Dropout(p=0.5),\n 'Dense2' : torch.nn.Linear(256, 256),\n 'Dense2_relu' : torch.nn.ReLU(),\n 'Dense2_dropout' : torch.nn.Dropout(p=0.5),\n }))\n\n self.fc_out = torch.nn.Linear(256, cfg.CLASS_NUM)\n \n\n def forward(self, x):\n # block conv1\n x = self.conv1(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv2\n x = self.conv2(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv3\n x = self.conv3(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv4\n x = self.conv4(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n\n # block conv5\n x = self.conv5(x)\n x = F.max_pool2d(x, 2, stride=2, padding=0)\n \n x = x.view(x.shape[0], -1)\n x = self.top(x)\n x = self.fc_out(x)\n x = F.softmax(x, dim=1)\n return x\n\n# main\nif __name__ == '__main__':\n\n model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])\n os.makedirs(model_save_dir, exist_ok=True)\n\n main(cfg, VGG19())"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.softmax",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.ReLU",
"torch.nn.Dropout"
]
] |
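The VGG19 record above builds each convolutional stage as torch.nn.Sequential over an OrderedDict of conv/ReLU/BatchNorm layers, followed by F.max_pool2d in forward. A single-block sketch of that pattern with a dummy input (block contents trimmed to one conv for brevity):

from collections import OrderedDict
import torch
import torch.nn.functional as F

block = torch.nn.Sequential(OrderedDict({
    'conv1_1': torch.nn.Conv2d(3, 64, kernel_size=3, padding=1, stride=1),
    'conv1_1_relu': torch.nn.ReLU(),
    'conv1_1_bn': torch.nn.BatchNorm2d(64),
}))
x = torch.zeros(1, 3, 64, 64)
x = F.max_pool2d(block(x), 2, stride=2, padding=0)  # halve spatial dims
print(x.shape)                                      # torch.Size([1, 64, 32, 32])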
DionysisChristopoulos/google-research | [
"7f59ef421beef32ca16c2a7215be74f7eba01a0f"
] | [
"blur/synapse_util.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for synapse handling.\"\"\"\n\nimport enum\nimport functools as ft\nfrom typing import Callable, List, Sequence, Text, Union, Optional\nimport dataclasses as dc\n\nimport jax.numpy as jp\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom blur import blur_env\n\nTensorShape = tf.TensorShape\nTensor = Union[tf.Tensor, np.ndarray, jp.array]\n\n\[email protected]\nclass SynapseInitializerParams:\n shape: TensorShape\n in_neurons: int\n out_neurons: int\n\n\nclass UpdateType(enum.Enum):\n FORWARD = 1\n BACKWARD = 2\n BOTH = 3\n NONE = 4\n\n\nSynapseInitializer = Callable[[SynapseInitializerParams], Tensor]\n\n# A callable that takes a sequence of layers and SynapseInitializer and creates\n# appropriately shaped list of Synapses.\nCreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]\n\n\ndef random_uniform_symmetric(shape, seed):\n return (tf.random.uniform(shape, seed=seed) - 0.5) * 2\n\n\ndef random_initializer(start_seed=0, scale_by_channels=False,\n scale=1, bias=0, random_fn=random_uniform_symmetric):\n \"\"\"Returns initializer that generates random sequence.\"\"\"\n seed = [hash(str(start_seed))]\n def impl(params):\n if len(params.shape) >= 3:\n # shape: species x (in+out) x (in+out) x states\n num_channels = int(params.shape[-2])\n seed[0] += 1\n v = random_fn(params.shape, seed[0])\n apply_scale = scale(params) if callable(scale) else scale\n r = v * apply_scale + bias\n if scale_by_channels:\n r = r / (num_channels ** 0.5)\n return r\n return impl\n\n\ndef _random_uniform_fn(start_seed):\n rng = np.random.RandomState(start_seed)\n return lambda shape: tf.constant(rng.uniform( # pylint: disable=g-long-lambda\n low=-1, high=1, size=shape), dtype=np.float32)\n\n\ndef fixed_random_initializer(start_seed=0,\n scale_by_channels=False,\n scale=1,\n bias=0,\n random_fn=None):\n \"\"\"Returns an initializer that generates random (but fixed) sequence.\n\n The resulting tensors are backed by a constant so they produce the same\n value across all calls.\n\n This initializer uses its own random state that is independent of default\n random sequence.\n\n Args:\n start_seed: initial seed passed to np.random.RandomStates\n scale_by_channels: whether to scale by number of channels.\n scale: target scale (default: 1)\n bias: mean of the resulting distribution.\n random_fn: random generator if none will use use _random_uniform_fn\n Returns:\n callable that accepts shape and returns tensorflow constant tensor.\n \"\"\"\n if random_fn is None:\n random_fn = _random_uniform_fn(start_seed)\n\n def impl(params):\n if len(params.shape) >= 3:\n # shape: species x (in+out) x (in+out) x states\n num_channels = int(params.shape[-2])\n v = random_fn(shape=params.shape)\n apply_scale = scale(params) if callable(scale) else scale\n r = v * apply_scale + bias\n if scale_by_channels:\n r = r / (num_channels ** 0.5)\n return r\n\n return impl\n\n\ndef 
create_synapse_init_fns(\n layers,\n initializer):\n \"\"\"Generates network synapse initializers.\n\n Arguments:\n layers: Sequence of network layers (used for shape calculation).\n initializer: SynapseInitializer used to initialize synapse tensors.\n\n Returns:\n A list of functions that produce synapse tensors for all layers upon\n execution.\n \"\"\"\n synapse_init_fns = []\n for pre, post in zip(layers, layers[1:]):\n # shape: population_dims, batch_size, in_channels, neuron_state\n pop_dims = pre.shape[:-3]\n # -2: is the number of channels\n num_inputs = pre.shape[-2] + post.shape[-2] + 1\n # -1: is the number of states in a single neuron.\n synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])\n params = SynapseInitializerParams(\n shape=synapse_shape,\n in_neurons=pre.shape[-2],\n out_neurons=post.shape[-2])\n synapse_init_fns.append(ft.partial(initializer, params))\n return synapse_init_fns\n\n\ndef create_synapses(layers,\n initializer):\n \"\"\"Generates arbitrary form synapses.\n\n Arguments:\n layers: Sequence of network layers (used for shape calculation).\n initializer: SynapseInitializer used to initialize synapse tensors.\n\n Returns:\n A list of created synapse tensors for all layers.\n \"\"\"\n return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]\n\n\ndef transpose_synapse(synapse, env):\n num_batch_dims = len(synapse.shape[:-3])\n perm = [\n *range(num_batch_dims), num_batch_dims + 1, num_batch_dims,\n num_batch_dims + 2\n ]\n return env.transpose(synapse, perm)\n\n\ndef synapse_submatrix(synapse,\n in_channels,\n update_type,\n include_bias = True):\n \"\"\"Returns a submatrix of a synapse matrix given the update type.\"\"\"\n bias = 1 if include_bias else 0\n if update_type == UpdateType.FORWARD:\n return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]\n if update_type == UpdateType.BACKWARD:\n return synapse[Ellipsis, (in_channels + 1):, :(in_channels + bias), :]\n\n\ndef combine_in_out_synapses(in_out_synapse, out_in_synapse,\n env):\n \"\"\"Combines forward and backward synapses into a single matrix.\"\"\"\n batch_dims = in_out_synapse.shape[:-3]\n out_channels, in_channels, num_states = in_out_synapse.shape[-3:]\n synapse = env.concat([\n env.concat([\n env.zeros((*batch_dims, out_channels, out_channels, num_states)),\n in_out_synapse\n ], axis=-2),\n env.concat([\n out_in_synapse,\n env.zeros((*batch_dims, in_channels, in_channels, num_states))\n ], axis=-2)\n ], axis=-3)\n return synapse\n\n\ndef sync_all_synapses(synapses, layers, env):\n \"\"\"Sync synapses across all layers.\n\n For each synapse, syncs its first state forward synapse with backward synapse\n and copies it arocess all the states.\n\n Args:\n synapses: list of synapses in the network.\n layers: list of layers in the network.\n env: Environment\n Returns:\n Synchronized synapses.\n \"\"\"\n for i in range(len(synapses)):\n synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)\n return synapses\n\n\ndef sync_in_and_out_synapse(synapse, in_channels, env):\n \"\"\"Copies forward synapse to backward one.\"\"\"\n in_out_synapse = synapse_submatrix(\n synapse,\n in_channels=in_channels,\n update_type=UpdateType.FORWARD,\n include_bias=True)\n return combine_in_out_synapses(\n in_out_synapse,\n transpose_synapse(in_out_synapse, env),\n env)\n\n\ndef sync_states_synapse(synapse, env, num_states=None):\n \"\"\"Sync synapse's first state across all the other states.\"\"\"\n if num_states is None:\n num_states = 
synapse.shape[-1]\n return env.stack(num_states*[synapse[Ellipsis, 0]], axis=-1)\n\n\ndef normalize_synapses(synapses,\n rescale_to,\n env,\n axis = -3):\n \"\"\"Normalizes synapses across a particular axis (across input by def.).\"\"\"\n # Default value axis=-3 corresponds to normalizing across the input neuron\n # dimension.\n squared = env.sum(synapses ** 2, axis=axis, keepdims=True)\n synapses /= env.sqrt(squared + 1e-9)\n if rescale_to is not None:\n synapses *= rescale_to\n return synapses\n"
] | [
[
"numpy.random.RandomState",
"tensorflow.compat.v1.random.uniform"
]
] |
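The synapse_util.py record above seeds a dedicated np.random.RandomState so synapse initialization is reproducible and independent of the global random sequence. A NumPy-only sketch of that initializer pattern (the TensorFlow constant wrapping from the record is omitted):

import numpy as np

def fixed_uniform(start_seed=0):
    # dedicated RNG: reproducible and decoupled from np.random's global state
    rng = np.random.RandomState(start_seed)
    return lambda shape: rng.uniform(low=-1, high=1, size=shape)

random_fn = fixed_uniform(start_seed=42)
print(random_fn((2, 3)))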
ishine/malaya-speech | [
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18",
"fd34afc7107af1656dff4b3201fa51dda54fde18"
] | [
"pretrained-model/stt/hubert/conformer-tiny-ctc.py",
"session/speaker-change/finetune-vggvox-v2.py",
"pretrained-model/stt/jasper/medium-jasper-ctc.py"
] | [
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '3'\n\nimport pyroomacoustics as pra\nimport numpy as np\nfrom pydub import AudioSegment\nfrom sklearn.utils import shuffle\nfrom glob import glob\nimport random\nimport json\nfrom malaya_speech.train.model.conformer.model import Model as ConformerModel\nfrom malaya_speech.train.model import hubert, ctc\nimport malaya_speech.train as train\nimport malaya_speech.config\nimport malaya_speech.augmentation.waveform as augmentation\nimport malaya_speech\nimport tensorflow as tf\nimport os\nimport string\n\n\nsr = 16000\nmaxlen = 18\nminlen_text = 1\nprob_aug = 0.95\n\nunique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']\n\n\ndef augment_room(y, scale=1.0):\n corners = np.array(\n [[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]\n ).T\n room = pra.Room.from_corners(\n corners,\n fs=sr,\n materials=pra.Material(0.2, 0.15),\n ray_tracing=True,\n air_absorption=True,\n )\n room.extrude(3.5, materials=pra.Material(0.2, 0.15))\n room.set_ray_tracing(\n receiver_radius=0.5, n_rays=1000, energy_thres=1e-5\n )\n room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)\n R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])\n room.add_microphone(R)\n room.simulate()\n return room.mic_array.signals[0]\n\n\ndef random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):\n y_aug = sample.copy()\n dyn_change = np.random.uniform(low=low, high=high)\n y_aug[np.abs(y_aug) >= threshold] = (\n y_aug[np.abs(y_aug) >= threshold] * dyn_change\n )\n return np.clip(y_aug, -1, 1)\n\n\ndef add_uniform_noise(\n sample, power=0.01, return_noise=False, scale=False\n):\n y_noise = sample.copy()\n noise_amp = power * np.random.uniform() * np.amax(y_noise)\n noise = noise_amp * np.random.normal(size=y_noise.shape[0])\n y_noise = y_noise + noise\n if scale:\n y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)\n if return_noise:\n if scale:\n noise = noise / (np.max(np.abs(y_noise)) + 1e-9)\n return y_noise, noise\n else:\n return y_noise\n\n\ndef calc(signal, add_uniform=True):\n choice = random.randint(0, 10)\n print('choice', choice)\n if choice == 0:\n x = augmentation.sox_augment_high(\n signal,\n min_bass_gain=random.randint(25, 50),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=1,\n )\n if choice == 1:\n x = augmentation.sox_augment_high(\n signal,\n min_bass_gain=random.randint(25, 70),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=0,\n )\n if choice == 2:\n x = augmentation.sox_augment_low(\n signal,\n min_bass_gain=random.randint(5, 30),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 50),\n negate=random.randint(0, 1),\n )\n if choice == 3:\n x = augmentation.sox_augment_combine(\n signal,\n min_bass_gain_high=random.randint(25, 70),\n min_bass_gain_low=random.randint(5, 30),\n reverberance=random.randint(0, 80),\n hf_damping=10,\n room_scale=random.randint(0, 90),\n )\n if choice == 4:\n x = augmentation.sox_reverb(\n signal,\n reverberance=random.randint(10, 80),\n hf_damping=10,\n room_scale=random.randint(10, 90),\n )\n if choice == 5:\n x = random_amplitude_threshold(\n signal, threshold=random.uniform(0.35, 0.8)\n )\n if choice == 6:\n x = augmentation.lowpass_filter(\n signal, sr=sr, cutoff=random.randint(200, 551)\n )\n if choice == 7:\n x = augmentation.highpass_filter(\n signal, sr=sr, cutoff=random.randint(551, 1653)\n )\n if choice == 8:\n x = 
augmentation.bandpass_filter(\n signal,\n sr=sr,\n cutoff_low=random.randint(200, 551),\n cutoff_high=random.randint(551, 1653),\n )\n if choice == 9:\n x = augment_room(signal)\n if choice == 10:\n x = signal\n\n if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:\n x = random_amplitude_threshold(\n x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)\n )\n\n if random.gauss(0.5, 0.14) > 0.6 and add_uniform:\n x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))\n\n return x\n\n\ndef mp3_to_wav(file, sr=sr):\n audio = AudioSegment.from_file(file)\n audio = audio.set_frame_rate(sr).set_channels(1)\n sample = np.array(audio.get_array_of_samples())\n return malaya_speech.astype.int_to_float(sample), sr\n\n\ndef generate(file):\n with open(file) as fopen:\n dataset = json.load(fopen)\n audios, cleaned_texts = dataset['X'], dataset['Y']\n while True:\n audios, cleaned_texts = shuffle(audios, cleaned_texts)\n for i in range(len(audios)):\n try:\n if audios[i].endswith('.mp3'):\n # print('found mp3', audios[i])\n wav_data, _ = mp3_to_wav(audios[i])\n else:\n wav_data, _ = malaya_speech.load(audios[i], sr=sr)\n\n if len(cleaned_texts[i]) < minlen_text:\n # print(f'skipped text too short {audios[i]}')\n continue\n\n if (len(wav_data) / sr) > maxlen:\n continue\n\n t = [unique_vocab.index(c) for c in cleaned_texts[i]]\n\n yield {\n 'waveforms': wav_data,\n 'waveforms_length': [len(wav_data)],\n 'targets': t,\n 'targets_length': [len(t)],\n }\n except Exception as e:\n print(e)\n\n\ndef get_dataset(\n file,\n batch_size=12,\n shuffle_size=20,\n thread_count=24,\n maxlen_feature=1800,\n):\n def get():\n dataset = tf.data.Dataset.from_generator(\n generate,\n {\n 'waveforms': tf.float32,\n 'waveforms_length': tf.int32,\n 'targets': tf.int32,\n 'targets_length': tf.int32,\n },\n output_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'waveforms_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n 'targets_length': tf.TensorShape([None]),\n },\n args=(file,),\n )\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'waveforms_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n 'targets_length': tf.TensorShape([None]),\n },\n padding_values={\n 'waveforms': tf.constant(0, dtype=tf.float32),\n 'waveforms_length': tf.constant(0, dtype=tf.int32),\n 'targets': tf.constant(0, dtype=tf.int32),\n 'targets_length': tf.constant(0, dtype=tf.int32),\n },\n )\n return dataset\n\n return get\n\n\nclass Encoder:\n def __init__(self, config):\n self.config = config\n self.encoder = ConformerModel(**self.config)\n\n def __call__(self, x, input_mask, training=True):\n return self.encoder(x, training=training)\n\n\ntotal_steps = 2000000\n\n\ndef model_fn(features, labels, mode, params):\n config_conformer = malaya_speech.config.conformer_tiny_encoder_config\n config_conformer['subsampling']['type'] = 'none'\n config_conformer['dropout'] = 0.0\n encoder = Encoder(config_conformer)\n cfg = hubert.HuBERTConfig(\n extractor_mode='layer_norm',\n dropout=0.0,\n attention_dropout=0.0,\n encoder_layerdrop=0.0,\n dropout_input=0.0,\n dropout_features=0.0,\n final_dim=128,\n )\n model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])\n X = features['waveforms']\n X_len = features['waveforms_length'][:, 0]\n targets = features['targets']\n targets_int32 = tf.cast(targets, tf.int32)\n targets_length = features['targets_length'][:, 0]\n r = 
model(X, padding_mask=X_len, features_only=True, mask=False)\n logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)\n seq_lens = tf.reduce_sum(\n tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1\n )\n mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(\n logits, seq_lens, targets_int32, targets_length\n )\n loss = mean_error\n accuracy = ctc.metrics.ctc_sequence_accuracy(\n logits, seq_lens, targets_int32, targets_length,\n )\n\n tf.identity(loss, 'train_loss')\n tf.identity(accuracy, name='train_accuracy')\n\n tf.summary.scalar('train_accuracy', accuracy)\n\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'\n\n assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(\n variables, init_checkpoint\n )\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = train.optimizer.adamw.create_optimizer(\n loss,\n init_lr=5e-5,\n num_train_steps=total_steps,\n num_warmup_steps=100000,\n end_learning_rate=0.0,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n clip_norm=1.0,\n )\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={\n 'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(\n logits, seq_lens, targets_int32, targets_length\n )\n },\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\ntrain_dataset = get_dataset('bahasa-asr-train-combined.json')\ndev_dataset = get_dataset('bahasa-asr-test.json')\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir='hubert-conformer-tiny-ctc-char',\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=20000,\n max_steps=total_steps,\n eval_fn=dev_dataset,\n train_hooks=train_hooks,\n)\n",
"import os\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../gcs/mesolitica-storage.json'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nimport tensorflow as tf\nimport malaya_speech.train as train\nimport malaya_speech.train.model.vggvox_v2 as vggvox_v2\nimport malaya_speech\nfrom glob import glob\nimport librosa\nimport numpy as np\n\n\ndef lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):\n linear = librosa.stft(\n wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length\n ) # linear spectrogram\n return linear.T\n\n\ndef load_data(\n wav,\n win_length=400,\n sr=16000,\n hop_length=50,\n n_fft=512,\n spec_len=250,\n mode='train',\n):\n linear_spect = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)\n mag, _ = librosa.magphase(linear_spect) # magnitude\n mag_T = mag.T\n freq, time = mag_T.shape\n if mode == 'train':\n if time > spec_len:\n randtime = np.random.randint(0, time - spec_len)\n spec_mag = mag_T[:, randtime: randtime + spec_len]\n else:\n spec_mag = np.pad(mag_T, ((0, 0), (0, spec_len - time)), 'constant')\n else:\n spec_mag = mag_T\n # preprocessing, subtract mean, divided by time-wise var\n mu = np.mean(spec_mag, 0, keepdims=True)\n std = np.std(spec_mag, 0, keepdims=True)\n return (spec_mag - mu) / (std + 1e-5)\n\n\nDIMENSION = 257\n\n\ndef calc(v):\n\n r = load_data(v, mode='eval')\n return r\n\n\ndef preprocess_inputs(example):\n s = tf.compat.v1.numpy_function(calc, [example['inputs']], tf.float32)\n\n s = tf.reshape(s, (DIMENSION, -1, 1))\n example['inputs'] = s\n\n return example\n\n\ndef parse(serialized_example):\n\n data_fields = {\n 'inputs': tf.VarLenFeature(tf.float32),\n 'targets': tf.VarLenFeature(tf.int64),\n }\n features = tf.parse_single_example(\n serialized_example, features=data_fields\n )\n for k in features.keys():\n features[k] = features[k].values\n\n features = preprocess_inputs(features)\n\n keys = list(features.keys())\n for k in keys:\n if k not in ['inputs', 'targets']:\n features.pop(k, None)\n\n return features\n\n\ndef get_dataset(files, batch_size=32, shuffle_size=1024, thread_count=24):\n def get():\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.map(parse, num_parallel_calls=thread_count)\n dataset = dataset.shuffle(shuffle_size)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'inputs': tf.TensorShape([DIMENSION, None, 1]),\n 'targets': tf.TensorShape([None]),\n },\n padding_values={\n 'inputs': tf.constant(0, dtype=tf.float32),\n 'targets': tf.constant(0, dtype=tf.int64),\n },\n )\n dataset = dataset.repeat()\n return dataset\n\n return get\n\n\nlearning_rate = 1e-5\ninit_checkpoint = '../vggvox-speaker-identification/v2/vggvox.ckpt'\n\n\ndef model_fn(features, labels, mode, params):\n Y = tf.cast(features['targets'][:, 0], tf.int32)\n model = vggvox_v2.Model(features['inputs'], num_class=2, mode='train')\n\n logits = model.logits\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=Y\n )\n )\n\n tf.identity(loss, 'train_loss')\n\n accuracy = tf.metrics.accuracy(\n labels=Y, predictions=tf.argmax(logits, axis=1)\n )\n\n tf.identity(accuracy[1], name='train_accuracy')\n tf.summary.scalar('train_accuracy', accuracy[1])\n\n variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n variables = [v for v in variables if 'prediction' not in v.name]\n\n assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(\n variables, init_checkpoint\n )\n\n tf.train.init_from_checkpoint(init_checkpoint, 
assignment_map)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={'accuracy': accuracy},\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\n\nfiles = tf.io.gfile.glob(\n 'gs://mesolitica-general/speaker-change/data/*.tfrecords'\n)\ntrain_dataset = get_dataset(files)\n\nsave_directory = 'output-vggvox-v2-speaker-change'\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir=save_directory,\n num_gpus=1,\n log_step=1,\n save_checkpoint_step=25000,\n max_steps=300000,\n train_hooks=train_hooks,\n)\n",
"import os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '2,3'\n\nimport tensorflow as tf\nimport malaya_speech\nimport malaya_speech.augmentation.waveform as augmentation\nimport malaya_speech.augmentation.spectrogram as mask_augmentation\nimport malaya_speech.train.model.medium_jasper as jasper\nimport malaya_speech.train.model.ctc as ctc\nimport malaya_speech.train as train\nfrom malaya_speech.train.model.quartznet import layer, abstract\nimport numpy as np\nimport random\nfrom glob import glob\nimport json\n\nwith open('malaya-speech-sst-vocab.json') as fopen:\n unique_vocab = json.load(fopen) + ['{', '}', '[']\n\nparameters = {\n 'optimizer_params': {},\n 'lr_policy_params': {\n 'learning_rate': 1e-4,\n 'min_lr': 1e-6,\n 'warmup_steps': 0,\n 'decay_steps': 500_000,\n },\n}\n\n\ndef learning_rate_scheduler(global_step):\n return train.schedule.cosine_decay(\n global_step, **parameters['lr_policy_params']\n )\n\n\nfeaturizer = malaya_speech.tf_featurization.STTFeaturizer(\n normalize_per_feature=True\n)\nn_mels = featurizer.num_feature_bins\n\n\ndef mel_augmentation(features):\n\n features = mask_augmentation.mask_frequency(features, width_freq_mask=15)\n features = mask_augmentation.mask_time(\n features, width_time_mask=int(features.shape[0] * 0.05)\n )\n return features\n\n\ndef preprocess_inputs(example):\n s = featurizer.vectorize(example['waveforms'])\n s = tf.reshape(s, (-1, n_mels))\n s = tf.compat.v1.numpy_function(mel_augmentation, [s], tf.float32)\n mel_fbanks = tf.reshape(s, (-1, n_mels))\n length = tf.cast(tf.shape(mel_fbanks)[0], tf.int32)\n length = tf.expand_dims(length, 0)\n example['inputs'] = mel_fbanks\n example['inputs_length'] = length\n\n return example\n\n\ndef parse(serialized_example):\n\n data_fields = {\n 'waveforms': tf.VarLenFeature(tf.float32),\n 'targets': tf.VarLenFeature(tf.int64),\n }\n features = tf.parse_single_example(\n serialized_example, features=data_fields\n )\n for k in features.keys():\n features[k] = features[k].values\n\n features = preprocess_inputs(features)\n\n keys = list(features.keys())\n for k in keys:\n if k not in ['waveforms', 'inputs', 'inputs_length', 'targets']:\n features.pop(k, None)\n\n return features\n\n\ndef get_dataset(\n path,\n batch_size=32,\n shuffle_size=32,\n thread_count=24,\n maxlen_feature=1800,\n):\n def get():\n files = glob(path)\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.shuffle(shuffle_size)\n dataset = dataset.repeat()\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n dataset = dataset.map(parse, num_parallel_calls=thread_count)\n dataset = dataset.padded_batch(\n batch_size,\n padded_shapes={\n 'waveforms': tf.TensorShape([None]),\n 'inputs': tf.TensorShape([None, n_mels]),\n 'inputs_length': tf.TensorShape([None]),\n 'targets': tf.TensorShape([None]),\n },\n padding_values={\n 'waveforms': tf.constant(0, dtype=tf.float32),\n 'inputs': tf.constant(0, dtype=tf.float32),\n 'inputs_length': tf.constant(0, dtype=tf.int32),\n 'targets': tf.constant(0, dtype=tf.int64),\n },\n )\n return dataset\n\n return get\n\n\ndef model_fn(features, labels, mode, params):\n\n model = jasper.Model(\n features['inputs'], features['inputs_length'][:, 0], training=True\n )\n logits = tf.layers.dense(model.logits['outputs'], len(unique_vocab) + 1)\n seq_lens = model.logits['src_length']\n\n targets_int32 = tf.cast(features['targets'], tf.int32)\n\n mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(\n logits, targets_int32, seq_lens\n )\n\n loss = mean_error\n accuracy = 
ctc.metrics.ctc_sequence_accuracy(\n logits, targets_int32, seq_lens\n )\n\n tf.identity(loss, 'train_loss')\n tf.identity(accuracy, name='train_accuracy')\n\n tf.summary.scalar('train_accuracy', accuracy)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = train.optimizer.optimize_loss(\n loss,\n tf.train.AdamOptimizer,\n parameters['optimizer_params'],\n learning_rate_scheduler,\n summaries=['learning_rate', 'loss_scale'],\n larc_params=parameters.get('larc_params', None),\n loss_scaling=parameters.get('loss_scaling', 1.0),\n loss_scaling_params=parameters.get('loss_scaling_params', None),\n )\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op\n )\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.EVAL,\n loss=loss,\n eval_metric_ops={\n 'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(\n logits, targets_int32, seq_lens\n )\n },\n )\n\n return estimator_spec\n\n\ntrain_hooks = [\n tf.train.LoggingTensorHook(\n ['train_accuracy', 'train_loss'], every_n_iter=1\n )\n]\ntrain_dataset = get_dataset(\n '../speech-bahasa/bahasa-asr/data/bahasa-asr-train-*'\n)\ndev_dataset = get_dataset(\n '../speech-bahasa/bahasa-asr-test/data/bahasa-asr-dev-*'\n)\n\ntrain.run_training(\n train_fn=train_dataset,\n model_fn=model_fn,\n model_dir='asr-medium-jasper-ctc',\n num_gpus=2,\n log_step=1,\n save_checkpoint_step=5000,\n max_steps=parameters['lr_policy_params']['decay_steps'],\n eval_fn=dev_dataset,\n train_hooks=train_hooks,\n)\n"
] | [
[
"numpy.random.uniform",
"tensorflow.summary.scalar",
"tensorflow.train.LoggingTensorHook",
"tensorflow.logical_not",
"tensorflow.identity",
"tensorflow.get_collection",
"numpy.random.normal",
"numpy.abs",
"sklearn.utils.shuffle",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.cast",
"numpy.amax",
"numpy.clip",
"tensorflow.TensorShape",
"tensorflow.train.init_from_checkpoint",
"numpy.array",
"tensorflow.constant"
],
[
"tensorflow.summary.scalar",
"tensorflow.data.TFRecordDataset",
"tensorflow.VarLenFeature",
"tensorflow.reshape",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.identity",
"tensorflow.train.LoggingTensorHook",
"tensorflow.train.init_from_checkpoint",
"tensorflow.train.get_or_create_global_step",
"tensorflow.constant",
"numpy.mean",
"tensorflow.parse_single_example",
"tensorflow.get_collection",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.cast",
"tensorflow.TensorShape",
"numpy.std",
"tensorflow.io.gfile.glob",
"numpy.pad",
"tensorflow.train.AdamOptimizer",
"tensorflow.compat.v1.numpy_function",
"tensorflow.argmax",
"numpy.random.randint"
],
[
"tensorflow.summary.scalar",
"tensorflow.train.LoggingTensorHook",
"tensorflow.VarLenFeature",
"tensorflow.data.TFRecordDataset",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.parse_single_example",
"tensorflow.expand_dims",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.cast",
"tensorflow.TensorShape",
"tensorflow.compat.v1.numpy_function",
"tensorflow.identity",
"tensorflow.constant"
]
] |
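The malaya-speech record above augments training audio with, among other things, additive uniform-scaled Gaussian noise. A simplified sketch of that add_uniform_noise step applied to a synthetic waveform (pure NumPy, no audio I/O):

import numpy as np

def add_uniform_noise(sample, power=0.01):
    noise_amp = power * np.random.uniform() * np.amax(sample)
    noise = noise_amp * np.random.normal(size=sample.shape[0])
    return sample + noise

y = np.sin(np.linspace(0, 8 * np.pi, 16000)).astype(np.float32)  # fake 1 s clip
y_noisy = add_uniform_noise(y, power=0.01)
print(np.abs(y_noisy - y).max())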
fab464654/SSD_on_ActiveVisionDataset | [
"1bc6f0745241d0b45c3f257c6fb09ea0435c993e"
] | [
"train.py"
] | [
"import time\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nfrom model import SSD300, MultiBoxLoss\nfrom datasets import PascalVOCDataset\nfrom utils import *\n\n# Data parameters\ndata_folder = 'google_drive/MyDrive/ColabNotebooks/Project/GT' # folder with data files\nkeep_difficult = True # use objects considered difficult to detect?\n\n# Model parameters\n# Not too many here since the SSD300 has a very specific structure\nn_classes = len(label_map) # number of different types of objects\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Learning parameters\ncheckpoint = \"google_drive/MyDrive/checkpointsIeri/checkpoint_ssd300.pth.tar\" # path to model checkpoint, None if none\nbatch_size = 9 # batch size\niterations = 120000 # number of iterations to train\nworkers = 4 # number of workers for loading data in the DataLoader\nprint_freq = 5 # print training status every __ batches\nlr = 5e-4 # learning rate\ndecay_lr_at = [80000, 100000] # decay learning rate after these many iterations\ndecay_lr_to = 0.1 # decay learning rate to this fraction of the existing learning rate\nmomentum = 0.9 # momentum\nweight_decay = 5e-4 # weight decay\ngrad_clip = None # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MuliBox loss calculation\n\ncudnn.benchmark = True\n\n\ndef main():\n \"\"\"\n Training.\n \"\"\"\n global start_epoch, label_map, epoch, checkpoint, decay_lr_at\n\n # Initialize model or load checkpoint\n if checkpoint is None:\n start_epoch = 0\n model = SSD300(n_classes=n_classes)\n # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo\n biases = list()\n not_biases = list()\n for param_name, param in model.named_parameters():\n if param.requires_grad:\n if param_name.endswith('.bias'):\n biases.append(param)\n else:\n not_biases.append(param)\n optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],\n lr=lr, momentum=momentum, weight_decay=weight_decay)\n\n else:\n checkpoint = torch.load(checkpoint)\n start_epoch = checkpoint['epoch'] + 1\n print('\\nLoaded checkpoint from epoch %d.\\n' % start_epoch)\n model = checkpoint['model']\n optimizer = checkpoint['optimizer']\n\n # Move to default device\n model = model.to(device)\n criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)\n\n #import active_vision_dataset_processing.data_loading\n import transforms, active_vision_dataset\n\n #Include all instances\n pick_trans = transforms.PickInstances(range(34))\n\n TRAIN_PATH = \"./google_drive/MyDrive/ColabNotebooks/Project/trainDataset\"\n\n \n train_dataset = active_vision_dataset.AVD(root=TRAIN_PATH, train=True,\n target_transform=pick_trans,\n scene_list=['Home_001_1', \n 'Home_002_1',\n 'Home_003_1', \n 'Home_004_1',\n 'Home_005_1',\n 'Home_006_1',\n 'Home_007_1',\n 'Home_008_1',\n 'Home_014_1',\n 'Home_011_1',\n 'Home_010_1',\n 'Office_001_1'],\n fraction_of_no_box=-1)\n \n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=active_vision_dataset.collate\n )\n \"\"\"\n #I TRY TO USE THE DEFAULT DATASET LOADER::::::::::::::\n\n # Custom dataloaders\n train_dataset = PascalVOCDataset(data_folder,\n split='train',\n keep_difficult=keep_difficult)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,\n 
collate_fn=train_dataset.collate_fn, num_workers=workers,\n pin_memory=True) # note that we're passing the collate function here\n \"\"\"\n\n # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)\n # To convert iterations to epochs, divide iterations by the number of iterations per epoch\n # The paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations\n epochs = iterations // (len(train_dataset) // 32)\n decay_lr_at = [it // (len(train_dataset) // 32) for it in decay_lr_at]\n\n # Epochs\n for epoch in range(start_epoch, epochs):\n \n # Decay learning rate at particular epochs\n if epoch in decay_lr_at:\n adjust_learning_rate(optimizer, decay_lr_to)\n\n # One epoch's training\n train(train_loader=train_loader,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch)\n\n # Save checkpoint\n save_checkpoint(epoch, model, optimizer)\n \n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n \n \n \"\"\"\n One epoch's training.\n\n :param train_loader: DataLoader for training data\n :param model: model\n :param criterion: MultiBox loss\n :param optimizer: optimizer\n :param epoch: epoch number\n \"\"\"\n model.train() # training mode enables dropout\n\n batch_time = AverageMeter() # forward prop. + back prop. time\n data_time = AverageMeter() # data loading time\n losses = AverageMeter() # loss\n\n start = time.time()\n\n import numpy as np\n # Batches\n for i, (images, labels) in enumerate(train_loader):\n\n #CHECK / REMOVE THIS CODE!\n data_time.update(time.time() - start)\n #print(len(images))\n #print(labels)\n # Move to default device\n data = images\n a = np.asarray(data)\n #print(a.shape)\n #a = np.squeeze(a, axis=1) # shape should now be (L, 224, 224, 3)\n \n\n #image = torch.from_numpy(a) \n #image = image.permute(0,3,1,2)\n #print(image.shape)\n\n #Pre-processing: \n from torchvision import transforms as transf\n preprocess = transf.Compose([\n transf.ToPILImage(),\n transf.Resize(300),\n transf.CenterCrop(300),\n transf.ToTensor(), \n transf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n \n for j in range(batch_size): \n \n if j == 0: \n input_tensor = preprocess(images[j])\n input_tensor = input_tensor.unsqueeze(0)\n input_batch = input_tensor\n else:\n input_tensor = preprocess(images[j])\n #print(input_tensor)\n input_tensor = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n #print(input_tensor.shape)\n input_batch = torch.cat((input_batch, input_tensor), 0)\n #print(\"shape images: \",input_batch.shape) \n\n \n \n # In the Active Vision Dataset we have this formatting:\n # [xmin ymin xmax ymax instance_id difficulty]\n \n \"\"\" From the Tutorial: \nSince the number of objects in any given image can vary, we can't use a fixed \nsize tensor for storing the bounding boxes for the entire batch of N images.\n\nTherefore, ground truth bounding boxes fed to the model must be a list of \nlength N, where each element of the list is a Float tensor of dimensions\nN_o, 4, where N_o is the number of objects present in that particular image.\n\nTherefore, ground truth labels fed to the model must be a list of length N, \nwhere each element of the list is a Long tensor of dimensions N_o, where N_o \nis the number of objects present in that particular image.\n \"\"\"\n #Prints to test\n #print(j)\n box_id_diff = [b for b in labels[j][0]] \n box = [l[0:4] for l in box_id_diff]\n\n #print('before:',box) 
#To check\n\n #Boundary coordinates as requested\n for k in range(len(box)): \n box[k][0] = box[k][0]/1920.0\n box[k][2] = box[k][2]/1920.0 \n box[k][1] = box[k][1]/1080.0\n box[k][3] = box[k][3]/1080.0 \n\n #print('after:',box) #To check\n \n box_tensor = torch.FloatTensor(box).to(device)\n\n #Done with the parameter in AVD method\n \"\"\" \n #Check if there are objects in the images\n if j == 0: \n start = True\n \n if len(box_tensor) > 0:\n if start == True:\n box_list = box_tensor\n start = False\n elif start == False:\n box_list = [box_list, box_tensor] \n #box_list = torch.cat((box_list,box_tensor),0) \n else:\n start = True\n \"\"\"\n \n #print(box_tensor) #To check\n\n if j == 0: \n box_list = [box_tensor]\n else:\n box_list.append(box_tensor) \n\n label = [l[4] for l in box_id_diff]\n label_tensor = torch.LongTensor(label).to(device)\n if j == 0: \n label_list = [label_tensor]\n else:\n label_list.append(label_tensor) \n\n \n #print(box_id_diff[0][0:4])\n \n \"\"\"\n if len(box_id_diff.size())-1 != 0:\n if j == 0: \n box = box_id_diff[0][0:4]\n print(\"asad:\",box)\n #box = box.unsqueeze(0)\n boxes = box\n else:\n box = [l[0:4] for l in box_id_diff]\n\n #box = box.unsqueeze(0) # create a mini-batch as expected by the model\n #print(input_tensor.shape)\n boxes = torch.cat((boxes, box), 0)\n print(\"boxes:\", boxes)\n \"\"\"\n #box = torch.split(box_id_diff, 2)\n #print(box)\n \"\"\"\n if not labels[j][0]:\n labels = [] \n print(\"coasc\") \n else: \n labels = [l.to(device) for l in torch.tensor(labels[j][0][4])]\n \"\"\"\n \n #print(\"list of boxes:\",box_list)\n #print(\"list of labels:\", label_list)\n\n images = input_batch.to(device) # (batch_size (N), 3, 300, 300)\n #print(images.shape)\n boxes = box_list\n labels = label_list\n\n # Forward prop. \n predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)\n\n #Prints to check the dimensions\n #print(predicted_locs.shape) #correct \n #print(predicted_scores.shape) #correct \n\n # Loss\n loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar\n\n # Backward prop.\n optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients, if necessary\n if grad_clip is not None:\n clip_gradient(optimizer, grad_clip)\n\n # Update model\n optimizer.step()\n\n losses.update(loss.item(), images.size(0))\n batch_time.update(time.time() - start)\n\n start = time.time()\n\n \n # Print status\n if i % print_freq == 0: \n print('Epoch: [{0}][{1}/{2}]\\t' \n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(epoch, i, len(train_loader), loss=losses))\n \"\"\"\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(epoch, i, len(train_loader),\n batch_time=batch_time,\n data_time=data_time, loss=losses))\n \"\"\" \n del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.asarray"
]
] |
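The SSD train.py record above rescales Active Vision Dataset boxes ([xmin, ymin, xmax, ymax, instance_id, difficulty] in 1920x1080 pixels) to fractional boundary coordinates and packs them into per-image tensors. A sketch of that preprocessing on one hypothetical annotation:

import torch

boxes_with_meta = [[480, 270, 960, 540, 12, 1]]   # hypothetical AVD annotation
boxes = [b[:4] for b in boxes_with_meta]
for b in boxes:
    b[0] /= 1920.0                                # x coordinates -> [0, 1]
    b[2] /= 1920.0
    b[1] /= 1080.0                                # y coordinates -> [0, 1]
    b[3] /= 1080.0
labels = [b[4] for b in boxes_with_meta]

box_tensor = torch.FloatTensor(boxes)             # shape (N_objects, 4)
label_tensor = torch.LongTensor(labels)
print(box_tensor, label_tensor)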
1nadequacy/dm_control | [
"a55474768cf0a6d570fe4a376802630027ad5f01"
] | [
"dm_control/rl/specs_test.py"
] | [
"# Copyright 2017 The dm_control Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for specs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Internal dependencies.\n\nfrom absl.testing import absltest\nfrom dm_control.rl import specs as array_spec\nimport numpy as np\nimport six\n\n\nclass ArraySpecTest(absltest.TestCase):\n\n def testShapeTypeError(self):\n with self.assertRaises(TypeError):\n array_spec.ArraySpec(32, np.int32)\n\n def testDtypeTypeError(self):\n with self.assertRaises(TypeError):\n array_spec.ArraySpec((1, 2, 3), \"32\")\n\n def testStringDtype(self):\n array_spec.ArraySpec((1, 2, 3), \"int32\")\n\n def testNumpyDtype(self):\n array_spec.ArraySpec((1, 2, 3), np.int32)\n\n def testDtype(self):\n spec = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertEqual(np.int32, spec.dtype)\n\n def testShape(self):\n spec = array_spec.ArraySpec([1, 2, 3], np.int32)\n self.assertEqual((1, 2, 3), spec.shape)\n\n def testEqual(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertEqual(spec_1, spec_2)\n\n def testNotEqualDifferentShape(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = array_spec.ArraySpec((1, 3, 3), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualDifferentDtype(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int64)\n spec_2 = array_spec.ArraySpec((1, 2, 3), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualOtherClass(self):\n spec_1 = array_spec.ArraySpec((1, 2, 3), np.int32)\n spec_2 = None\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = ()\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n def testIsUnhashable(self):\n spec = array_spec.ArraySpec(shape=(1, 2, 3), dtype=np.int32)\n with self.assertRaisesRegexp(TypeError, \"unhashable type\"):\n hash(spec)\n\n def testValidateDtype(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n spec.validate(np.zeros((1, 2), dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.zeros((1, 2), dtype=np.float32))\n\n def testValidateShape(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n spec.validate(np.zeros((1, 2), dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.zeros((1, 2, 3), dtype=np.int32))\n\n def testGenerateValue(self):\n spec = array_spec.ArraySpec((1, 2), np.int32)\n test_value = spec.generate_value()\n spec.validate(test_value)\n\n\nclass BoundedArraySpecTest(absltest.TestCase):\n\n def testInvalidMinimum(self):\n with six.assertRaisesRegex(self, ValueError, \"not compatible\"):\n array_spec.BoundedArraySpec((3, 5), np.uint8, (0, 0, 0), (1, 1))\n\n def testInvalidMaximum(self):\n with six.assertRaisesRegex(self, ValueError, \"not compatible\"):\n array_spec.BoundedArraySpec((3, 5), 
np.uint8, 0, (1, 1, 1))\n\n def testMinMaxAttributes(self):\n spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))\n self.assertEqual(type(spec.minimum), np.ndarray)\n self.assertEqual(type(spec.maximum), np.ndarray)\n\n def testNotWriteable(self):\n spec = array_spec.BoundedArraySpec((1, 2, 3), np.float32, 0, (5, 5, 5))\n with six.assertRaisesRegex(self, ValueError, \"read-only\"):\n spec.minimum[0] = -1\n with six.assertRaisesRegex(self, ValueError, \"read-only\"):\n spec.maximum[0] = 100\n\n def testEqualBroadcastingBounds(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=0.0, maximum=1.0)\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertEqual(spec_1, spec_2)\n\n def testNotEqualDifferentMinimum(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertNotEqual(spec_1, spec_2)\n\n def testNotEqualOtherClass(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, -0.6], maximum=[1.0, 1.0])\n spec_2 = array_spec.ArraySpec((1, 2), np.int32)\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = None\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n spec_2 = ()\n self.assertNotEqual(spec_1, spec_2)\n self.assertNotEqual(spec_2, spec_1)\n\n def testNotEqualDifferentMaximum(self):\n spec_1 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=0.0, maximum=2.0)\n spec_2 = array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=[0.0, 0.0], maximum=[1.0, 1.0])\n self.assertNotEqual(spec_1, spec_2)\n\n def testIsUnhashable(self):\n spec = array_spec.BoundedArraySpec(\n shape=(1, 2), dtype=np.int32, minimum=0.0, maximum=2.0)\n with self.assertRaisesRegexp(TypeError, \"unhashable type\"):\n hash(spec)\n\n def testRepr(self):\n as_string = repr(array_spec.BoundedArraySpec(\n (1, 2), np.int32, minimum=101.0, maximum=73.0))\n self.assertIn(\"101\", as_string)\n self.assertIn(\"73\", as_string)\n\n def testValidateBounds(self):\n spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)\n spec.validate(np.array([[5, 6], [8, 10]], dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.array([[5, 6], [8, 11]], dtype=np.int32))\n with self.assertRaises(ValueError):\n spec.validate(np.array([[4, 6], [8, 10]], dtype=np.int32))\n\n def testGenerateValue(self):\n spec = array_spec.BoundedArraySpec((2, 2), np.int32, minimum=5, maximum=10)\n test_value = spec.generate_value()\n spec.validate(test_value)\n\n def testScalarBounds(self):\n spec = array_spec.BoundedArraySpec((), np.float, minimum=0.0, maximum=1.0)\n\n self.assertIsInstance(spec.minimum, np.ndarray)\n self.assertIsInstance(spec.maximum, np.ndarray)\n\n # Sanity check that numpy compares correctly to a scalar for an empty shape.\n self.assertEqual(0.0, spec.minimum)\n self.assertEqual(1.0, spec.maximum)\n\n # Check that the spec doesn't fail its own input validation.\n _ = array_spec.BoundedArraySpec(\n spec.shape, spec.dtype, spec.minimum, spec.maximum)\n\n\nif __name__ == \"__main__\":\n absltest.main()\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
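The dm_control record above exercises a spec contract: construct a spec from a shape and dtype, validate candidate arrays against it, and generate a conforming value. The following is a minimal sketch of that pattern, not the dm_control implementation; the class name SimpleArraySpec and its method bodies are illustrative assumptions, with only the method names (validate, generate_value) mirroring the tested API.

import numpy as np

class SimpleArraySpec:
    """Illustrative shape/dtype contract; not dm_control's ArraySpec."""

    def __init__(self, shape, dtype):
        # shape must be a sequence (cf. testShapeTypeError in the record above)
        if not isinstance(shape, (tuple, list)):
            raise TypeError("shape must be a tuple or list")
        self._shape = tuple(shape)
        # np.dtype() raises TypeError for invalid strings such as "32"
        self._dtype = np.dtype(dtype)

    @property
    def shape(self):
        return self._shape

    @property
    def dtype(self):
        return self._dtype

    def validate(self, value):
        # Reject arrays whose shape or dtype does not match the spec.
        value = np.asarray(value)
        if value.shape != self._shape or value.dtype != self._dtype:
            raise ValueError(
                "expected shape %r and dtype %r, got shape %r and dtype %r"
                % (self._shape, self._dtype, value.shape, value.dtype))
        return value

    def generate_value(self):
        # A zero-filled array trivially satisfies the spec (cf. testGenerateValue).
        return np.zeros(self._shape, dtype=self._dtype)

spec = SimpleArraySpec((1, 2), np.int32)
spec.validate(spec.generate_value())                    # passes
# spec.validate(np.zeros((1, 2), dtype=np.float32))     # would raise ValueError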
JoeTao-097/Multi-REZ-Evalution-for-Breast-Ultrasound-Images | [
"344d64ad2fe9d790c49e8005b3abee219d362278"
] | [
"Model_test.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Aug 2 17:32:52 2021\r\n\r\n@author: jiangyt\r\n\"\"\"\r\n\r\nfrom Tools import *\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Input, BatchNormalization\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, add, AveragePooling2D, ZeroPadding2D, GlobalAveragePooling2D\r\nfrom tensorflow.keras.models import Model, Sequential\r\n\r\n\"\"\"\r\nWeight Dict\r\n\"\"\"\r\nWeight = {'Resnet50_448':\"./model_checkpoints/ResNet50_448_checkpoints/20218131038.h5\",\r\n 'MobileNet_224':\"./model_checkpoints/MobileNet_224_checkpoints/202189956.h5\",\r\n 'Xception_448':\"./model_checkpoints/Xception_448_checkpoints/2021810951.h5\",\r\n 'EfficientNet_B0_320':\"./model_checkpoints/EfficientNetB0_320_checkpoints/2021871045.h5\",\r\n 'DenseNet121_448':\"./model_checkpoints/DenseNet121_448_checkpoints/2021891655.h5\"}\r\n\r\n\"\"\"\r\nLoad model\r\n\"\"\"\r\ndf = pd.read_excel('./AI-Physician Comparasion Dataset.xlsx')\r\n# df = pd.read_csv('/home/joe/Project/Breast_new/20210805_b_m_Xception_train/df_test_small.csv')\r\n\r\n\"\"\"\r\nEval each model\r\n\"\"\"\r\nfor key in Weight.keys():\r\n if key == 'Resnet50_448':\r\n from tensorflow.keras.applications.resnet50 import preprocess_input\r\n backbone_model= keras.applications.resnet50.ResNet50(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n elif key == 'MobileNet_224':\r\n from tensorflow.keras.applications.mobilenet import preprocess_input\r\n backbone_model= keras.applications.mobilenet.MobileNet(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(224, 224, 3), pooling=None, classes=2)\r\n elif key == 'Xception_448':\r\n from tensorflow.keras.applications.xception import preprocess_input\r\n backbone_model= keras.applications.xception.Xception(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n elif key == 'EfficientNet_B0_320':\r\n from tensorflow.keras.applications.efficientnet import preprocess_input\r\n backbone_model= keras.applications.efficientnet.EfficientNetB0(include_top=False, weights=None, input_tensor=None,\r\n input_shape=(320, 320, 3), pooling=None, classes=2)\r\n elif key == 'DenseNet121_448':\r\n from tensorflow.keras.applications.densenet import preprocess_input\r\n\r\n backbone_model = keras.applications.densenet.DenseNet121(include_top=False, weights=\"imagenet\",\r\n input_tensor=None,\r\n input_shape=(448, 448, 3), pooling=None, classes=2)\r\n else:\r\n print('Error: No model weight find')\r\n test_model = Sequential()\r\n test_model.add(backbone_model)\r\n test_model.add(GlobalAveragePooling2D())\r\n test_model.add(Dense(2, activation='softmax', name='fc1'))\r\n test_model.load_weights(Weight[key])\r\n\r\n test_model.summary()\r\n\r\n y_true = []\r\n y_pred = []\r\n\r\n for i in range(len(df)):\r\n y_true.append(df['malignancy'][i])\r\n x = Image.open(df['path'][i])\r\n x = np.array(x)\r\n x = zero_pad(x,int(key.split('_')[-1]))\r\n x = preprocess_input(x)\r\n x = x.reshape(1,x.shape[0],x.shape[1],x.shape[2])\r\n y_pred.append(test_model.predict(x))\r\n \r\n\r\n y_pred = np.array(y_pred)\r\n y_pred = y_pred.reshape(y_pred.shape[0],2)\r\n y_pred_1 = y_pred[:,1]\r\n\r\n thresh_0=get_auc(0, np.array(y_true), np.array(y_pred_1), 'Malignancy', plot=False)\r\n y_pred_comp_lvl=[1 if y>thresh_0 else 0 for y in y_pred_1]\r\n cm_comp=confusion_matrix(y_true, y_pred_comp_lvl)\r\n\r\n fig, 
axes = plt.subplots(nrows=2, ncols=2)\r\n fig.tight_layout(pad=2, w_pad=2.)\r\n fig.set_figheight(8)\r\n fig.set_figwidth(7)\r\n thresh_0=get_auc(axes[0, 0], np.array(y_true), np.array(y_pred_1), 'Performance of {}'.format(key))\r\n thresh_AP=get_precision_recall(axes[0, 1], np.array(y_true), np.array(y_pred_1), 'Malignancy=0 vs 1')\r\n plot_confusion_matrix(axes[1, 0], cm_comp, [\"0\", \"1\"], title='Malignancy', normalize=False)\r\n plot_confusion_matrix(axes[1, 1], cm_comp, [\"0\", \"1\"], title='Malignancy (normalized)')\r\n print('f1 score is: {:.3f}'.format(f1_score(y_true, y_pred_comp_lvl)))\r\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.applications.densenet.preprocess_input",
"tensorflow.keras.applications.mobilenet.MobileNet",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.densenet.DenseNet121",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.applications.efficientnet.EfficientNetB0",
"tensorflow.keras.layers.GlobalAveragePooling2D"
]
] |
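The Model_test.py record above repeatedly builds the same evaluation head: a headless backbone, GlobalAveragePooling2D, and a 2-way softmax Dense layer, with the architecture-specific preprocess_input applied before predict(). The sketch below restates that pattern in isolation under stated assumptions: the MobileNet_224 entry is chosen arbitrarily, the checkpoint path is a placeholder, and the random input stands in for a real ultrasound image; it is not the repository's actual test script.

import numpy as np
from tensorflow import keras
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Sequential

# Headless backbone; weights=None because a trained checkpoint would be
# loaded for the full stack afterwards.
backbone = keras.applications.mobilenet.MobileNet(
    include_top=False, weights=None, input_shape=(224, 224, 3))

model = Sequential([
    backbone,
    GlobalAveragePooling2D(),
    Dense(2, activation="softmax", name="fc1"),
])
# model.load_weights("model_checkpoints/MobileNet_224_checkpoints/...")  # placeholder path

# One preprocessed image -> two class probabilities; a batch axis is added first.
x = np.random.randint(0, 255, size=(224, 224, 3)).astype("float32")  # stand-in image
x = preprocess_input(x)
probs = model.predict(x[np.newaxis, ...])
print(probs.shape)  # (1, 2)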
arnoyu-hub/COMP0016miemie | [
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea",
"59af664dcf190eab4f93cefb8471908717415fea"
] | [
"venv/Lib/site-packages/pandas/tests/series/indexing/test_get.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_at_time.py",
"venv/Lib/site-packages/pandas/tests/arrays/categorical/test_operators.py",
"venv/Lib/site-packages/pandas/tests/frame/methods/test_filter.py",
"venv/Lib/site-packages/sklearn/base.py"
] | [
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import Series\r\nimport pandas._testing as tm\r\n\r\n\r\ndef test_get():\r\n # GH 6383\r\n s = Series(\r\n np.array(\r\n [\r\n 43,\r\n 48,\r\n 60,\r\n 48,\r\n 50,\r\n 51,\r\n 50,\r\n 45,\r\n 57,\r\n 48,\r\n 56,\r\n 45,\r\n 51,\r\n 39,\r\n 55,\r\n 43,\r\n 54,\r\n 52,\r\n 51,\r\n 54,\r\n ]\r\n )\r\n )\r\n\r\n result = s.get(25, 0)\r\n expected = 0\r\n assert result == expected\r\n\r\n s = Series(\r\n np.array(\r\n [\r\n 43,\r\n 48,\r\n 60,\r\n 48,\r\n 50,\r\n 51,\r\n 50,\r\n 45,\r\n 57,\r\n 48,\r\n 56,\r\n 45,\r\n 51,\r\n 39,\r\n 55,\r\n 43,\r\n 54,\r\n 52,\r\n 51,\r\n 54,\r\n ]\r\n ),\r\n index=pd.Float64Index(\r\n [\r\n 25.0,\r\n 36.0,\r\n 49.0,\r\n 64.0,\r\n 81.0,\r\n 100.0,\r\n 121.0,\r\n 144.0,\r\n 169.0,\r\n 196.0,\r\n 1225.0,\r\n 1296.0,\r\n 1369.0,\r\n 1444.0,\r\n 1521.0,\r\n 1600.0,\r\n 1681.0,\r\n 1764.0,\r\n 1849.0,\r\n 1936.0,\r\n ]\r\n ),\r\n )\r\n\r\n result = s.get(25, 0)\r\n expected = 43\r\n assert result == expected\r\n\r\n # GH 7407\r\n # with a boolean accessor\r\n df = pd.DataFrame({\"i\": [0] * 3, \"b\": [False] * 3})\r\n vc = df.i.value_counts()\r\n result = vc.get(99, default=\"Missing\")\r\n assert result == \"Missing\"\r\n\r\n vc = df.b.value_counts()\r\n result = vc.get(False, default=\"Missing\")\r\n assert result == 3\r\n\r\n result = vc.get(True, default=\"Missing\")\r\n assert result == \"Missing\"\r\n\r\n\r\ndef test_get_nan():\r\n # GH 8569\r\n s = pd.Float64Index(range(10)).to_series()\r\n assert s.get(np.nan) is None\r\n assert s.get(np.nan, default=\"Missing\") == \"Missing\"\r\n\r\n\r\ndef test_get_nan_multiple():\r\n # GH 8569\r\n # ensure that fixing \"test_get_nan\" above hasn't broken get\r\n # with multiple elements\r\n s = pd.Float64Index(range(10)).to_series()\r\n\r\n idx = [2, 30]\r\n assert s.get(idx) is None\r\n\r\n idx = [2, np.nan]\r\n assert s.get(idx) is None\r\n\r\n # GH 17295 - all missing keys\r\n idx = [20, 30]\r\n assert s.get(idx) is None\r\n\r\n idx = [np.nan, np.nan]\r\n assert s.get(idx) is None\r\n\r\n\r\ndef test_get_with_default():\r\n # GH#7725\r\n d0 = [\"a\", \"b\", \"c\", \"d\"]\r\n d1 = np.arange(4, dtype=\"int64\")\r\n others = [\"e\", 10]\r\n\r\n for data, index in ((d0, d1), (d1, d0)):\r\n s = Series(data, index=index)\r\n for i, d in zip(index, data):\r\n assert s.get(i) == d\r\n assert s.get(i, d) == d\r\n assert s.get(i, \"z\") == d\r\n for other in others:\r\n assert s.get(other, \"z\") == \"z\"\r\n assert s.get(other, other) == other\r\n\r\n\r\[email protected](\r\n \"arr\",\r\n [np.random.randn(10), tm.makeDateIndex(10, name=\"a\").tz_localize(tz=\"US/Eastern\")],\r\n)\r\ndef test_get2(arr):\r\n # TODO: better name, possibly split\r\n # GH#21260\r\n ser = Series(arr, index=[2 * i for i in range(len(arr))])\r\n assert ser.get(4) == ser.iloc[2]\r\n\r\n result = ser.get([4, 6])\r\n expected = ser.iloc[[2, 3]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ser.get(slice(2))\r\n expected = ser.iloc[[0, 1]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n assert ser.get(-1) is None\r\n assert ser.get(ser.index.max() + 1) is None\r\n\r\n ser = Series(arr[:6], index=list(\"abcdef\"))\r\n assert ser.get(\"c\") == ser.iloc[2]\r\n\r\n result = ser.get(slice(\"b\", \"d\"))\r\n expected = ser.iloc[[1, 2, 3]]\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = ser.get(\"Z\")\r\n assert result is None\r\n\r\n assert ser.get(4) == ser.iloc[4]\r\n assert ser.get(-1) == ser.iloc[-1]\r\n assert ser.get(len(ser)) is None\r\n\r\n 
# GH#21257\r\n ser = Series(arr)\r\n ser2 = ser[::2]\r\n assert ser2.get(1) is None\r\n\r\n\r\ndef test_getitem_get(string_series, object_series):\r\n for obj in [string_series, object_series]:\r\n idx = obj.index[5]\r\n\r\n assert obj[idx] == obj.get(idx)\r\n assert obj[idx] == obj[5]\r\n\r\n assert string_series.get(-1) == string_series.get(string_series.index[-1])\r\n assert string_series[5] == string_series.get(string_series.index[5])\r\n\r\n\r\ndef test_get_none():\r\n # GH#5652\r\n s1 = Series(dtype=object)\r\n s2 = Series(dtype=object, index=list(\"abc\"))\r\n for s in [s1, s2]:\r\n result = s.get(None)\r\n assert result is None\r\n",
"from datetime import time\r\n\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nfrom pandas._libs.tslibs import timezones\r\n\r\nfrom pandas import (\r\n DataFrame,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestAtTime:\r\n @pytest.mark.parametrize(\"tzstr\", [\"US/Eastern\", \"dateutil/US/Eastern\"])\r\n def test_localized_at_time(self, tzstr, frame_or_series):\r\n tz = timezones.maybe_get_tz(tzstr)\r\n\r\n rng = date_range(\"4/16/2012\", \"5/1/2012\", freq=\"H\")\r\n ts = frame_or_series(np.random.randn(len(rng)), index=rng)\r\n\r\n ts_local = ts.tz_localize(tzstr)\r\n\r\n result = ts_local.at_time(time(10, 0))\r\n expected = ts.at_time(time(10, 0)).tz_localize(tzstr)\r\n tm.assert_equal(result, expected)\r\n assert timezones.tz_compare(result.index.tz, tz)\r\n\r\n def test_at_time(self, frame_or_series):\r\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\r\n ts = DataFrame(np.random.randn(len(rng), 2), index=rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n rs = ts.at_time(rng[1])\r\n assert (rs.index.hour == rng[1].hour).all()\r\n assert (rs.index.minute == rng[1].minute).all()\r\n assert (rs.index.second == rng[1].second).all()\r\n\r\n result = ts.at_time(\"9:30\")\r\n expected = ts.at_time(time(9, 30))\r\n tm.assert_equal(result, expected)\r\n\r\n def test_at_time_midnight(self, frame_or_series):\r\n # midnight, everything\r\n rng = date_range(\"1/1/2000\", \"1/31/2000\")\r\n ts = DataFrame(np.random.randn(len(rng), 3), index=rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n\r\n result = ts.at_time(time(0, 0))\r\n tm.assert_equal(result, ts)\r\n\r\n def test_at_time_nonexistent(self, frame_or_series):\r\n # time doesn't exist\r\n rng = date_range(\"1/1/2012\", freq=\"23Min\", periods=384)\r\n ts = DataFrame(np.random.randn(len(rng)), rng)\r\n if frame_or_series is not DataFrame:\r\n ts = ts[0]\r\n rs = ts.at_time(\"16:00\")\r\n assert len(rs) == 0\r\n\r\n @pytest.mark.parametrize(\r\n \"hour\", [\"1:00\", \"1:00AM\", time(1), time(1, tzinfo=pytz.UTC)]\r\n )\r\n def test_at_time_errors(self, hour):\r\n # GH#24043\r\n dti = date_range(\"2018\", periods=3, freq=\"H\")\r\n df = DataFrame(list(range(len(dti))), index=dti)\r\n if getattr(hour, \"tzinfo\", None) is None:\r\n result = df.at_time(hour)\r\n expected = df.iloc[1:2]\r\n tm.assert_frame_equal(result, expected)\r\n else:\r\n with pytest.raises(ValueError, match=\"Index must be timezone\"):\r\n df.at_time(hour)\r\n\r\n def test_at_time_tz(self):\r\n # GH#24043\r\n dti = date_range(\"2018\", periods=3, freq=\"H\", tz=\"US/Pacific\")\r\n df = DataFrame(list(range(len(dti))), index=dti)\r\n result = df.at_time(time(4, tzinfo=pytz.timezone(\"US/Eastern\")))\r\n expected = df.iloc[1:2]\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_at_time_raises(self, frame_or_series):\r\n # GH#20725\r\n obj = DataFrame([[1, 2, 3], [4, 5, 6]])\r\n if frame_or_series is not DataFrame:\r\n obj = obj[0]\r\n msg = \"Index must be DatetimeIndex\"\r\n with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex\r\n obj.at_time(\"00:00\")\r\n\r\n @pytest.mark.parametrize(\"axis\", [\"index\", \"columns\", 0, 1])\r\n def test_at_time_axis(self, axis):\r\n # issue 8839\r\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\r\n ts = DataFrame(np.random.randn(len(rng), len(rng)))\r\n ts.index, ts.columns = rng, rng\r\n\r\n indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]\r\n\r\n if axis in [\"index\", 0]:\r\n expected = 
ts.loc[indices, :]\r\n elif axis in [\"columns\", 1]:\r\n expected = ts.loc[:, indices]\r\n\r\n result = ts.at_time(\"9:30\", axis=axis)\r\n\r\n # Without clearing freq, result has freq 1440T and expected 5T\r\n result.index = result.index._with_freq(None)\r\n expected.index = expected.index._with_freq(None)\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_at_time_datetimeindex(self):\r\n index = date_range(\"2012-01-01\", \"2012-01-05\", freq=\"30min\")\r\n df = DataFrame(np.random.randn(len(index), 5), index=index)\r\n akey = time(12, 0, 0)\r\n ainds = [24, 72, 120, 168]\r\n\r\n result = df.at_time(akey)\r\n expected = df.loc[akey]\r\n expected2 = df.iloc[ainds]\r\n tm.assert_frame_equal(result, expected)\r\n tm.assert_frame_equal(result, expected2)\r\n assert len(result) == 4\r\n",
"import operator\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n Categorical,\r\n DataFrame,\r\n Series,\r\n date_range,\r\n)\r\nimport pandas._testing as tm\r\nfrom pandas.tests.arrays.categorical.common import TestCategorical\r\n\r\n\r\nclass TestCategoricalOpsWithFactor(TestCategorical):\r\n def test_categories_none_comparisons(self):\r\n factor = Categorical([\"a\", \"b\", \"b\", \"a\", \"a\", \"c\", \"c\", \"c\"], ordered=True)\r\n tm.assert_categorical_equal(factor, self.factor)\r\n\r\n def test_comparisons(self):\r\n result = self.factor[self.factor == \"a\"]\r\n expected = self.factor[np.asarray(self.factor) == \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor != \"a\"]\r\n expected = self.factor[np.asarray(self.factor) != \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor < \"c\"]\r\n expected = self.factor[np.asarray(self.factor) < \"c\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor > \"a\"]\r\n expected = self.factor[np.asarray(self.factor) > \"a\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor >= \"b\"]\r\n expected = self.factor[np.asarray(self.factor) >= \"b\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n result = self.factor[self.factor <= \"b\"]\r\n expected = self.factor[np.asarray(self.factor) <= \"b\"]\r\n tm.assert_categorical_equal(result, expected)\r\n\r\n n = len(self.factor)\r\n\r\n other = self.factor[np.random.permutation(n)]\r\n result = self.factor == other\r\n expected = np.asarray(self.factor) == np.asarray(other)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = self.factor == \"d\"\r\n expected = np.zeros(len(self.factor), dtype=bool)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n # comparisons with categoricals\r\n cat_rev = Categorical([\"a\", \"b\", \"c\"], categories=[\"c\", \"b\", \"a\"], ordered=True)\r\n cat_rev_base = Categorical(\r\n [\"b\", \"b\", \"b\"], categories=[\"c\", \"b\", \"a\"], ordered=True\r\n )\r\n cat = Categorical([\"a\", \"b\", \"c\"], ordered=True)\r\n cat_base = Categorical([\"b\", \"b\", \"b\"], categories=cat.categories, ordered=True)\r\n\r\n # comparisons need to take categories ordering into account\r\n res_rev = cat_rev > cat_rev_base\r\n exp_rev = np.array([True, False, False])\r\n tm.assert_numpy_array_equal(res_rev, exp_rev)\r\n\r\n res_rev = cat_rev < cat_rev_base\r\n exp_rev = np.array([False, False, True])\r\n tm.assert_numpy_array_equal(res_rev, exp_rev)\r\n\r\n res = cat > cat_base\r\n exp = np.array([False, False, True])\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n # Only categories with same categories can be compared\r\n msg = \"Categoricals can only be compared if 'categories' are the same\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_rev\r\n\r\n cat_rev_base2 = Categorical([\"b\", \"b\", \"b\"], categories=[\"c\", \"b\", \"a\", \"d\"])\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > cat_rev_base2\r\n\r\n # Only categories with same ordering information can be compared\r\n cat_unorderd = cat.set_ordered(False)\r\n assert not (cat > cat).any()\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_unorderd\r\n\r\n # comparison (in both directions) with Series will raise\r\n s = Series([\"b\", \"b\", \"b\"])\r\n msg = (\r\n \"Cannot compare a Categorical for op __gt__ with type 
\"\r\n r\"<class 'numpy\\.ndarray'>\"\r\n )\r\n with pytest.raises(TypeError, match=msg):\r\n cat > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > s\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat_rev\r\n\r\n # comparison with numpy.array will raise in both direction, but only on\r\n # newer numpy versions\r\n a = np.array([\"b\", \"b\", \"b\"])\r\n with pytest.raises(TypeError, match=msg):\r\n cat > a\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > a\r\n\r\n # Make sure that unequal comparison take the categories order in\r\n # account\r\n cat_rev = Categorical(list(\"abc\"), categories=list(\"cba\"), ordered=True)\r\n exp = np.array([True, False, False])\r\n res = cat_rev > \"b\"\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n # check that zero-dim array gets unboxed\r\n res = cat_rev > np.array(\"b\")\r\n tm.assert_numpy_array_equal(res, exp)\r\n\r\n\r\nclass TestCategoricalOps:\r\n def test_compare_frame(self):\r\n # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame\r\n data = [\"a\", \"b\", 2, \"a\"]\r\n cat = Categorical(data)\r\n\r\n df = DataFrame(cat)\r\n\r\n result = cat == df.T\r\n expected = DataFrame([[True, True, True, True]])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n result = cat[::-1] != df.T\r\n expected = DataFrame([[False, True, True, False]])\r\n tm.assert_frame_equal(result, expected)\r\n\r\n def test_compare_frame_raises(self, all_compare_operators):\r\n # alignment raises unless we transpose\r\n op = getattr(operator, all_compare_operators)\r\n cat = Categorical([\"a\", \"b\", 2, \"a\"])\r\n df = DataFrame(cat)\r\n msg = \"Unable to coerce to Series, length must be 1: given 4\"\r\n with pytest.raises(ValueError, match=msg):\r\n op(cat, df)\r\n\r\n def test_datetime_categorical_comparison(self):\r\n dt_cat = Categorical(date_range(\"2014-01-01\", periods=3), ordered=True)\r\n tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))\r\n tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))\r\n\r\n def test_reflected_comparison_with_scalars(self):\r\n # GH8658\r\n cat = Categorical([1, 2, 3], ordered=True)\r\n tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))\r\n tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))\r\n\r\n def test_comparison_with_unknown_scalars(self):\r\n # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057\r\n # and following comparisons with scalars not in categories should raise\r\n # for unequal comps, but not for equal/not equal\r\n cat = Categorical([1, 2, 3], ordered=True)\r\n\r\n msg = \"Invalid comparison between dtype=category and int\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat < 4\r\n with pytest.raises(TypeError, match=msg):\r\n cat > 4\r\n with pytest.raises(TypeError, match=msg):\r\n 4 < cat\r\n with pytest.raises(TypeError, match=msg):\r\n 4 > cat\r\n\r\n tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))\r\n tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))\r\n\r\n def test_comparison_with_tuple(self):\r\n cat = Categorical(np.array([\"foo\", (0, 1), 3, (0, 1)], dtype=object))\r\n\r\n result = cat == \"foo\"\r\n expected = np.array([True, False, False, False], dtype=bool)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = cat == (0, 1)\r\n expected = np.array([False, True, False, True], dtype=bool)\r\n tm.assert_numpy_array_equal(result, 
expected)\r\n\r\n result = cat != (0, 1)\r\n tm.assert_numpy_array_equal(result, ~expected)\r\n\r\n def test_comparison_of_ordered_categorical_with_nan_to_scalar(\r\n self, compare_operators_no_eq_ne\r\n ):\r\n # https://github.com/pandas-dev/pandas/issues/26504\r\n # BUG: fix ordered categorical comparison with missing values (#26504 )\r\n # and following comparisons with scalars in categories with missing\r\n # values should be evaluated as False\r\n\r\n cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)\r\n scalar = 2\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)\r\n actual = getattr(cat, compare_operators_no_eq_ne)(scalar)\r\n tm.assert_numpy_array_equal(actual, expected)\r\n\r\n def test_comparison_of_ordered_categorical_with_nan_to_listlike(\r\n self, compare_operators_no_eq_ne\r\n ):\r\n # https://github.com/pandas-dev/pandas/issues/26504\r\n # and following comparisons of missing values in ordered Categorical\r\n # with listlike should be evaluated as False\r\n\r\n cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)\r\n other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", RuntimeWarning)\r\n expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)\r\n actual = getattr(cat, compare_operators_no_eq_ne)(other)\r\n tm.assert_numpy_array_equal(actual, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"data,reverse,base\",\r\n [(list(\"abc\"), list(\"cba\"), list(\"bbb\")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],\r\n )\r\n def test_comparisons(self, data, reverse, base):\r\n cat_rev = Series(Categorical(data, categories=reverse, ordered=True))\r\n cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))\r\n cat = Series(Categorical(data, ordered=True))\r\n cat_base = Series(\r\n Categorical(base, categories=cat.cat.categories, ordered=True)\r\n )\r\n s = Series(base)\r\n a = np.array(base)\r\n\r\n # comparisons need to take categories ordering into account\r\n res_rev = cat_rev > cat_rev_base\r\n exp_rev = Series([True, False, False])\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n\r\n res_rev = cat_rev < cat_rev_base\r\n exp_rev = Series([False, False, True])\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n\r\n res = cat > cat_base\r\n exp = Series([False, False, True])\r\n tm.assert_series_equal(res, exp)\r\n\r\n scalar = base[1]\r\n res = cat > scalar\r\n exp = Series([False, False, True])\r\n exp2 = cat.values > scalar\r\n tm.assert_series_equal(res, exp)\r\n tm.assert_numpy_array_equal(res.values, exp2)\r\n res_rev = cat_rev > scalar\r\n exp_rev = Series([True, False, False])\r\n exp_rev2 = cat_rev.values > scalar\r\n tm.assert_series_equal(res_rev, exp_rev)\r\n tm.assert_numpy_array_equal(res_rev.values, exp_rev2)\r\n\r\n # Only categories with same categories can be compared\r\n msg = \"Categoricals can only be compared if 'categories' are the same\"\r\n with pytest.raises(TypeError, match=msg):\r\n cat > cat_rev\r\n\r\n # categorical cannot be compared to Series or numpy array, and also\r\n # not the other way around\r\n msg = (\r\n \"Cannot compare a Categorical for op __gt__ with type \"\r\n r\"<class 'numpy\\.ndarray'>\"\r\n )\r\n with pytest.raises(TypeError, match=msg):\r\n cat > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat_rev > s\r\n with pytest.raises(TypeError, match=msg):\r\n cat > a\r\n with 
pytest.raises(TypeError, match=msg):\r\n cat_rev > a\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat\r\n with pytest.raises(TypeError, match=msg):\r\n s < cat_rev\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n a < cat\r\n with pytest.raises(TypeError, match=msg):\r\n a < cat_rev\r\n\r\n @pytest.mark.parametrize(\r\n \"ctor\",\r\n [\r\n lambda *args, **kwargs: Categorical(*args, **kwargs),\r\n lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),\r\n ],\r\n )\r\n def test_unordered_different_order_equal(self, ctor):\r\n # https://github.com/pandas-dev/pandas/issues/16014\r\n c1 = ctor([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"a\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 == c2).all()\r\n\r\n c1 = ctor([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"b\", \"a\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 != c2).all()\r\n\r\n c1 = ctor([\"a\", \"a\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"b\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n assert (c1 != c2).all()\r\n\r\n c1 = ctor([\"a\", \"a\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = ctor([\"a\", \"b\"], categories=[\"b\", \"a\"], ordered=False)\r\n result = c1 == c2\r\n tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))\r\n\r\n def test_unordered_different_categories_raises(self):\r\n c1 = Categorical([\"a\", \"b\"], categories=[\"a\", \"b\"], ordered=False)\r\n c2 = Categorical([\"a\", \"c\"], categories=[\"c\", \"a\"], ordered=False)\r\n\r\n with pytest.raises(TypeError, match=(\"Categoricals can only be compared\")):\r\n c1 == c2\r\n\r\n def test_compare_different_lengths(self):\r\n c1 = Categorical([], categories=[\"a\", \"b\"])\r\n c2 = Categorical([], categories=[\"a\"])\r\n\r\n msg = \"Categoricals can only be compared if 'categories' are the same.\"\r\n with pytest.raises(TypeError, match=msg):\r\n c1 == c2\r\n\r\n def test_compare_unordered_different_order(self):\r\n # https://github.com/pandas-dev/pandas/issues/16603#issuecomment-\r\n # 349290078\r\n a = Categorical([\"a\"], categories=[\"a\", \"b\"])\r\n b = Categorical([\"b\"], categories=[\"b\", \"a\"])\r\n assert not a.equals(b)\r\n\r\n def test_numeric_like_ops(self):\r\n\r\n df = DataFrame({\"value\": np.random.randint(0, 10000, 100)})\r\n labels = [f\"{i} - {i + 499}\" for i in range(0, 10000, 500)]\r\n cat_labels = Categorical(labels, labels)\r\n\r\n df = df.sort_values(by=[\"value\"], ascending=True)\r\n df[\"value_group\"] = pd.cut(\r\n df.value, range(0, 10500, 500), right=False, labels=cat_labels\r\n )\r\n\r\n # numeric ops should not succeed\r\n for op, str_rep in [\r\n (\"__add__\", r\"\\+\"),\r\n (\"__sub__\", \"-\"),\r\n (\"__mul__\", r\"\\*\"),\r\n (\"__truediv__\", \"/\"),\r\n ]:\r\n msg = f\"Series cannot perform the operation {str_rep}|unsupported operand\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(df, op)(df)\r\n\r\n # reduction ops should not succeed (unless specifically defined, e.g.\r\n # min/max)\r\n s = df[\"value_group\"]\r\n for op in [\"kurt\", \"skew\", \"var\", \"std\", \"mean\", \"sum\", \"median\"]:\r\n msg = f\"'Categorical' does not implement reduction '{op}'\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(s, op)(numeric_only=False)\r\n\r\n # mad technically works because it takes always the numeric data\r\n\r\n # numpy ops\r\n s = Series(Categorical([1, 2, 3, 4]))\r\n with pytest.raises(\r\n TypeError, match=\"'Categorical' does not 
implement reduction 'sum'\"\r\n ):\r\n np.sum(s)\r\n\r\n # numeric ops on a Series\r\n for op, str_rep in [\r\n (\"__add__\", r\"\\+\"),\r\n (\"__sub__\", \"-\"),\r\n (\"__mul__\", r\"\\*\"),\r\n (\"__truediv__\", \"/\"),\r\n ]:\r\n msg = f\"Series cannot perform the operation {str_rep}|unsupported operand\"\r\n with pytest.raises(TypeError, match=msg):\r\n getattr(s, op)(2)\r\n\r\n # invalid ufunc\r\n msg = \"Object with dtype category cannot perform the numpy op log\"\r\n with pytest.raises(TypeError, match=msg):\r\n np.log(s)\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestDataFrameFilter:\r\n def test_filter(self, float_frame, float_string_frame):\r\n # Items\r\n filtered = float_frame.filter([\"A\", \"B\", \"E\"])\r\n assert len(filtered.columns) == 2\r\n assert \"E\" not in filtered\r\n\r\n filtered = float_frame.filter([\"A\", \"B\", \"E\"], axis=\"columns\")\r\n assert len(filtered.columns) == 2\r\n assert \"E\" not in filtered\r\n\r\n # Other axis\r\n idx = float_frame.index[0:4]\r\n filtered = float_frame.filter(idx, axis=\"index\")\r\n expected = float_frame.reindex(index=idx)\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n # like\r\n fcopy = float_frame.copy()\r\n fcopy[\"AA\"] = 1\r\n\r\n filtered = fcopy.filter(like=\"A\")\r\n assert len(filtered.columns) == 2\r\n assert \"AA\" in filtered\r\n\r\n # like with ints in column names\r\n df = DataFrame(0.0, index=[0, 1, 2], columns=[0, 1, \"_A\", \"_B\"])\r\n filtered = df.filter(like=\"_\")\r\n assert len(filtered.columns) == 2\r\n\r\n # regex with ints in column names\r\n # from PR #10384\r\n df = DataFrame(0.0, index=[0, 1, 2], columns=[\"A1\", 1, \"B\", 2, \"C\"])\r\n expected = DataFrame(\r\n 0.0, index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object)\r\n )\r\n filtered = df.filter(regex=\"^[0-9]+$\")\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n expected = DataFrame(0.0, index=[0, 1, 2], columns=[0, \"0\", 1, \"1\"])\r\n # shouldn't remove anything\r\n filtered = expected.filter(regex=\"^[0-9]+$\")\r\n tm.assert_frame_equal(filtered, expected)\r\n\r\n # pass in None\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter()\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter(items=None)\r\n with pytest.raises(TypeError, match=\"Must pass\"):\r\n float_frame.filter(axis=1)\r\n\r\n # test mutually exclusive arguments\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\", like=\"bbi\")\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\", axis=1)\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], regex=\"e$\")\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], like=\"bbi\", axis=0)\r\n with pytest.raises(TypeError, match=\"mutually exclusive\"):\r\n float_frame.filter(items=[\"one\", \"three\"], like=\"bbi\")\r\n\r\n # objects\r\n filtered = float_string_frame.filter(like=\"foo\")\r\n assert \"foo\" in filtered\r\n\r\n # unicode columns, won't ascii-encode\r\n df = float_frame.rename(columns={\"B\": \"\\u2202\"})\r\n filtered = df.filter(like=\"C\")\r\n assert \"C\" in filtered\r\n\r\n def test_filter_regex_search(self, float_frame):\r\n fcopy = float_frame.copy()\r\n fcopy[\"AA\"] = 1\r\n\r\n # regex\r\n filtered = fcopy.filter(regex=\"[A]+\")\r\n assert len(filtered.columns) == 2\r\n assert \"AA\" in filtered\r\n\r\n # doesn't have to be at beginning\r\n df = DataFrame(\r\n {\"aBBa\": [1, 2], \"BBaBB\": [1, 2], \"aCCa\": [1, 2], \"aCCaBB\": [1, 2]}\r\n )\r\n\r\n result = df.filter(regex=\"BB\")\r\n exp = df[[x for x in df.columns if \"BB\" in x]]\r\n tm.assert_frame_equal(result, exp)\r\n\r\n @pytest.mark.parametrize(\r\n \"name,expected\",\r\n [\r\n (\"a\", DataFrame({\"a\": [1, 2]})),\r\n (\"a\", 
DataFrame({\"a\": [1, 2]})),\r\n (\"あ\", DataFrame({\"あ\": [3, 4]})),\r\n ],\r\n )\r\n def test_filter_unicode(self, name, expected):\r\n # GH13101\r\n df = DataFrame({\"a\": [1, 2], \"あ\": [3, 4]})\r\n\r\n tm.assert_frame_equal(df.filter(like=name), expected)\r\n tm.assert_frame_equal(df.filter(regex=name), expected)\r\n\r\n @pytest.mark.parametrize(\"name\", [\"a\", \"a\"])\r\n def test_filter_bytestring(self, name):\r\n # GH13101\r\n df = DataFrame({b\"a\": [1, 2], b\"b\": [3, 4]})\r\n expected = DataFrame({b\"a\": [1, 2]})\r\n\r\n tm.assert_frame_equal(df.filter(like=name), expected)\r\n tm.assert_frame_equal(df.filter(regex=name), expected)\r\n\r\n def test_filter_corner(self):\r\n empty = DataFrame()\r\n\r\n result = empty.filter([])\r\n tm.assert_frame_equal(result, empty)\r\n\r\n result = empty.filter(like=\"foo\")\r\n tm.assert_frame_equal(result, empty)\r\n\r\n def test_filter_regex_non_string(self):\r\n # GH#5798 trying to filter on non-string columns should drop,\r\n # not raise\r\n df = DataFrame(np.random.random((3, 2)), columns=[\"STRING\", 123])\r\n result = df.filter(regex=\"STRING\")\r\n expected = df[[\"STRING\"]]\r\n tm.assert_frame_equal(result, expected)\r\n",
"\"\"\"Base classes for all estimators.\"\"\"\r\n\r\n# Author: Gael Varoquaux <[email protected]>\r\n# License: BSD 3 clause\r\n\r\nimport copy\r\nimport warnings\r\nfrom collections import defaultdict\r\nimport platform\r\nimport inspect\r\nimport re\r\n\r\nimport numpy as np\r\n\r\nfrom . import __version__\r\nfrom ._config import get_config\r\nfrom .utils import _IS_32BIT\r\nfrom .utils._tags import (\r\n _DEFAULT_TAGS,\r\n _safe_tags,\r\n)\r\nfrom .utils.validation import check_X_y\r\nfrom .utils.validation import check_array\r\nfrom .utils.validation import _check_y\r\nfrom .utils.validation import _num_features\r\nfrom .utils.validation import _check_feature_names_in\r\nfrom .utils._estimator_html_repr import estimator_html_repr\r\nfrom .utils.validation import _get_feature_names\r\n\r\n\r\ndef clone(estimator, *, safe=True):\r\n \"\"\"Constructs a new unfitted estimator with the same parameters.\r\n\r\n Clone does a deep copy of the model in an estimator\r\n without actually copying attached data. It yields a new estimator\r\n with the same parameters that has not been fitted on any data.\r\n\r\n If the estimator's `random_state` parameter is an integer (or if the\r\n estimator doesn't have a `random_state` parameter), an *exact clone* is\r\n returned: the clone and the original estimator will give the exact same\r\n results. Otherwise, *statistical clone* is returned: the clone might\r\n yield different results from the original estimator. More details can be\r\n found in :ref:`randomness`.\r\n\r\n Parameters\r\n ----------\r\n estimator : {list, tuple, set} of estimator instance or a single \\\r\n estimator instance\r\n The estimator or group of estimators to be cloned.\r\n\r\n safe : bool, default=True\r\n If safe is False, clone will fall back to a deep copy on objects\r\n that are not estimators.\r\n\r\n \"\"\"\r\n estimator_type = type(estimator)\r\n # XXX: not handling dictionaries\r\n if estimator_type in (list, tuple, set, frozenset):\r\n return estimator_type([clone(e, safe=safe) for e in estimator])\r\n elif not hasattr(estimator, \"get_params\") or isinstance(estimator, type):\r\n if not safe:\r\n return copy.deepcopy(estimator)\r\n else:\r\n if isinstance(estimator, type):\r\n raise TypeError(\r\n \"Cannot clone object. 
\"\r\n + \"You should provide an instance of \"\r\n + \"scikit-learn estimator instead of a class.\"\r\n )\r\n else:\r\n raise TypeError(\r\n \"Cannot clone object '%s' (type %s): \"\r\n \"it does not seem to be a scikit-learn \"\r\n \"estimator as it does not implement a \"\r\n \"'get_params' method.\" % (repr(estimator), type(estimator))\r\n )\r\n\r\n klass = estimator.__class__\r\n new_object_params = estimator.get_params(deep=False)\r\n for name, param in new_object_params.items():\r\n new_object_params[name] = clone(param, safe=False)\r\n new_object = klass(**new_object_params)\r\n params_set = new_object.get_params(deep=False)\r\n\r\n # quick sanity check of the parameters of the clone\r\n for name in new_object_params:\r\n param1 = new_object_params[name]\r\n param2 = params_set[name]\r\n if param1 is not param2:\r\n raise RuntimeError(\r\n \"Cannot clone object %s, as the constructor \"\r\n \"either does not set or modifies parameter %s\" % (estimator, name)\r\n )\r\n return new_object\r\n\r\n\r\ndef _pprint(params, offset=0, printer=repr):\r\n \"\"\"Pretty print the dictionary 'params'\r\n\r\n Parameters\r\n ----------\r\n params : dict\r\n The dictionary to pretty print\r\n\r\n offset : int, default=0\r\n The offset in characters to add at the begin of each line.\r\n\r\n printer : callable, default=repr\r\n The function to convert entries to strings, typically\r\n the builtin str or repr\r\n\r\n \"\"\"\r\n # Do a multi-line justified repr:\r\n options = np.get_printoptions()\r\n np.set_printoptions(precision=5, threshold=64, edgeitems=2)\r\n params_list = list()\r\n this_line_length = offset\r\n line_sep = \",\\n\" + (1 + offset // 2) * \" \"\r\n for i, (k, v) in enumerate(sorted(params.items())):\r\n if type(v) is float:\r\n # use str for representing floating point numbers\r\n # this way we get consistent representation across\r\n # architectures and versions.\r\n this_repr = \"%s=%s\" % (k, str(v))\r\n else:\r\n # use repr of the rest\r\n this_repr = \"%s=%s\" % (k, printer(v))\r\n if len(this_repr) > 500:\r\n this_repr = this_repr[:300] + \"...\" + this_repr[-100:]\r\n if i > 0:\r\n if this_line_length + len(this_repr) >= 75 or \"\\n\" in this_repr:\r\n params_list.append(line_sep)\r\n this_line_length = len(line_sep)\r\n else:\r\n params_list.append(\", \")\r\n this_line_length += 2\r\n params_list.append(this_repr)\r\n this_line_length += len(this_repr)\r\n\r\n np.set_printoptions(**options)\r\n lines = \"\".join(params_list)\r\n # Strip trailing space to avoid nightmare in doctests\r\n lines = \"\\n\".join(l.rstrip(\" \") for l in lines.split(\"\\n\"))\r\n return lines\r\n\r\n\r\nclass BaseEstimator:\r\n \"\"\"Base class for all estimators in scikit-learn.\r\n\r\n Notes\r\n -----\r\n All estimators should specify all the parameters that can be set\r\n at the class level in their ``__init__`` as explicit keyword\r\n arguments (no ``*args`` or ``**kwargs``).\r\n \"\"\"\r\n\r\n @classmethod\r\n def _get_param_names(cls):\r\n \"\"\"Get parameter names for the estimator\"\"\"\r\n # fetch the constructor or the original constructor before\r\n # deprecation wrapping if any\r\n init = getattr(cls.__init__, \"deprecated_original\", cls.__init__)\r\n if init is object.__init__:\r\n # No explicit constructor to introspect\r\n return []\r\n\r\n # introspect the constructor arguments to find the model parameters\r\n # to represent\r\n init_signature = inspect.signature(init)\r\n # Consider the constructor parameters excluding 'self'\r\n parameters = [\r\n p\r\n for p in 
init_signature.parameters.values()\r\n if p.name != \"self\" and p.kind != p.VAR_KEYWORD\r\n ]\r\n for p in parameters:\r\n if p.kind == p.VAR_POSITIONAL:\r\n raise RuntimeError(\r\n \"scikit-learn estimators should always \"\r\n \"specify their parameters in the signature\"\r\n \" of their __init__ (no varargs).\"\r\n \" %s with constructor %s doesn't \"\r\n \" follow this convention.\" % (cls, init_signature)\r\n )\r\n # Extract and sort argument names excluding 'self'\r\n return sorted([p.name for p in parameters])\r\n\r\n def get_params(self, deep=True):\r\n \"\"\"\r\n Get parameters for this estimator.\r\n\r\n Parameters\r\n ----------\r\n deep : bool, default=True\r\n If True, will return the parameters for this estimator and\r\n contained subobjects that are estimators.\r\n\r\n Returns\r\n -------\r\n params : dict\r\n Parameter names mapped to their values.\r\n \"\"\"\r\n out = dict()\r\n for key in self._get_param_names():\r\n value = getattr(self, key)\r\n if deep and hasattr(value, \"get_params\"):\r\n deep_items = value.get_params().items()\r\n out.update((key + \"__\" + k, val) for k, val in deep_items)\r\n out[key] = value\r\n return out\r\n\r\n def set_params(self, **params):\r\n \"\"\"\r\n Set the parameters of this estimator.\r\n\r\n The method works on simple estimators as well as on nested objects\r\n (such as :class:`~sklearn.pipeline.Pipeline`). The latter have\r\n parameters of the form ``<component>__<parameter>`` so that it's\r\n possible to update each component of a nested object.\r\n\r\n Parameters\r\n ----------\r\n **params : dict\r\n Estimator parameters.\r\n\r\n Returns\r\n -------\r\n self : estimator instance\r\n Estimator instance.\r\n \"\"\"\r\n if not params:\r\n # Simple optimization to gain speed (inspect is slow)\r\n return self\r\n valid_params = self.get_params(deep=True)\r\n\r\n nested_params = defaultdict(dict) # grouped by prefix\r\n for key, value in params.items():\r\n key, delim, sub_key = key.partition(\"__\")\r\n if key not in valid_params:\r\n raise ValueError(\r\n \"Invalid parameter %s for estimator %s. \"\r\n \"Check the list of available parameters \"\r\n \"with `estimator.get_params().keys()`.\" % (key, self)\r\n )\r\n\r\n if delim:\r\n nested_params[key][sub_key] = value\r\n else:\r\n setattr(self, key, value)\r\n valid_params[key] = value\r\n\r\n for key, sub_params in nested_params.items():\r\n valid_params[key].set_params(**sub_params)\r\n\r\n return self\r\n\r\n def __repr__(self, N_CHAR_MAX=700):\r\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\r\n # characters to render. 
We pass it as an optional parameter to ease\r\n # the tests.\r\n\r\n from .utils._pprint import _EstimatorPrettyPrinter\r\n\r\n N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences\r\n\r\n # use ellipsis for sequences with a lot of elements\r\n pp = _EstimatorPrettyPrinter(\r\n compact=True,\r\n indent=1,\r\n indent_at_name=True,\r\n n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW,\r\n )\r\n\r\n repr_ = pp.pformat(self)\r\n\r\n # Use bruteforce ellipsis when there are a lot of non-blank characters\r\n n_nonblank = len(\"\".join(repr_.split()))\r\n if n_nonblank > N_CHAR_MAX:\r\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\r\n regex = r\"^(\\s*\\S){%d}\" % lim\r\n # The regex '^(\\s*\\S){%d}' % n\r\n # matches from the start of the string until the nth non-blank\r\n # character:\r\n # - ^ matches the start of string\r\n # - (pattern){n} matches n repetitions of pattern\r\n # - \\s*\\S matches a non-blank char following zero or more blanks\r\n left_lim = re.match(regex, repr_).end()\r\n right_lim = re.match(regex, repr_[::-1]).end()\r\n\r\n if \"\\n\" in repr_[left_lim:-right_lim]:\r\n # The left side and right side aren't on the same line.\r\n # To avoid weird cuts, e.g.:\r\n # categoric...ore',\r\n # we need to start the right side with an appropriate newline\r\n # character so that it renders properly as:\r\n # categoric...\r\n # handle_unknown='ignore',\r\n # so we add [^\\n]*\\n which matches until the next \\n\r\n regex += r\"[^\\n]*\\n\"\r\n right_lim = re.match(regex, repr_[::-1]).end()\r\n\r\n ellipsis = \"...\"\r\n if left_lim + len(ellipsis) < len(repr_) - right_lim:\r\n # Only add ellipsis if it results in a shorter repr\r\n repr_ = repr_[:left_lim] + \"...\" + repr_[-right_lim:]\r\n\r\n return repr_\r\n\r\n def __getstate__(self):\r\n try:\r\n state = super().__getstate__()\r\n except AttributeError:\r\n state = self.__dict__.copy()\r\n\r\n if type(self).__module__.startswith(\"sklearn.\"):\r\n return dict(state.items(), _sklearn_version=__version__)\r\n else:\r\n return state\r\n\r\n def __setstate__(self, state):\r\n if type(self).__module__.startswith(\"sklearn.\"):\r\n pickle_version = state.pop(\"_sklearn_version\", \"pre-0.18\")\r\n if pickle_version != __version__:\r\n warnings.warn(\r\n \"Trying to unpickle estimator {0} from version {1} when \"\r\n \"using version {2}. This might lead to breaking code or \"\r\n \"invalid results. Use at your own risk. \"\r\n \"For more info please refer to:\\n\"\r\n \"https://scikit-learn.org/stable/modules/model_persistence\"\r\n \".html#security-maintainability-limitations\".format(\r\n self.__class__.__name__, pickle_version, __version__\r\n ),\r\n UserWarning,\r\n )\r\n try:\r\n super().__setstate__(state)\r\n except AttributeError:\r\n self.__dict__.update(state)\r\n\r\n def _more_tags(self):\r\n return _DEFAULT_TAGS\r\n\r\n def _get_tags(self):\r\n collected_tags = {}\r\n for base_class in reversed(inspect.getmro(self.__class__)):\r\n if hasattr(base_class, \"_more_tags\"):\r\n # need the if because mixins might not have _more_tags\r\n # but might do redundant work in estimators\r\n # (i.e. 
calling more tags on BaseEstimator multiple times)\r\n more_tags = base_class._more_tags(self)\r\n collected_tags.update(more_tags)\r\n return collected_tags\r\n\r\n def _check_n_features(self, X, reset):\r\n \"\"\"Set the `n_features_in_` attribute, or check against it.\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\r\n The input samples.\r\n reset : bool\r\n If True, the `n_features_in_` attribute is set to `X.shape[1]`.\r\n If False and the attribute exists, then check that it is equal to\r\n `X.shape[1]`. If False and the attribute does *not* exist, then\r\n the check is skipped.\r\n .. note::\r\n It is recommended to call reset=True in `fit` and in the first\r\n call to `partial_fit`. All other methods that validate `X`\r\n should set `reset=False`.\r\n \"\"\"\r\n try:\r\n n_features = _num_features(X)\r\n except TypeError as e:\r\n if not reset and hasattr(self, \"n_features_in_\"):\r\n raise ValueError(\r\n \"X does not contain any features, but \"\r\n f\"{self.__class__.__name__} is expecting \"\r\n f\"{self.n_features_in_} features\"\r\n ) from e\r\n # If the number of features is not defined and reset=True,\r\n # then we skip this check\r\n return\r\n\r\n if reset:\r\n self.n_features_in_ = n_features\r\n return\r\n\r\n if not hasattr(self, \"n_features_in_\"):\r\n # Skip this check if the expected number of expected input features\r\n # was not recorded by calling fit first. This is typically the case\r\n # for stateless transformers.\r\n return\r\n\r\n if n_features != self.n_features_in_:\r\n raise ValueError(\r\n f\"X has {n_features} features, but {self.__class__.__name__} \"\r\n f\"is expecting {self.n_features_in_} features as input.\"\r\n )\r\n\r\n def _check_feature_names(self, X, *, reset):\r\n \"\"\"Set or check the `feature_names_in_` attribute.\r\n\r\n .. versionadded:: 1.0\r\n\r\n Parameters\r\n ----------\r\n X : {ndarray, dataframe} of shape (n_samples, n_features)\r\n The input samples.\r\n\r\n reset : bool\r\n Whether to reset the `feature_names_in_` attribute.\r\n If False, the input will be checked for consistency with\r\n feature names of data provided when reset was last True.\r\n .. note::\r\n It is recommended to call `reset=True` in `fit` and in the first\r\n call to `partial_fit`. 
All other methods that validate `X`\r\n should set `reset=False`.\r\n \"\"\"\r\n\r\n if reset:\r\n feature_names_in = _get_feature_names(X)\r\n if feature_names_in is not None:\r\n self.feature_names_in_ = feature_names_in\r\n elif hasattr(self, \"feature_names_in_\"):\r\n # Delete the attribute when the estimator is fitted on a new dataset\r\n # that has no feature names.\r\n delattr(self, \"feature_names_in_\")\r\n return\r\n\r\n fitted_feature_names = getattr(self, \"feature_names_in_\", None)\r\n X_feature_names = _get_feature_names(X)\r\n\r\n if fitted_feature_names is None and X_feature_names is None:\r\n # no feature names seen in fit and in X\r\n return\r\n\r\n if X_feature_names is not None and fitted_feature_names is None:\r\n warnings.warn(\r\n f\"X has feature names, but {self.__class__.__name__} was fitted without\"\r\n \" feature names\"\r\n )\r\n return\r\n\r\n if X_feature_names is None and fitted_feature_names is not None:\r\n warnings.warn(\r\n \"X does not have valid feature names, but\"\r\n f\" {self.__class__.__name__} was fitted with feature names\"\r\n )\r\n return\r\n\r\n # validate the feature names against the `feature_names_in_` attribute\r\n if len(fitted_feature_names) != len(X_feature_names) or np.any(\r\n fitted_feature_names != X_feature_names\r\n ):\r\n message = (\r\n \"The feature names should match those that were \"\r\n \"passed during fit. Starting version 1.2, an error will be raised.\\n\"\r\n )\r\n fitted_feature_names_set = set(fitted_feature_names)\r\n X_feature_names_set = set(X_feature_names)\r\n\r\n unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set)\r\n missing_names = sorted(fitted_feature_names_set - X_feature_names_set)\r\n\r\n def add_names(names):\r\n output = \"\"\r\n max_n_names = 5\r\n for i, name in enumerate(names):\r\n if i >= max_n_names:\r\n output += \"- ...\\n\"\r\n break\r\n output += f\"- {name}\\n\"\r\n return output\r\n\r\n if unexpected_names:\r\n message += \"Feature names unseen at fit time:\\n\"\r\n message += add_names(unexpected_names)\r\n\r\n if missing_names:\r\n message += \"Feature names seen at fit time, yet now missing:\\n\"\r\n message += add_names(missing_names)\r\n\r\n if not missing_names and not missing_names:\r\n message += (\r\n \"Feature names must be in the same order as they were in fit.\\n\"\r\n )\r\n\r\n warnings.warn(message, FutureWarning)\r\n\r\n def _validate_data(\r\n self,\r\n X=\"no_validation\",\r\n y=\"no_validation\",\r\n reset=True,\r\n validate_separately=False,\r\n **check_params,\r\n ):\r\n \"\"\"Validate input data and set or check the `n_features_in_` attribute.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix, dataframe} of shape \\\r\n (n_samples, n_features), default='no validation'\r\n The input samples.\r\n If `'no_validation'`, no validation is performed on `X`. This is\r\n useful for meta-estimator which can delegate input validation to\r\n their underlying estimator(s). In that case `y` must be passed and\r\n the only accepted `check_params` are `multi_output` and\r\n `y_numeric`.\r\n\r\n y : array-like of shape (n_samples,), default='no_validation'\r\n The targets.\r\n\r\n - If `None`, `check_array` is called on `X`. If the estimator's\r\n requires_y tag is True, then an error will be raised.\r\n - If `'no_validation'`, `check_array` is called on `X` and the\r\n estimator's requires_y tag is ignored. This is a default\r\n placeholder and is never meant to be explicitly set. 
In that case\r\n `X` must be passed.\r\n - Otherwise, only `y` with `_check_y` or both `X` and `y` are\r\n checked with either `check_array` or `check_X_y` depending on\r\n `validate_separately`.\r\n\r\n reset : bool, default=True\r\n Whether to reset the `n_features_in_` attribute.\r\n If False, the input will be checked for consistency with data\r\n provided when reset was last True.\r\n .. note::\r\n It is recommended to call reset=True in `fit` and in the first\r\n call to `partial_fit`. All other methods that validate `X`\r\n should set `reset=False`.\r\n validate_separately : False or tuple of dicts, default=False\r\n Only used if y is not None.\r\n If False, call validate_X_y(). Else, it must be a tuple of kwargs\r\n to be used for calling check_array() on X and y respectively.\r\n **check_params : kwargs\r\n Parameters passed to :func:`sklearn.utils.check_array` or\r\n :func:`sklearn.utils.check_X_y`. Ignored if validate_separately\r\n is not False.\r\n\r\n Returns\r\n -------\r\n out : {ndarray, sparse matrix} or tuple of these\r\n The validated input. A tuple is returned if both `X` and `y` are\r\n validated.\r\n \"\"\"\r\n self._check_feature_names(X, reset=reset)\r\n\r\n if y is None and self._get_tags()[\"requires_y\"]:\r\n raise ValueError(\r\n f\"This {self.__class__.__name__} estimator \"\r\n \"requires y to be passed, but the target y is None.\"\r\n )\r\n\r\n no_val_X = isinstance(X, str) and X == \"no_validation\"\r\n no_val_y = y is None or isinstance(y, str) and y == \"no_validation\"\r\n\r\n if no_val_X and no_val_y:\r\n raise ValueError(\"Validation should be done on X, y or both.\")\r\n elif not no_val_X and no_val_y:\r\n X = check_array(X, **check_params)\r\n out = X\r\n elif no_val_X and not no_val_y:\r\n y = _check_y(y, **check_params)\r\n out = y\r\n else:\r\n if validate_separately:\r\n # We need this because some estimators validate X and y\r\n # separately, and in general, separately calling check_array()\r\n # on X and y isn't equivalent to just calling check_X_y()\r\n # :(\r\n check_X_params, check_y_params = validate_separately\r\n X = check_array(X, **check_X_params)\r\n y = check_array(y, **check_y_params)\r\n else:\r\n X, y = check_X_y(X, y, **check_params)\r\n out = X, y\r\n\r\n if not no_val_X and check_params.get(\"ensure_2d\", True):\r\n self._check_n_features(X, reset=reset)\r\n\r\n return out\r\n\r\n @property\r\n def _repr_html_(self):\r\n \"\"\"HTML representation of estimator.\r\n\r\n This is redundant with the logic of `_repr_mimebundle_`. 
The latter\r\n should be favorted in the long term, `_repr_html_` is only\r\n implemented for consumers who do not interpret `_repr_mimbundle_`.\r\n \"\"\"\r\n if get_config()[\"display\"] != \"diagram\":\r\n raise AttributeError(\r\n \"_repr_html_ is only defined when the \"\r\n \"'display' configuration option is set to \"\r\n \"'diagram'\"\r\n )\r\n return self._repr_html_inner\r\n\r\n def _repr_html_inner(self):\r\n \"\"\"This function is returned by the @property `_repr_html_` to make\r\n `hasattr(estimator, \"_repr_html_\") return `True` or `False` depending\r\n on `get_config()[\"display\"]`.\r\n \"\"\"\r\n return estimator_html_repr(self)\r\n\r\n def _repr_mimebundle_(self, **kwargs):\r\n \"\"\"Mime bundle used by jupyter kernels to display estimator\"\"\"\r\n output = {\"text/plain\": repr(self)}\r\n if get_config()[\"display\"] == \"diagram\":\r\n output[\"text/html\"] = estimator_html_repr(self)\r\n return output\r\n\r\n\r\nclass ClassifierMixin:\r\n \"\"\"Mixin class for all classifiers in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"classifier\"\r\n\r\n def score(self, X, y, sample_weight=None):\r\n \"\"\"\r\n Return the mean accuracy on the given test data and labels.\r\n\r\n In multi-label classification, this is the subset accuracy\r\n which is a harsh metric since you require for each sample that\r\n each label set be correctly predicted.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\r\n True labels for `X`.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n score : float\r\n Mean accuracy of ``self.predict(X)`` wrt. `y`.\r\n \"\"\"\r\n from .metrics import accuracy_score\r\n\r\n return accuracy_score(y, self.predict(X), sample_weight=sample_weight)\r\n\r\n def _more_tags(self):\r\n return {\"requires_y\": True}\r\n\r\n\r\nclass RegressorMixin:\r\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"regressor\"\r\n\r\n def score(self, X, y, sample_weight=None):\r\n \"\"\"Return the coefficient of determination of the prediction.\r\n\r\n The coefficient of determination :math:`R^2` is defined as\r\n :math:`(1 - \\\\frac{u}{v})`, where :math:`u` is the residual\r\n sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`\r\n is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.\r\n The best possible score is 1.0 and it can be negative (because the\r\n model can be arbitrarily worse). A constant model that always predicts\r\n the expected value of `y`, disregarding the input features, would get\r\n a :math:`R^2` score of 0.0.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples. For some estimators this may be a precomputed\r\n kernel matrix or a list of generic objects instead with shape\r\n ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``\r\n is the number of samples used in the fitting for the estimator.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\r\n True values for `X`.\r\n\r\n sample_weight : array-like of shape (n_samples,), default=None\r\n Sample weights.\r\n\r\n Returns\r\n -------\r\n score : float\r\n :math:`R^2` of ``self.predict(X)`` wrt. 
`y`.\r\n\r\n Notes\r\n -----\r\n The :math:`R^2` score used when calling ``score`` on a regressor uses\r\n ``multioutput='uniform_average'`` from version 0.23 to keep consistent\r\n with default value of :func:`~sklearn.metrics.r2_score`.\r\n This influences the ``score`` method of all the multioutput\r\n regressors (except for\r\n :class:`~sklearn.multioutput.MultiOutputRegressor`).\r\n \"\"\"\r\n\r\n from .metrics import r2_score\r\n\r\n y_pred = self.predict(X)\r\n return r2_score(y, y_pred, sample_weight=sample_weight)\r\n\r\n def _more_tags(self):\r\n return {\"requires_y\": True}\r\n\r\n\r\nclass ClusterMixin:\r\n \"\"\"Mixin class for all cluster estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"clusterer\"\r\n\r\n def fit_predict(self, X, y=None):\r\n \"\"\"\r\n Perform clustering on `X` and returns cluster labels.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Input data.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n labels : ndarray of shape (n_samples,), dtype=np.int64\r\n Cluster labels.\r\n \"\"\"\r\n # non-optimized default implementation; override when a better\r\n # method is possible for a given clustering algorithm\r\n self.fit(X)\r\n return self.labels_\r\n\r\n def _more_tags(self):\r\n return {\"preserves_dtype\": []}\r\n\r\n\r\nclass BiclusterMixin:\r\n \"\"\"Mixin class for all bicluster estimators in scikit-learn.\"\"\"\r\n\r\n @property\r\n def biclusters_(self):\r\n \"\"\"Convenient way to get row and column indicators together.\r\n\r\n Returns the ``rows_`` and ``columns_`` members.\r\n \"\"\"\r\n return self.rows_, self.columns_\r\n\r\n def get_indices(self, i):\r\n \"\"\"Row and column indices of the `i`'th bicluster.\r\n\r\n Only works if ``rows_`` and ``columns_`` attributes exist.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n\r\n Returns\r\n -------\r\n row_ind : ndarray, dtype=np.intp\r\n Indices of rows in the dataset that belong to the bicluster.\r\n col_ind : ndarray, dtype=np.intp\r\n Indices of columns in the dataset that belong to the bicluster.\r\n \"\"\"\r\n rows = self.rows_[i]\r\n columns = self.columns_[i]\r\n return np.nonzero(rows)[0], np.nonzero(columns)[0]\r\n\r\n def get_shape(self, i):\r\n \"\"\"Shape of the `i`'th bicluster.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n\r\n Returns\r\n -------\r\n n_rows : int\r\n Number of rows in the bicluster.\r\n\r\n n_cols : int\r\n Number of columns in the bicluster.\r\n \"\"\"\r\n indices = self.get_indices(i)\r\n return tuple(len(i) for i in indices)\r\n\r\n def get_submatrix(self, i, data):\r\n \"\"\"Return the submatrix corresponding to bicluster `i`.\r\n\r\n Parameters\r\n ----------\r\n i : int\r\n The index of the cluster.\r\n data : array-like of shape (n_samples, n_features)\r\n The data.\r\n\r\n Returns\r\n -------\r\n submatrix : ndarray of shape (n_rows, n_cols)\r\n The submatrix corresponding to bicluster `i`.\r\n\r\n Notes\r\n -----\r\n Works with sparse matrices. 
Only works if ``rows_`` and\r\n ``columns_`` attributes exist.\r\n \"\"\"\r\n from .utils.validation import check_array\r\n\r\n data = check_array(data, accept_sparse=\"csr\")\r\n row_ind, col_ind = self.get_indices(i)\r\n return data[row_ind[:, np.newaxis], col_ind]\r\n\r\n\r\nclass TransformerMixin:\r\n \"\"\"Mixin class for all transformers in scikit-learn.\"\"\"\r\n\r\n def fit_transform(self, X, y=None, **fit_params):\r\n \"\"\"\r\n Fit to data, then transform it.\r\n\r\n Fits transformer to `X` and `y` with optional parameters `fit_params`\r\n and returns a transformed version of `X`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Input samples.\r\n\r\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\r\n default=None\r\n Target values (None for unsupervised transformations).\r\n\r\n **fit_params : dict\r\n Additional fit parameters.\r\n\r\n Returns\r\n -------\r\n X_new : ndarray array of shape (n_samples, n_features_new)\r\n Transformed array.\r\n \"\"\"\r\n # non-optimized default implementation; override when a better\r\n # method is possible for a given clustering algorithm\r\n if y is None:\r\n # fit method of arity 1 (unsupervised transformation)\r\n return self.fit(X, **fit_params).transform(X)\r\n else:\r\n # fit method of arity 2 (supervised transformation)\r\n return self.fit(X, y, **fit_params).transform(X)\r\n\r\n\r\nclass _OneToOneFeatureMixin:\r\n \"\"\"Provides `get_feature_names_out` for simple transformers.\r\n\r\n Assumes there's a 1-to-1 correspondence between input features\r\n and output features.\r\n \"\"\"\r\n\r\n def get_feature_names_out(self, input_features=None):\r\n \"\"\"Get output feature names for transformation.\r\n\r\n Parameters\r\n ----------\r\n input_features : array-like of str or None, default=None\r\n Input features.\r\n\r\n - If `input_features` is `None`, then `feature_names_in_` is\r\n used as feature names in. 
If `feature_names_in_` is not defined,\r\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\r\n - If `input_features` is an array-like, then `input_features` must\r\n match `feature_names_in_` if `feature_names_in_` is defined.\r\n\r\n Returns\r\n -------\r\n feature_names_out : ndarray of str objects\r\n Same as input features.\r\n \"\"\"\r\n return _check_feature_names_in(self, input_features)\r\n\r\n\r\nclass DensityMixin:\r\n \"\"\"Mixin class for all density estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"DensityEstimator\"\r\n\r\n def score(self, X, y=None):\r\n \"\"\"Return the score of the model on the data `X`.\r\n\r\n Parameters\r\n ----------\r\n X : array-like of shape (n_samples, n_features)\r\n Test samples.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n score : float\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass OutlierMixin:\r\n \"\"\"Mixin class for all outlier detection estimators in scikit-learn.\"\"\"\r\n\r\n _estimator_type = \"outlier_detector\"\r\n\r\n def fit_predict(self, X, y=None):\r\n \"\"\"Perform fit on X and returns labels for X.\r\n\r\n Returns -1 for outliers and 1 for inliers.\r\n\r\n Parameters\r\n ----------\r\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\r\n The input samples.\r\n\r\n y : Ignored\r\n Not used, present for API consistency by convention.\r\n\r\n Returns\r\n -------\r\n y : ndarray of shape (n_samples,)\r\n 1 for inliers, -1 for outliers.\r\n \"\"\"\r\n # override for transductive outlier detectors like LocalOulierFactor\r\n return self.fit(X).predict(X)\r\n\r\n\r\nclass MetaEstimatorMixin:\r\n _required_parameters = [\"estimator\"]\r\n \"\"\"Mixin class for all meta estimators in scikit-learn.\"\"\"\r\n\r\n\r\nclass MultiOutputMixin:\r\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\r\n\r\n def _more_tags(self):\r\n return {\"multioutput\": True}\r\n\r\n\r\nclass _UnstableArchMixin:\r\n \"\"\"Mark estimators that are non-determinstic on 32bit or PowerPC\"\"\"\r\n\r\n def _more_tags(self):\r\n return {\r\n \"non_deterministic\": (\r\n _IS_32BIT or platform.machine().startswith((\"ppc\", \"powerpc\"))\r\n )\r\n }\r\n\r\n\r\ndef is_classifier(estimator):\r\n \"\"\"Return True if the given estimator is (probably) a classifier.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is a classifier and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"classifier\"\r\n\r\n\r\ndef is_regressor(estimator):\r\n \"\"\"Return True if the given estimator is (probably) a regressor.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is a regressor and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"regressor\"\r\n\r\n\r\ndef is_outlier_detector(estimator):\r\n \"\"\"Return True if the given estimator is (probably) an outlier detector.\r\n\r\n Parameters\r\n ----------\r\n estimator : estimator instance\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if estimator is an outlier detector and False otherwise.\r\n \"\"\"\r\n return getattr(estimator, \"_estimator_type\", None) == \"outlier_detector\"\r\n\r\n\r\ndef _is_pairwise(estimator):\r\n \"\"\"Returns True if estimator is pairwise.\r\n\r\n - If the `_pairwise` 
attribute and the tag are present and consistent,\r\n then use the value and not issue a warning.\r\n - If the `_pairwise` attribute and the tag are present and not\r\n consistent, use the `_pairwise` value and issue a deprecation\r\n warning.\r\n - If only the `_pairwise` attribute is present and it is not False,\r\n issue a deprecation warning and use the `_pairwise` value.\r\n\r\n Parameters\r\n ----------\r\n estimator : object\r\n Estimator object to test.\r\n\r\n Returns\r\n -------\r\n out : bool\r\n True if the estimator is pairwise and False otherwise.\r\n \"\"\"\r\n with warnings.catch_warnings():\r\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\r\n has_pairwise_attribute = hasattr(estimator, \"_pairwise\")\r\n pairwise_attribute = getattr(estimator, \"_pairwise\", False)\r\n pairwise_tag = _safe_tags(estimator, key=\"pairwise\")\r\n\r\n if has_pairwise_attribute:\r\n if pairwise_attribute != pairwise_tag:\r\n warnings.warn(\r\n \"_pairwise was deprecated in 0.24 and will be removed in 1.1 \"\r\n \"(renaming of 0.26). Set the estimator tags of your estimator \"\r\n \"instead\",\r\n FutureWarning,\r\n )\r\n return pairwise_attribute\r\n\r\n # use pairwise tag when the attribute is not present\r\n return pairwise_tag\r\n"
] | [
[
"pandas.Series",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.Float64Index",
"numpy.arange",
"pandas._testing.assert_series_equal",
"numpy.array",
"pandas._testing.makeDateIndex"
],
[
"pandas.date_range",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_equal",
"pandas._libs.tslibs.timezones.maybe_get_tz"
],
[
"pandas._testing.assert_numpy_array_equal",
"numpy.sum",
"pandas.Series",
"pandas.date_range",
"pandas._testing.assert_categorical_equal",
"numpy.random.permutation",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal",
"pandas.Categorical",
"numpy.asarray",
"pandas._testing.assert_series_equal",
"numpy.log",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.random.random",
"pandas.Index",
"pandas.DataFrame",
"pandas._testing.assert_frame_equal"
],
[
"numpy.any",
"numpy.get_printoptions",
"numpy.set_printoptions",
"numpy.nonzero"
]
] |
MountainRange/mobius_score | [
"fc900ab456b3e3431cfa6d9684b97ec6321d0a23"
] | [
"audiospec.py"
] | [
"\nimport numpy as np\nimport librosa\nfrom tqdm import tqdm\nfrom audiomisc import ks_key\n\nfrom constants import VERTICALCUTOFF, FFT_SIZE, FFT_HOP\n\ndef stft(x, fft_size, hopsamp):\n window = np.hanning(fft_size)\n return np.array([np.fft.rfft(window*x[i:i+fft_size])\n for i in range(0, len(x)-fft_size, hopsamp)])\n\ndef wav_to_spec(fn):\n input_signal, sample_rate = librosa.load(fn, sr=44100)\n stft_mag = np.array([])\n split = int(1e6)#int(264600)\n fft_size = FFT_SIZE\n hopsamp = fft_size // FFT_HOP\n for i in tqdm(range(len(input_signal)//split)):\n temp_signal = input_signal[(split*i):(split*(i+1))]\n stft_full = stft(temp_signal, fft_size, hopsamp)\n\n stft_full = abs(stft_full)\n if np.max(stft_full) != 0:\n stft_full = (stft_full - np.mean(stft_full)) / np.std(stft_full)\n stft_full += abs(np.min(stft_full))\n stft_full *= 255.0/np.max(stft_full)\n \n if stft_mag.shape[0] != 0:\n stft_mag = np.concatenate((stft_mag, stft_full))\n else:\n stft_mag = stft_full\n\n print(\"Calculating tempo\")\n tempo, _ = librosa.beat.beat_track(y=input_signal, sr=sample_rate, hop_length=512)\n\n print(\"Calculating music key\")\n chroma = librosa.feature.chroma_stft(y=input_signal, sr=sample_rate)\n chroma = [sum(x)/len(x) for x in chroma]\n bestmajor, bestminor = ks_key(chroma)\n if max(bestmajor) > max(bestminor):\n key = np.argmax(bestmajor)\n # C, Db, D, Eb, E, F, F#, G, Ab, A, Bb, B\n keymap = [0, -5, 2, -3, 4, -1, 6, 1, -4, 3, -2, 5]\n else:\n key = np.argmax(bestminor)\n # c, c#, d, eb, e, f, f#, g, g#, a, bb, b\n keymap = [-3, 4, -1, -6, 1, -4, 3, -2, 5, 0, -5, 2]\n \n return stft_mag[:, :VERTICALCUTOFF].T, tempo, keymap[key]"
] | [
[
"numpy.argmax",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.std",
"numpy.concatenate",
"numpy.fft.rfft",
"numpy.hanning",
"numpy.mean"
]
] |
Prasad9/Detect-Flags-SSD | [
"c0d662bde99ed8df33d72bd06d61d5eb869d31a5"
] | [
"detect/image_detector.py"
] | [
"from __future__ import print_function\nimport mxnet as mx\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom dataset.iterator import DetTestImageIter\nimport cv2\n\nclass ImageDetector(object):\n\t\"\"\"\n\tSSD detector which hold a detection network and wraps detection API\n\n\tParameters:\n\t----------\n\tsymbol : mx.Symbol\n\t\tdetection network Symbol\n\tmodel_prefix : str\n\t\tname prefix of trained model\n\tepoch : int\n\t\tload epoch of trained model\n\tdata_shape : int\n\t\tinput data resize shape\n\tmean_pixels : tuple of float\n\t\t(mean_r, mean_g, mean_b)\n\tbatch_size : int\n\t\trun detection with batch size\n\tctx : mx.ctx\n\t\tdevice to use, if None, use mx.cpu() as default context\n\t\"\"\"\n\tdef __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \\\n\t\t\t\t\tclasses, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None):\n\t\tself.ctx = ctx\n\t\tif self.ctx is None:\n\t\t\tself.ctx = mx.cpu()\n\t\tload_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)\n\t\tif symbol is None:\n\t\t\tsymbol = load_symbol\n\t\tself.mod = mx.mod.Module(symbol, label_names=None, context=ctx)\n\t\tself.data_shape = data_shape\n\t\tself.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])\n\t\tself.mod.set_params(args, auxs)\n\t\tself.data_shape = data_shape\n\t\tself.mean_pixels = mean_pixels\n\t\tself.classes = classes\n\t\tself.colors = []\n\t\tself.fill_random_colors_int()\n\t\tself.thresh = thresh\n\t\tself.plot_confidence = plot_confidence\n\n\tdef fill_random_colors(self):\n\t\timport random\n\t\tfor i in range(len(self.classes)):\n\t\t\tself.colors.append((random.random(), random.random(), random.random()))\n\n\t\t#print(self.colors)\n\n\tdef fill_random_colors_int(self):\n\t\timport random\n\t\tfor i in range(len(self.classes)):\n\t\t\tself.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))\n\n\t\t#print(self.colors)\n\n\n\tdef detect(self, det_iter, show_timer=False):\n\t\t\"\"\"\n\t\tdetect all images in iterator\n\n\t\tParameters:\n\t\t----------\n\t\tdet_iter : DetIter\n\t\t\titerator for all testing images\n\t\tshow_timer : Boolean\n\t\t\twhether to print out detection exec time\n\n\t\tReturns:\n\t\t----------\n\t\tlist of detection results\n\t\t\"\"\"\n\t\tnum_images = det_iter._size\n\t\tresult = []\n\t\tdetections = []\n\t\t#if not isinstance(det_iter, mx.io.PrefetchingIter):\n\t\t#\tdet_iter = mx.io.PrefetchingIter(det_iter)\n\t\tstart = timer()\n\t\tfor pred, _, _ in self.mod.iter_predict(det_iter):\n\t\t\tdetections.append(pred[0].asnumpy())\n\t\ttime_elapsed = timer() - start\n\t\tif show_timer:\n\t\t\tprint(\"Detection time for {} images: {:.4f} sec\".format(num_images, time_elapsed))\n\t\tfor output in detections:\n\t\t\tfor i in range(output.shape[0]):\n\t\t\t\tdet = output[i, :, :]\n\t\t\t\tres = det[np.where(det[:, 0] >= 0)[0]]\n\t\t\t\tresult.append(res)\n\t\tresized_img = det_iter.current_data()\n\t\treturn result, resized_img\n\n\tdef im_detect(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for detecting multiple images\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str\n\t\t\timage path or list of image paths\n\t\troot_dir : str\n\t\t\tdirectory of input images, optional if image path already\n\t\t\thas full directory information\n\t\textension : str\n\t\t\timage extension, eg. 
\".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\t\tlist of detection results in format [det0, det1...], det is in\n\t\tformat np.array([id, score, xmin, ymin, xmax, ymax]...)\n\t\t\"\"\"\n\t\tim_list = [img]\n\t\ttest_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels)\n\t\treturn self.detect(test_iter, show_timer)\n\n\tdef plot_rects(self, img, dets):\n\t\timg_shape = img.shape\n\t\tfor i in range(dets.shape[0]):\n\t\t\tcls_id = int(dets[i, 0])\n\t\t\tif cls_id >= 0:\n\t\t\t\tscore = dets[i, 1]\n\t\t\t\t#print('Score is {}, class {}'.format(score, cls_id))\n\t\t\t\tif score > self.thresh:\n\t\t\t\t\txmin = int(dets[i, 2] * img_shape[1])\n\t\t\t\t\tymin = int(dets[i, 3] * img_shape[0])\n\t\t\t\t\txmax = int(dets[i, 4] * img_shape[1])\n\t\t\t\t\tymax = int(dets[i, 5] * img_shape[0])\n\n\t\t\t\t\tcv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)\n\n\t\t\t\t\tclass_name = self.classes[cls_id]\n\t\t\t\t\tcv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)\n\t\t\t\t\t#print('Class id = {}, Score = {}, Country = {}, rect = ({}, {}, {}, {})'.format(cls_id, score, class_name, xmin, ymin, xmax, ymax))\n\n\tdef detect_and_visualize_image(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for im_detect and visualize_detection\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str or str\n\t\timage path or list of image paths\n\t\troot_dir : str or None\n\t\tdirectory of input images, optional if image path already\n\t\thas full directory information\n\t\textension : str or None\n\t\timage extension, eg. \".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\n\t\t\"\"\"\n\t\tdets, resized_img = self.im_detect(img, show_timer=show_timer)\n\t\tresized_img = resized_img.asnumpy()\n\t\tresized_img /= 255.0\n\t\tfor k, det in enumerate(dets):\n\t\t\tself.plot_rects(resized_img, det)\n\t\treturn resized_img\n\n\tdef scale_and_plot_rects(self, img, dets):\n\t\timg_shape = img.shape\n\t\tfor i in range(dets.shape[0]):\n\t\t\tcls_id = int(dets[i, 0])\n\t\t\tif cls_id >= 0:\n\t\t\t\tscore = dets[i, 1]\n\t\t\t\t#print('Score is {}, class {}'.format(score, cls_id))\n\t\t\t\tif score > self.thresh:\n\t\t\t\t\txmin = int(dets[i, 2] * img_shape[1])\n\t\t\t\t\tymin = int(dets[i, 3] * img_shape[0])\n\t\t\t\t\txmax = int(dets[i, 4] * img_shape[1])\n\t\t\t\t\tymax = int(dets[i, 5] * img_shape[0])\n\n\t\t\t\t\tcv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)\n\n\t\t\t\t\tclass_name = self.classes[cls_id]\n\t\t\t\t\tcv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3)\n\t\t\t\t\tif self.plot_confidence:\n\t\t\t\t\t\tscore_color = (0, 255, 0) if score > 0.5 else (255, 0, 0)\n\t\t\t\t\t\tcv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1)\n\t\t\t\t\t\n\n\tdef detect_and_layover_image(self, img, show_timer=False):\n\t\t\"\"\"\n\t\twrapper for im_detect and visualize_detection\n\n\t\tParameters:\n\t\t----------\n\t\tim_list : list of str or str\n\t\timage path or list of image paths\n\t\troot_dir : str or None\n\t\tdirectory of input images, optional if image path already\n\t\thas full directory information\n\t\textension : str or None\n\t\timage extension, eg. \".jpg\", optional\n\n\t\tReturns:\n\t\t----------\n\n\t\t\"\"\"\n\t\tdets, _ = self.im_detect(img, show_timer=show_timer)\n\t\tfor k, det in enumerate(dets):\n\t\t\tself.scale_and_plot_rects(img, det)\n\t\treturn img\n"
] | [
[
"numpy.where"
]
] |
richardtjornhammar/graphtastic | [
"1e64d408ffb3e09d5ad068986c847032d5cfdcbd"
] | [
"src/graphtastic/clustering.py"
] | [
"\"\"\"\nCopyright 2022 RICHARD TJÖRNHAMMAR\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport typing\nimport sys\n\ntry :\n from numba import jit\n bUseNumba = True\nexcept ImportError :\n print ( \"ImportError:\",\" NUMBA. WILL NOT USE IT\")\n bUseNumba = False\nexcept OSError:\n print ( \"OSError:\",\" NUMBA. WILL NOT USE IT\")\n bUseNumba = False\n\n# THE FOLLOWING KMEANS ALGORITHM IS THE AUTHOR OWN LOCAL VERSION\nif bUseNumba :\n @jit(nopython=True)\n def seeded_kmeans( dat:np.array, cent:np.array ) :\n #\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2345\n # AGAIN CONSIDER USING THE C++ VERSION SINCE IT IS ALOT FASTER\n # HERE WE SPEED IT UP USING NUMBA IF THE USER HAS IT INSTALLED AS A MODULE\n #\n NN , MM = np.shape ( dat )\n KK , LL = np.shape ( cent )\n if not LL == MM :\n print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )\n\n labels = [ int(z) for z in np.zeros(NN) ]\n w = labels\n counts = np.zeros(KK)\n tmp_ce = np.zeros(KK*MM).reshape(KK,MM)\n old_error , error , TOL = 0. , 1. , 1.0E-10\n while abs ( error - old_error ) > TOL :\n old_error = error\n error = 0.\n counts = counts * 0.\n tmp_ce = tmp_ce * 0.\n # START BC\n for h in range ( NN ) :\n min_distance = 1.0E30\n for i in range ( KK ) :\n distance = np.sum( ( dat[h]-cent[i] )**2 )\n if distance < min_distance :\n labels[h] = i\n min_distance = distance\n tmp_ce[labels[h]] += dat[ h ]\n counts[labels[h]] += 1.0\n error += min_distance\n # END BC\n for i in range ( KK ) :\n if counts[i]>0:\n cent[i] = tmp_ce[i]/counts[i]\n centroids = cent\n return ( labels , centroids )\nelse :\n def seeded_kmeans( dat:np.array, cent:np.array ) :\n #\n # SLOW SLUGGISH KMEANS WITH A DUBBLE FOR LOOP\n # IN PYTHON! WOW! SUCH SPEED!\n #\n NN , MM = np.shape ( dat )\n KK , LL = np.shape ( cent )\n if not LL == MM :\n print ( 'WARNING DATA FORMAT ERROR. NON COALESCING COORDINATE AXIS' )\n\n labels = [ int(z) for z in np.zeros(NN) ]\n w = labels\n counts = np.zeros(KK)\n tmp_ce = np.zeros(KK*MM).reshape(KK,MM)\n old_error , error , TOL = 0. , 1. , 1.0E-10\n while abs ( error - old_error ) > TOL :\n old_error = error\n error = 0.\n counts = counts * 0.\n tmp_ce = tmp_ce * 0.\n # START BC\n for h in range ( NN ) :\n min_distance = 1.0E30\n for i in range ( KK ) :\n distance = np.sum( ( dat[h]-cent[i] )**2 )\n if distance < min_distance :\n labels[h] = i\n min_distance = distance\n tmp_ce[labels[h]] += dat[ h ]\n counts[labels[h]] += 1.0\n error += min_distance\n # END BC\n for i in range ( KK ) :\n if counts[i]>0:\n cent[i] = tmp_ce[i]/counts[i]\n centroids = cent\n return ( labels , centroids )\n\n\nif bUseNumba :\n @jit(nopython=True)\n def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :\n description = \"\"\" This is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). 
For a small distance cutoff, you should see all the parts of the system and for a large distance cutoff, you should see the entire system. It has been employed for statistical analysis work as well as the original application where it was employed to segment molecular systems.\"\"\"\n if bVerbose :\n print ( \"CONNECTIVITY CLUSTERING OF \", np.shape(B), \" MATRIX\" )\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2277\n # CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS\n # A LOT FASTER\n # FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n nr_sq,mr_sq = np.shape(B)\n if nr_sq != mr_sq :\n print ( 'ERROR: FAILED' )\n N = mr_sq\n res , nvisi, s, NN, ndx, C = [0], [0], [0], [0], [0], 0\n res .append(0)\n for i in range(N) :\n nvisi.append(i+1)\n res.append(0); res.append(0)\n ndx.append(i)\n\n res = res[1:]\n nvisi = nvisi[1:]\n ndx = ndx[1:]\n while ( len(ndx)>0 ) :\n i = ndx[-1] ; ndx = ndx[:-1]\n NN = []\n if ( nvisi[i]>0 ) :\n C-=1\n for j in range(N) :\n if ( B[i,j]<=val ) :\n NN.append(j)\n while ( len(NN)>0 ) :\n # back pop_back\n k = NN[-1]; NN = NN[:-1]\n nvisi[k] = C\n for j in range(N):\n if ( B[j,k]<=val ) :\n for q in range(N) :\n if ( nvisi[q] == j+1 ) :\n NN.append(q)\n if bVerbose : # VERBOSE\n print ( \"INFO \"+str(-1*C) +\" clusters\" )\n Nc = [ 0 for i in range(-1*C) ]\n for q in range(N) :\n res[ q*2+1 ] = q;\n res[ q*2 ] = nvisi[q]-C;\n Nc [res[q*2]]+= 1;\n if bVerbose :\n print ( \" \"+str(res[q*2])+\" \"+str(res[2*q+1]) )\n if bVerbose :\n for i in range(-1*C) :\n print( \"CLUSTER \" +str(i)+ \" HAS \" + str(Nc[i]) + \" ELEMENTS\")\n return ( Nc , np.array(res[:-1]).reshape(-1,2) )\nelse :\n def connectivity ( B:np.array , val:float , bVerbose:bool = False ) :\n description=\"\"\"\nThis is a cutoff based clustering algorithm. The intended use is to supply a distance matrix and a cutoff value (then becomes symmetric positive). 
For a small distanc>\n \"\"\"\n if bVerbose :\n print ( \"CONNECTIVITY CLUSTERING OF \", np.shape(B), \" MATRIX\" )\n # PYTHON ADAPTATION OF MY C++ CODE THAT CAN BE FOUND IN\n # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # AROUND LINE 2277\n # CONSIDER COMPILING AND USING THAT AS A MODULE INSTEAD OF THIS SINCE IT IS\n # A LOT FASTER\n # FOR A DESCRIPTION READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #\n nr_sq,mr_sq = np.shape(B)\n if nr_sq != mr_sq :\n print ( 'ERROR' )\n return ( -1 )\n N = mr_sq\n res , nvisi, s, NN, ndx, C = [], [], [], [], [], 0\n res .append(0)\n for i in range(N) :\n nvisi.append(i+1)\n res.append(0); res.append(0)\n ndx.append(i)\n while ( len(ndx)>0 ) :\n i = ndx[-1] ; ndx = ndx[:-1]\n NN = []\n if ( nvisi[i]>0 ) :\n C-=1\n for j in range(N) :\n if ( B[i,j]<=val ) :\n NN.append(j)\n while ( len(NN)>0 ) :\n # back pop_back\n k = NN[-1]; NN = NN[:-1]\n nvisi[k] = C\n for j in range(N):\n if ( B[j,k]<=val ) :\n for q in range(N) :\n if ( nvisi[q] == j+1 ) :\n NN.append(q)\n if bVerbose : # VERBOSE\n print ( \"INFO \"+str(-1*C) +\" clusters\" )\n Nc = [ 0 for i in range(-1*C) ]\n for q in range(N) :\n res[ q*2+1 ] = q;\n res[ q*2 ] = nvisi[q]-C;\n Nc [res[q*2]]+= 1;\n if bVerbose :\n print ( \" \"+str(res[q*2])+\" \"+str(res[2*q+1]) )\n if bVerbose:\n for i in range(-1*C) :\n print( \"CLUSTER \" +str(i)+ \" HAS \" + str(Nc[i]) + \" ELEMENTS\")\n return ( Nc , np.array(res[:-1]).reshape(-1,2) )\n\nif bUseNumba :\n @jit(nopython=True)\n def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :\n #\n # AN ALTERNATIVE METHOD\n # DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY\n # CLUSTERING MODULE (in src/impetuous/clustering.py )\n # OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n # THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS\n # WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER\n #\n if len ( distm.shape ) < 2 :\n print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )\n\n def b2i ( a:list ) -> list :\n return ( [ i for b,i in zip(a,range(len(a))) if b ] )\n def f2i ( a:list,alf:float ) -> list :\n return ( b2i( a<=alf ) )\n\n L = []\n for a in distm :\n bAdd = True\n ids = set( f2i(a,alpha) )\n for i in range(len(L)) :\n if len( L[i]&ids ) >= n_connections :\n L[i] = L[i] | ids\n bAdd = False\n break\n if bAdd and len(ids) >= n_connections :\n L .append( ids )\n return ( L )\nelse :\n def connectedness ( distm:np.array , alpha:float , n_connections:int=1 ) -> list :\n #\n # AN ALTERNATIVE METHOD\n # DOES THE SAME THING AS THE CONNECTIVITY CODE IN MY\n # CLUSTERING MODULE (in src/impetuous/clustering.py )\n # OR IN https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc\n # as of commit https://github.com/richardtjornhammar/RichTools/commit/76201bb07687017ae16a4e57cb1ed9fd8c394f18 2016\n # CONNECTIVITY SEARCH FOR (connectivity) CONNECTIVITY\n #\n # THIS ROUTINE RETURNS A LIST BELONGING TO THE CLUSTERS\n # WITH THE SET OF INDICES THAT MAPS TO THE CLUSTER\n #\n if len ( distm.shape ) < 2 :\n print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )\n\n def b2i ( a:list ) -> list :\n return ( [ i for b,i in zip(a,range(len(a))) if b ] )\n def f2i ( a:list,alf:float ) -> 
list :\n return ( b2i( a<=alf ) )\n\n L = []\n for a in distm :\n bAdd = True\n ids = set( f2i(a,alpha) )\n for i in range(len(L)) :\n if len( L[i]&ids ) >= n_connections :\n L[i] = L[i] | ids\n bAdd = False\n break\n if bAdd and len(ids) >= n_connections :\n L .append( ids )\n return ( L )\n\ndef dbscan ( coordinates:np.array = None , distance_matrix:np.array = None ,\n eps:float = None, minPts:int = None , bVerbose:bool = False ) -> dict :\n\n def absolute_coordinates_to_distance_matrix ( Q:np.array , power:int=2 , bInvPow:bool=False ) -> np.array :\n # UNUSED FALLBACK\n DP = np.array( [ np.sum((np.array(p)-np.array(q))**power) for p in Q for q in Q] ).reshape(np.shape(Q)[0],np.shape(Q)[0])\n if bInvPow :\n DP = DP**(1.0/power)\n return ( DP )\n\n if bVerbose :\n print ( \"THIS IMPLEMENTATION FOR DBSCAN\" )\n print ( \"ASSESSMENT OF NOISE DIFFERS FROM\" )\n print ( \"THE IMPLEMENTATION FOUND IN SKLEARN\" )\n print ( \"ASSUMES LINEAR DISTANCES, NOT SQUARED\" )\n #\n # FOR A DESCRIPTION OF THE CONNECTIVITY READ PAGE 30 (16 INTERNAL NUMBERING) of:\n # https://kth.diva-portal.org/smash/get/diva2:748464/FULLTEXT01.pdf\n #from impetuous.clustering import absolute_coordinates_to_distance_matrix\n #from impetuous.clustering import connectivity\n\n import operator\n if not operator.xor( coordinates is None , distance_matrix is None ) :\n print ( \"ONLY SUPPLY A SINGE DATA FRAME OR A DISTANCE MATRIX\" )\n print ( \"dbscan FAILED\" )\n print ( \"DATA MATRICES NEEDS TO BE SPECIFIED WITH \\\" distance_matrix = ... \\\" \" )\n exit(1)\n\n if distance_matrix is None :\n from graphtastic.fit import absolute_coordinates_to_distance_matrix\n distance_matrix_ = absolute_coordinates_to_distance_matrix ( coordinates )\n eps = eps**2.0\n else :\n distance_matrix_ = distance_matrix\n\n isNoise = np.sum(distance_matrix_<eps,0)-1 < minPts\n i_ = 0\n for ib in isNoise :\n if ib :\n distance_matrix_ [ i_] = ( 1+eps )*10.0\n distance_matrix_.T[i_] = ( 1+eps )*10.0\n distance_matrix_[i_][i_] = 0.\n i_ = i_+1\n clustercontent , clustercontacts = connectivity ( distance_matrix_ , eps )\n return ( {'cluster content': clustercontent, 'clusterid-particleid' : clustercontacts, 'is noise':isNoise} )\n\ndef reformat_dbscan_results ( results:dict ) -> dict :\n if True :\n clusters = {}\n for icontent in range(len(results['cluster content'])) :\n content = results[ 'cluster content' ][ icontent ]\n for c in results [ 'clusterid-particleid' ] :\n if c[0] == icontent :\n if results[ 'is noise' ][c[1]] :\n icontent=-1\n if icontent in clusters:\n clusters[ icontent ] .append( c[1] )\n else :\n clusters[ icontent ] = [ c[1] ]\n return ( clusters )\n"
] | [
[
"numpy.array",
"numpy.sum",
"numpy.shape",
"numpy.zeros"
]
] |
TochkaAI/Paddle | [
"f249a5f05f0f5832279244d88c8cb4eaaad1fbd4"
] | [
"python/paddle/fluid/tests/unittests/ir/inference/test_trt_activation_pass.py"
] | [
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport unittest\nimport numpy as np\nfrom inference_pass_test import InferencePassTest\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom paddle.fluid.core import PassVersionChecker\nfrom paddle.fluid.core import AnalysisConfig\n\n\nclass TensorRTSubgraphPassActivationTest(InferencePassTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n\n def setUp(self):\n self.setUpTensorRTParam()\n with fluid.program_guard(self.main_program, self.startup_program):\n data = fluid.data(\n name=\"data\", shape=[-1, 6, 64, 64], dtype=\"float32\")\n act_out = self.append_act(data)\n out = fluid.layers.batch_norm(act_out, is_test=True)\n self.feeds = {\n \"data\": np.random.random([1, 6, 64, 64]).astype(\"float32\"),\n }\n self.fetch_list = [out]\n\n def append_act(self, x):\n return fluid.layers.relu(x)\n\n def test_check_output(self):\n if core.is_compiled_with_cuda():\n use_gpu = True\n if os.path.exists(self.path + \"_opt_cache\"):\n shutil.rmtree(self.path + \"_opt_cache\")\n if self.trt_parameters.precision == AnalysisConfig.Precision.Float32:\n self.check_output_with_option(use_gpu)\n else:\n self.check_output_with_option(use_gpu, 1e-3)\n self.assertTrue(\n PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))\n\n\nclass TensorRTSubgraphPassLeakyReluTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.leaky_relu(x)\n\n\nclass TensorRTSubgraphPassRelu6Test(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.relu6(x)\n\n\nclass TensorRTSubgraphPassSoftMaxTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.softmax(x)\n\n\nclass TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.sigmoid(x)\n\n\nclass TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_swish(x)\n\n\nclass TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_sigmoid(x)\n\n\nclass TensorRTSubgraphPassHardSwishPluginTest(\n TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)\n\n\nclass TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.clip(x, 0, 1)\n\n\nclass TensorRTSubgraphPassTanhTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.tanh(x)\n\n\nclass TensorRTSubgraphPassSwishTest(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = 
TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, True, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassSwishFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassDynamicSwishFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.swish(x)\n\n\nclass TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='all')\n\n\nclass TensorRTSubgraphPassPreluChannelTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='channel')\n\n\nclass TensorRTSubgraphPassPreluElementTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.prelu(x, mode='element')\n\n\nclass TensorRTSubgraphPassGeluTest(TensorRTSubgraphPassActivationTest):\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluDynamicTest(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Float32, False, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16Test(TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16SerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16DynamicTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, False, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nclass TensorRTSubgraphPassGeluFp16DynamicSerializeTest(\n TensorRTSubgraphPassActivationTest):\n def setUpTensorRTParam(self):\n self.enable_trt = True\n 
self.trt_parameters = TensorRTSubgraphPassActivationTest.TensorRTParam(\n 1 << 30, 32, 0, AnalysisConfig.Precision.Half, True, False)\n self.dynamic_shape_params = TensorRTSubgraphPassActivationTest.DynamicShapeParam(\n {\n 'data': [1, 6, 8, 8]\n }, {'data': [1, 6, 512, 512]}, {'data': [1, 6, 256, 256]}, False)\n\n def append_act(self, x):\n return fluid.layers.gelu(x)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.random"
]
] |
naveenkambham/big_five_personality_machine_learning | [
"a4d673e7e72287f2448b6a7b2729e5231b4f7ab2"
] | [
"UnitTests/test_battery_sensor_features_extractor.py"
] | [
"\"\"\"\nDeveloper : Naveen Kambham\nDescription: Unit testing for battery sensor feature extractor code. Majority of the data extraction code has to be tested visually by looking at the plots distributions.\n\"\"\"\n#Importing the required libraries.\nimport unittest\nimport numpy as np\nfrom FeatureExtraction import battery_sensor_features_extractor\n\n\n\nclass BatterySensorTestCase(unittest.TestCase):\n \"\"\"\n Tests for battery_sensor_features_extractor.py\n \"\"\"\n def test_TakeMostProbableTimeInStudy(self):\n \"\"\"\n to test the most probable time functionality\n :return:\n \"\"\"\n #case 1 multiple values in each day\n result= battery_sensor_features_extractor.TakeMostProbableTimeInStudy([1,1,1,1,2,2,3,3,3,3,3,3,3,3],[1,2,0])\n self.assertEqual(result,3)\n\n # case 2 only one value in a day\n result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(\n [1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [1])\n self.assertEqual(result, 4)\n\n # case 3 only one value in a day and it is not exists in the study times so far seen\n result = battery_sensor_features_extractor.TakeMostProbableTimeInStudy(\n [1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3], [0])\n self.assertEqual(result, 0)\n\n def test_extract(self):\n \"\"\"\n testing the feature extractor code\n :return:\n \"\"\"\n #extracting the features\n df_battery=battery_sensor_features_extractor.extract(r\"/home/naveen/Data/Shed10/Filtered/battery_events.csv\")\n\n # charging should atleast be greater than 0\n self.assertTrue(np.min(df_battery['Battery_Charging_Duration'] >=0))\n self.assertTrue(np.min(df_battery['CharginTimeDaily'] >=0) and np.max(df_battery['CharginTimeDaily'] <=24))\n\n\nif __name__ == '__main__':\n unittest.main()"
] | [
[
"numpy.max",
"numpy.min"
]
] |
KOLANICH/qiskit-terra | [
"3947f258ddb31a2b8dd17aff5d2d041d29d74601"
] | [
"qiskit/quantum_info/operators/measures.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\"\"\"\nA collection of useful quantum information functions for operators.\n\"\"\"\n\nimport warnings\nimport numpy as np\nfrom scipy import sparse\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.quantum_info.operators.base_operator import BaseOperator\nfrom qiskit.quantum_info.operators.operator import Operator\nfrom qiskit.quantum_info.operators.channel.quantum_channel import QuantumChannel\nfrom qiskit.quantum_info.operators.channel import Choi, SuperOp\nfrom qiskit.quantum_info.states.densitymatrix import DensityMatrix\nfrom qiskit.quantum_info.states.measures import state_fidelity\n\ntry:\n import cvxpy\n _HAS_CVX = True\nexcept ImportError:\n _HAS_CVX = False\n\n\ndef process_fidelity(channel,\n target=None,\n require_cp=True,\n require_tp=False):\n r\"\"\"Return the process fidelity of a noisy quantum channel.\n\n\n The process fidelity :math:`F_{\\text{pro}}(\\mathcal{E}, \\methcal{F})`\n between two quantum channels :math:`\\mathcal{E}, \\mathcal{F}` is given by\n\n .. math:\n F_{\\text{pro}}(\\mathcal{E}, \\mathcal{F})\n = F(\\rho_{\\mathcal{E}}, \\rho_{\\mathcal{F}})\n\n where :math:`F` is the :func:`~qiskit.quantum_info.state_fidelity`,\n :math:`\\rho_{\\mathcal{E}} = \\Lambda_{\\mathcal{E}} / d` is the\n normalized :class:`~qiskit.quantum_info.Choi` matrix for the channel\n :math:`\\mathcal{E}`, and :math:`d` is the input dimension of\n :math:`\\mathcal{E}`.\n\n When the target channel is unitary this is equivalent to\n\n .. 
math::\n F_{\\text{pro}}(\\mathcal{E}, U)\n = \\frac{Tr[S_U^\\dagger S_{\\mathcal{E}}]}{d^2}\n\n where :math:`S_{\\mathcal{E}}, S_{U}` are the\n :class:`~qiskit.quantum_info.SuperOp` matrices for the *input* quantum\n channel :math:`\\mathcal{E}` and *target* unitary :math:`U` respectively,\n and :math:`d` is the input dimension of the channel.\n\n Args:\n channel (Operator or QuantumChannel): input quantum channel.\n target (Operator or QuantumChannel or None): target quantum channel.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The process fidelity :math:`F_{\\text{pro}}`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions.\n QiskitError: if the channel and target are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'process_fidelity', 'channel')\n target = _input_formatter(\n target, Operator, 'process_fidelity', 'target')\n\n if target:\n # Validate dimensions\n if channel.dim != target.dim:\n raise QiskitError(\n 'Input quantum channel and target unitary must have the same '\n 'dimensions ({} != {}).'.format(channel.dim, target.dim))\n\n # Validate complete-positivity and trace-preserving\n for label, chan in [('Input', channel), ('Target', target)]:\n if isinstance(chan, Operator) and (require_cp or require_tp):\n is_unitary = chan.is_unitary()\n # Validate as unitary\n if require_cp and not is_unitary:\n raise QiskitError('{} channel is not completely-positive'.format(label))\n if require_tp and not is_unitary:\n raise QiskitError('{} channel is not trace-preserving'.format(label))\n elif chan is not None:\n # Validate as QuantumChannel\n if require_cp and not chan.is_cp():\n raise QiskitError('{} channel is not completely-positive'.format(label))\n if require_tp and not chan.is_tp():\n raise QiskitError('{} channel is not trace-preserving'.format(label))\n\n if isinstance(target, Operator):\n # Compute fidelity with unitary target by applying the inverse\n # to channel and computing fidelity with the identity\n channel = channel @ target.adjoint()\n target = None\n\n input_dim, _ = channel.dim\n if target is None:\n # Compute process fidelity with identity channel\n if isinstance(channel, Operator):\n # |Tr[U]/dim| ** 2\n fid = np.abs(np.trace(channel.data) / input_dim)**2\n else:\n # Tr[S] / (dim ** 2)\n fid = np.trace(SuperOp(channel).data) / (input_dim**2)\n return float(np.real(fid))\n\n # For comparing two non-unitary channels we compute the state fidelity of\n # the normalized Choi-matrices. This is equivalent to the previous definition\n # when the target is a unitary channel.\n state1 = DensityMatrix(Choi(channel).data / input_dim)\n state2 = DensityMatrix(Choi(target).data / input_dim)\n return state_fidelity(state1, state2, validate=False)\n\n\ndef average_gate_fidelity(channel,\n target=None,\n require_cp=True,\n require_tp=False):\n r\"\"\"Return the average gate fidelity of a noisy quantum channel.\n\n The average gate fidelity :math:`F_{\\text{ave}}` is given by\n\n .. 
math::\n F_{\\text{ave}}(\\mathcal{E}, U)\n &= \\int d\\psi \\langle\\psi|U^\\dagger\n \\mathcal{E}(|\\psi\\rangle\\!\\langle\\psi|)U|\\psi\\rangle \\\\\n &= \\frac{d F_{\\text{pro}}(\\mathcal{E}, U) + 1}{d + 1}\n\n where :math:`F_{\\text{pro}}(\\mathcal{E}, U)` is the\n :meth:`~qiskit.quantum_info.process_fidelity` of the input quantum\n *channel* :math:`\\mathcal{E}` with a *target* unitary :math:`U`, and\n :math:`d` is the dimension of the *channel*.\n\n Args:\n channel (QuantumChannel or Operator): noisy quantum channel.\n target (Operator or None): target unitary operator.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The average gate fidelity :math:`F_{\\text{ave}}`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions,\n or have different input and output dimensions.\n QiskitError: if the channel and target or are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'average_gate_fidelity', 'channel')\n target = _input_formatter(\n target, Operator, 'average_gate_fidelity', 'target')\n\n if target is not None:\n try:\n target = Operator(target)\n except QiskitError:\n raise QiskitError(\n 'Target channel is not a unitary channel. To compare '\n 'two non-unitary channels use the '\n '`qiskit.quantum_info.process_fidelity` function instead.')\n dim, _ = channel.dim\n f_pro = process_fidelity(channel,\n target=target,\n require_cp=require_cp,\n require_tp=require_tp)\n return (dim * f_pro + 1) / (dim + 1)\n\n\ndef gate_error(channel, target=None, require_cp=True, require_tp=False):\n r\"\"\"Return the gate error of a noisy quantum channel.\n\n The gate error :math:`E` is given by the average gate infidelity\n\n .. 
math::\n E(\\mathcal{E}, U) = 1 - F_{\\text{ave}}(\\mathcal{E}, U)\n\n where :math:`F_{\\text{ave}}(\\mathcal{E}, U)` is the\n :meth:`~qiskit.quantum_info.average_gate_fidelity` of the input\n quantum *channel* :math:`\\mathcal{E}` with a *target* unitary\n :math:`U`.\n\n Args:\n channel (QuantumChannel): noisy quantum channel.\n target (Operator or None): target unitary operator.\n If `None` target is the identity operator [Default: None].\n require_cp (bool): require channel to be completely-positive\n [Default: True].\n require_tp (bool): require channel to be trace-preserving\n [Default: False].\n\n Returns:\n float: The average gate error :math:`E`.\n\n Raises:\n QiskitError: if the channel and target do not have the same dimensions,\n or have different input and output dimensions.\n QiskitError: if the channel and target or are not completely-positive\n (with ``require_cp=True``) or not trace-preserving\n (with ``require_tp=True``).\n \"\"\"\n # Format inputs\n channel = _input_formatter(\n channel, SuperOp, 'gate_error', 'channel')\n target = _input_formatter(\n target, Operator, 'gate_error', 'target')\n return 1 - average_gate_fidelity(\n channel, target=target, require_cp=require_cp, require_tp=require_tp)\n\n\ndef diamond_norm(choi, **kwargs):\n r\"\"\"Return the diamond norm of the input quantum channel object.\n\n This function computes the completely-bounded trace-norm (often\n referred to as the diamond-norm) of the input quantum channel object\n using the semidefinite-program from reference [1].\n\n Args:\n choi(Choi or QuantumChannel): a quantum channel object or\n Choi-matrix array.\n kwargs: optional arguments to pass to CVXPY solver.\n\n Returns:\n float: The completely-bounded trace norm\n :math:`\\|\\mathcal{E}\\|_{\\diamond}`.\n\n Raises:\n QiskitError: if CVXPY package cannot be found.\n\n Additional Information:\n The input to this function is typically *not* a CPTP quantum\n channel, but rather the *difference* between two quantum channels\n :math:`\\|\\Delta\\mathcal{E}\\|_\\diamond` where\n :math:`\\Delta\\mathcal{E} = \\mathcal{E}_1 - \\mathcal{E}_2`.\n\n Reference:\n J. Watrous. \"Simpler semidefinite programs for completely bounded\n norms\", arXiv:1207.5726 [quant-ph] (2012).\n\n .. note::\n\n This function requires the optional CVXPY package to be installed.\n Any additional kwargs will be passed to the ``cvxpy.solve``\n function. See the CVXPY documentation for information on available\n SDP solvers.\n \"\"\"\n _cvxpy_check('`diamond_norm`') # Check CVXPY is installed\n\n choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi'))\n\n def cvx_bmat(mat_r, mat_i):\n \"\"\"Block matrix for embedding complex matrix in reals\"\"\"\n return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]])\n\n # Dimension of input and output spaces\n dim_in = choi._input_dim\n dim_out = choi._output_dim\n size = dim_in * dim_out\n\n # SDP Variables to convert to real valued problem\n r0_r = cvxpy.Variable((dim_in, dim_in))\n r0_i = cvxpy.Variable((dim_in, dim_in))\n r0 = cvx_bmat(r0_r, r0_i)\n\n r1_r = cvxpy.Variable((dim_in, dim_in))\n r1_i = cvxpy.Variable((dim_in, dim_in))\n r1 = cvx_bmat(r1_r, r1_i)\n\n x_r = cvxpy.Variable((size, size))\n x_i = cvxpy.Variable((size, size))\n iden = sparse.eye(dim_out)\n\n # Watrous uses row-vec convention for his Choi matrix while we use\n # col-vec. 
It turns out row-vec convention is requried for CVXPY too\n # since the cvxpy.kron function must have a constant as its first argument.\n c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r], [x_r.T, cvxpy.kron(iden, r1_r)]])\n c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i], [-x_i.T, cvxpy.kron(iden, r1_i)]])\n c = cvx_bmat(c_r, c_i)\n\n # Convert col-vec convention Choi-matrix to row-vec convention and\n # then take Transpose: Choi_C -> Choi_R.T\n choi_rt = np.transpose(\n np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)),\n (3, 2, 1, 0)).reshape(choi.data.shape)\n choi_rt_r = choi_rt.real\n choi_rt_i = choi_rt.imag\n\n # Constraints\n cons = [\n r0 >> 0, r0_r == r0_r.T, r0_i == - r0_i.T, cvxpy.trace(r0_r) == 1,\n r1 >> 0, r1_r == r1_r.T, r1_i == - r1_i.T, cvxpy.trace(r1_r) == 1,\n c >> 0\n ]\n\n # Objective function\n obj = cvxpy.Maximize(cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i))\n prob = cvxpy.Problem(obj, cons)\n sol = prob.solve(**kwargs)\n return sol\n\n\ndef _cvxpy_check(name):\n \"\"\"Check that a supported CVXPY version is installed\"\"\"\n # Check if CVXPY package is installed\n if not _HAS_CVX:\n raise QiskitError(\n 'CVXPY backage is requried for {}. Install'\n ' with `pip install cvxpy` to use.'.format(name))\n # Check CVXPY version\n version = cvxpy.__version__\n if version[0] != '1':\n raise ImportError(\n 'Incompatible CVXPY version {} found.'\n ' Install version >=1.0.'.format(version))\n\n\n# pylint: disable=too-many-return-statements\ndef _input_formatter(obj, fallback_class, func_name, arg_name):\n \"\"\"Formatting function for input conversion\"\"\"\n # Empty input\n if obj is None:\n return obj\n\n # Channel-like input\n if isinstance(obj, QuantumChannel):\n return obj\n if hasattr(obj, 'to_quantumchannel'):\n return obj.to_quantumchannel()\n if hasattr(obj, 'to_channel'):\n return obj.to_channel()\n\n # Unitary-like input\n if isinstance(obj, (Gate, BaseOperator)):\n return Operator(obj)\n if hasattr(obj, 'to_operator'):\n return obj.to_operator()\n\n warnings.warn(\n 'Passing in a list or Numpy array to `{}` `{}` argument is '\n 'deprecated as of 0.17.0 since the matrix representation cannot be inferred '\n 'unambiguously. Use a Gate or BaseOperator subclass (eg. Operator, '\n 'SuperOp, Choi) object instead.'.format(func_name, arg_name),\n DeprecationWarning)\n warnings.warn(\n 'Treating array input as a {} object'.format(fallback_class.__name__))\n return fallback_class(obj)\n"
] | [
[
"scipy.sparse.eye",
"numpy.trace",
"numpy.reshape",
"numpy.real"
]
] |
noenfugler/jesse | [
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8",
"217a3168620a755c1a9576d9deb27105db7dccf8"
] | [
"jesse/indicators/supersmoother.py",
"jesse/indicators/sinwma.py",
"jesse/indicators/damiani_volatmeter.py",
"jesse/indicators/alligator.py"
] | [
"from typing import Union\n\nimport numpy as np\nfrom numba import njit\n\nfrom jesse.helpers import get_candle_source, slice_candles\n\n\ndef supersmoother(candles: np.ndarray, period: int = 14, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n Super Smoother Filter 2pole Butterworth\n This indicator was described by John F. Ehlers\n\n :param candles: np.ndarray\n :param period: int - default=14\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n # Accept normal array too.\n if len(candles.shape) == 1:\n source = candles\n else:\n source = get_candle_source(candles, source_type=source_type)\n\n res = supersmoother_fast(source, period)\n\n return res if sequential else res[-1]\n\n\n@njit\ndef supersmoother_fast(source, period):\n a = np.exp(-1.414 * np.pi / period)\n b = 2 * a * np.cos(1.414 * np.pi / period)\n newseries = np.copy(source)\n for i in range(2, source.shape[0]):\n newseries[i] = (1 + a ** 2 - b) / 2 * (source[i] + source[i - 1]) \\\n + b * newseries[i - 1] - a ** 2 * newseries[i - 2]\n return newseries\n",
"from typing import Union\n\nimport numpy as np\nfrom numpy.lib.stride_tricks import sliding_window_view\n\nfrom jesse.helpers import get_candle_source, slice_candles, same_length\n\n\ndef sinwma(candles: np.ndarray, period: int = 14, source_type: str = \"close\", sequential: bool = False) -> Union[\n float, np.ndarray]:\n \"\"\"\n Sine Weighted Moving Average (SINWMA)\n\n :param candles: np.ndarray\n :param period: int - default: 14\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n sines = np.array([np.sin((i + 1) * np.pi / (period + 1)) for i in range(0, period)])\n w = sines / sines.sum()\n swv = sliding_window_view(source, window_shape=period)\n res = np.average(swv, weights=w, axis=-1)\n\n return same_length(candles, res) if sequential else res[-1]\n",
"from collections import namedtuple\n\nimport numpy as np\nimport talib\nfrom numba import njit\n\nfrom jesse.helpers import get_candle_source\nfrom jesse.helpers import slice_candles\n\nDamianiVolatmeter = namedtuple('DamianiVolatmeter', ['vol', 'anti'])\n\n\ndef damiani_volatmeter(candles: np.ndarray, vis_atr: int = 13, vis_std: int = 20, sed_atr: int = 40, sed_std: int = 100,\n threshold: float = 1.4, source_type: str = \"close\",\n sequential: bool = False) -> DamianiVolatmeter:\n \"\"\"\n Damiani Volatmeter\n\n :param candles: np.ndarray\n :param vis_atr: int - default=13\n :param vis_std: int - default=20\n :param sed_atr: int - default=40\n :param sed_std: int - default=100\n :param threshold: float - default=1.4\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n atrvis = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=vis_atr)\n atrsed = talib.ATR(candles[:, 3], candles[:, 4], candles[:, 2], timeperiod=sed_atr)\n\n vol, t = damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std, threshold)\n\n if sequential:\n return DamianiVolatmeter(vol, t)\n else:\n return DamianiVolatmeter(vol[-1], t[-1])\n\n\n@njit\ndef damiani_volatmeter_fast(source, sed_std, atrvis, atrsed, vis_std,\n threshold): # Function is compiled to machine code when called the first time\n lag_s = 0.5\n\n vol = np.full_like(source, 0)\n t = np.full_like(source, 0)\n for i in range(source.shape[0]):\n if not (i < sed_std):\n vol[i] = atrvis[i] / atrsed[i] + lag_s * (vol[i - 1] - vol[i - 3])\n anti_thres = np.std(source[i - vis_std:i]) / np.std(source[i - sed_std:i])\n t[i] = threshold - anti_thres\n return vol, t\n",
"from collections import namedtuple\n\nimport numpy as np\n\nfrom jesse.helpers import get_candle_source, np_shift, slice_candles\n\nAG = namedtuple('AG', ['jaw', 'teeth', 'lips'])\n\n\ndef alligator(candles: np.ndarray, source_type: str = \"close\", sequential: bool = False) -> AG:\n \"\"\"\n Alligator\n\n :param candles: np.ndarray\n :param source_type: str - default: \"close\"\n :param sequential: bool - default=False\n\n :return: AG(jaw, teeth, lips)\n \"\"\"\n candles = slice_candles(candles, sequential)\n\n source = get_candle_source(candles, source_type=source_type)\n\n jaw = np_shift(numpy_ewma(source, 13), 8, fill_value=np.nan)\n teeth = np_shift(numpy_ewma(source, 8), 5, fill_value=np.nan)\n lips = np_shift(numpy_ewma(source, 5), 3, fill_value=np.nan)\n\n if sequential:\n return AG(jaw, teeth, lips)\n else:\n return AG(jaw[-1], teeth[-1], lips[-1])\n\n\ndef numpy_ewma(data: np.ndarray, window: int):\n \"\"\"\n\n :param data:\n :param window:\n :return:\n \"\"\"\n alpha = 1 / window\n scale = 1 / (1 - alpha)\n n = data.shape[0]\n scale_arr = (1 - alpha) ** (-1 * np.arange(n))\n weights = (1 - alpha) ** np.arange(n)\n pw0 = (1 - alpha) ** (n - 1)\n mult = data * pw0 * scale_arr\n cumsums = mult.cumsum()\n out = cumsums * scale_arr[::-1] / weights.cumsum()\n\n return out\n"
] | [
[
"numpy.cos",
"numpy.exp",
"numpy.copy"
],
[
"numpy.sin",
"numpy.average",
"numpy.lib.stride_tricks.sliding_window_view"
],
[
"numpy.full_like",
"numpy.std"
],
[
"numpy.arange"
]
] |
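As a quick illustration of the sinwma entry indexed above, here is a minimal standalone sketch of the same sine-weighted moving average using only numpy; the function name sinwma_np and the synthetic prices series are illustrative and are not part of the indexed repository:

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    def sinwma_np(source: np.ndarray, period: int = 14) -> np.ndarray:
        # Sine weights are symmetric within each window, peak at its centre,
        # and are normalised to sum to 1.
        sines = np.sin(np.arange(1, period + 1) * np.pi / (period + 1))
        weights = sines / sines.sum()
        # Each row of the view is one rolling window of length `period`.
        windows = sliding_window_view(source, window_shape=period)
        return np.average(windows, weights=weights, axis=-1)

    prices = np.cumsum(np.random.default_rng(0).normal(size=200)) + 100.0
    print(sinwma_np(prices, period=14)[-1])  # latest SINWMA value

Because the weights peak at the middle of each window, the filter emphasises the centre of the lookback rather than the most recent candle, which is what distinguishes it from a plain or exponentially weighted MA.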
WeilerP/cellrank | [
"c8c2b9f6bd2448861fb414435aee7620ca5a0bad"
] | [
"cellrank/pl/_circular_projection.py"
] | [
"from typing import Any, Tuple, Union, Mapping, Callable, Optional, Sequence\nfrom typing_extensions import Literal\n\nfrom enum import auto\nfrom types import MappingProxyType\nfrom pathlib import Path\n\nimport scvelo as scv\nfrom anndata import AnnData\nfrom cellrank import logging as logg\nfrom cellrank.tl import Lineage\nfrom cellrank._key import Key\nfrom scanpy._utils import deprecated_arg_names\nfrom cellrank.tl._enum import ModeEnum\nfrom cellrank.ul._docs import d\nfrom cellrank.pl._utils import _held_karp\nfrom cellrank.tl._utils import save_fig, _unique_order_preserving\nfrom cellrank.ul._utils import _check_collection\nfrom cellrank.tl._lineage import PrimingDegree\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import pairwise_distances\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm, LinearSegmentedColormap\nfrom matplotlib.collections import LineCollection\n\n\nclass LineageOrder(ModeEnum): # noqa: D101\n DEFAULT = auto()\n OPTIMAL = auto()\n\n\nclass LabelRot(ModeEnum): # noqa: D101\n DEFAULT = auto()\n BEST = auto()\n\n\nMetric_T = Union[str, Callable, np.ndarray, pd.DataFrame]\n_N = 200\n\n\ndef _get_distances(data: Union[np.ndarray, Lineage], metric: Metric_T) -> np.ndarray:\n if isinstance(data, Lineage):\n data = data.X\n\n if isinstance(metric, str) or callable(metric):\n metric = pairwise_distances(data.T, metric=metric)\n elif isinstance(metric, (pd.DataFrame, np.ndarray)):\n shape = (data.shape[1], data.shape[1])\n if metric.shape != shape:\n raise ValueError(\n f\"Expected an `numpy.array` or `pandas.DataFrame` of shape `{shape}`, found `{metric.shape}`.\"\n )\n else:\n raise TypeError(\n f\"Expected either metric defined by `str`, `callable` or a pairwise distance matrix of type\"\n f\" `numpy.ndarray` or `pandas.DataFrame`, found `{type(metric).__name__}`.\"\n )\n\n return np.asarray(metric, dtype=np.float64)\n\n\ndef _get_optimal_order(data: Lineage, metric: Metric_T) -> Tuple[float, np.ndarray]:\n \"\"\"Solve the TSP using dynamic programming.\"\"\"\n return _held_karp(_get_distances(data, metric))\n\n\[email protected]\n@deprecated_arg_names({\"labeldistance\": \"label_distance\", \"labelrot\": \"label_rot\"})\ndef circular_projection(\n adata: AnnData,\n keys: Union[str, Sequence[str]],\n backward: bool = False,\n lineages: Optional[Union[str, Sequence[str]]] = None,\n early_cells: Optional[Union[Mapping[str, Sequence[str]], Sequence[str]]] = None,\n lineage_order: Optional[Literal[\"default\", \"optimal\"]] = None,\n metric: Union[str, Callable, np.ndarray, pd.DataFrame] = \"correlation\",\n normalize_by_mean: bool = True,\n ncols: int = 4,\n space: float = 0.25,\n use_raw: bool = False,\n text_kwargs: Mapping[str, Any] = MappingProxyType({}),\n label_distance: float = 1.25,\n label_rot: Union[Literal[\"default\", \"best\"], float] = \"best\",\n show_edges: bool = True,\n key_added: Optional[str] = None,\n figsize: Optional[Tuple[float, float]] = None,\n dpi: Optional[int] = None,\n save: Optional[Union[str, Path]] = None,\n **kwargs: Any,\n):\n r\"\"\"\n Plot absorption probabilities on a circular embedding as in :cite:`velten:17`.\n\n Parameters\n ----------\n %(adata)s\n keys\n Keys in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.var_names`. Additional keys are:\n\n - `'kl_divergence'` - as in :cite:`velten:17`, computes KL-divergence between the fate probabilities\n of a cell and the average fate probabilities. 
See ``early_cells`` for more information.\n - `'entropy'` - as in :cite:`setty:19`, computes entropy over a cells fate probabilities.\n\n %(backward)s\n lineages\n Lineages to plot. If `None`, plot all lineages.\n early_cells\n Cell ids or a mask marking early cells used to define the average fate probabilities. If `None`, use all cells.\n Only used when `'kl_divergence'` is in ``keys``. If a :class:`dict`, key specifies a cluster key in\n :attr:`anndata.AnnData.obs` and the values specify cluster labels containing early cells.\n lineage_order\n Can be one of the following:\n\n - `None` - it will determined automatically, based on the number of lineages.\n - `'optimal'` - order lineages optimally by solving the Travelling salesman problem (TSP).\n Recommended for <= `20` lineages.\n - `'default'` - use the order as specified by ``lineages``.\n\n metric\n Metric to use when constructing pairwise distance matrix when ``lineage_order = 'optimal'``. For available\n options, see :func:`sklearn.metrics.pairwise_distances`.\n normalize_by_mean\n If `True`, normalize each lineage by its mean probability, as done in :cite:`velten:17`.\n ncols\n Number of columns when plotting multiple ``keys``.\n space\n Horizontal and vertical space between for :func:`matplotlib.pyplot.subplots_adjust`.\n use_raw\n Whether to access :attr:`anndata.AnnData.raw` when there are ``keys`` in :attr:`anndata.AnnData.var_names`.\n text_kwargs\n Keyword arguments for :func:`matplotlib.pyplot.text`.\n label_distance\n Distance at which the lineage labels will be drawn.\n label_rot\n How to rotate the labels. Valid options are:\n\n - `'best'` - rotate labels so that they are easily readable.\n - `'default'` - use :mod:`matplotlib`'s default.\n - `None` - same as `'default'`.\n\n If a :class:`float`, all labels will be rotated by this many degrees.\n show_edges\n Whether to show the edges surrounding the simplex.\n key_added\n Key in :attr:`anndata.AnnData.obsm` where to add the circular embedding. 
If `None`, it will be set to\n `'X_fate_simplex_{fwd,bwd}'`, based on ``backward``.\n %(plotting)s\n kwargs\n Keyword arguments for :func:`scvelo.pl.scatter`.\n\n Returns\n -------\n %(just_plots)s\n Also updates ``adata`` with the following fields:\n\n - :attr:`anndata.AnnData.obsm` ``['{key_added}']`` - the circular projection.\n - :attr:`anndata.AnnData.obs` ``['to_{initial,terminal}_states_{method}']`` - the priming degree,\n if a method is present in ``keys``.\n \"\"\"\n if label_distance is not None and label_distance < 0:\n raise ValueError(\n f\"Expected `label_distance` to be positive, found `{label_distance}`.\"\n )\n\n if label_rot is None:\n label_rot = LabelRot.DEFAULT\n label_rot = LabelRot(label_rot)\n\n suffix = \"bwd\" if backward else \"fwd\"\n if key_added is None:\n key_added = \"X_fate_simplex_\" + suffix\n\n if isinstance(keys, str):\n keys = (keys,)\n\n keys = _unique_order_preserving(keys)\n keys_ = _check_collection(\n adata, keys, \"obs\", key_name=\"Observation\", raise_exc=False\n ) + _check_collection(\n adata, keys, \"var_names\", key_name=\"Gene\", raise_exc=False, use_raw=use_raw\n )\n haystack = set(PrimingDegree)\n keys = keys_ + [k for k in keys if k in haystack]\n keys = _unique_order_preserving(keys)\n\n if not len(keys):\n raise ValueError(\"No valid keys have been selected.\")\n\n lineage_key = Key.obsm.abs_probs(backward)\n if lineage_key not in adata.obsm:\n raise KeyError(f\"Lineages key `{lineage_key!r}` not found in `adata.obsm`.\")\n\n probs: Lineage = adata.obsm[lineage_key]\n\n if isinstance(lineages, str):\n lineages = (lineages,)\n elif lineages is None:\n lineages = probs.names\n\n probs = adata.obsm[lineage_key][lineages]\n n_lin = probs.shape[1]\n if n_lin < 3:\n raise ValueError(f\"Expected at least `3` lineages, found `{n_lin}`.\")\n\n X = probs.X.copy()\n if normalize_by_mean:\n X /= np.mean(X, axis=0)[None, :]\n X /= X.sum(1)[:, None]\n # this happens when cells for sel. 
lineages sum to 1 (or when the lineage average is 0, which is unlikely)\n X = np.nan_to_num(X, nan=1.0 / n_lin, copy=False)\n\n if lineage_order is None:\n lineage_order = (\n LineageOrder.OPTIMAL if 3 < n_lin <= 20 else LineageOrder.DEFAULT\n )\n logg.debug(f\"Set ordering to `{lineage_order}`\")\n lineage_order = LineageOrder(lineage_order)\n\n if lineage_order == LineageOrder.OPTIMAL:\n logg.info(f\"Solving TSP for `{n_lin}` states\")\n _, order = _get_optimal_order(X, metric=metric)\n else:\n order = np.arange(n_lin)\n\n probs = probs[:, order]\n X = X[:, order]\n\n angle_vec = np.linspace(0, 2 * np.pi, n_lin, endpoint=False)\n angle_vec_sin = np.cos(angle_vec)\n angle_vec_cos = np.sin(angle_vec)\n\n x = np.sum(X * angle_vec_sin, axis=1)\n y = np.sum(X * angle_vec_cos, axis=1)\n adata.obsm[key_added] = np.c_[x, y]\n\n nrows = int(np.ceil(len(keys) / ncols))\n fig, ax = plt.subplots(\n nrows=nrows,\n ncols=ncols,\n figsize=(ncols * 5, nrows * 5) if figsize is None else figsize,\n dpi=dpi,\n )\n\n fig.subplots_adjust(wspace=space, hspace=space)\n axes = np.ravel([ax])\n\n text_kwargs = dict(text_kwargs)\n text_kwargs[\"ha\"] = \"center\"\n text_kwargs[\"va\"] = \"center\"\n\n _i = 0\n for _i, (k, ax) in enumerate(zip(keys, axes)):\n\n set_lognorm, colorbar = False, kwargs.pop(\"colorbar\", True)\n try:\n _ = PrimingDegree(k)\n logg.debug(f\"Calculating priming degree using `method={k}`\")\n val = probs.priming_degree(method=k, early_cells=early_cells)\n k = f\"{lineage_key}_{k}\"\n adata.obs[k] = val\n except ValueError:\n pass\n\n scv.pl.scatter(\n adata,\n basis=key_added,\n color=k,\n show=False,\n ax=ax,\n use_raw=use_raw,\n norm=LogNorm() if set_lognorm else None,\n colorbar=colorbar,\n **kwargs,\n )\n if colorbar and set_lognorm:\n cbar = ax.collections[0].colorbar\n cax = cbar.locator.axis\n ticks = cax.minor.locator.tick_values(cbar.vmin, cbar.vmax)\n ticks = [ticks[0], ticks[len(ticks) // 2 + 1], ticks[-1]]\n cbar.set_ticks(ticks)\n cbar.set_ticklabels([f\"{t:.2f}\" for t in ticks])\n cbar.update_ticks()\n\n patches, texts = ax.pie(\n np.ones_like(angle_vec),\n labeldistance=label_distance,\n rotatelabels=True,\n labels=probs.names[::-1],\n startangle=-360 / len(angle_vec) / 2,\n counterclock=False,\n textprops=text_kwargs,\n )\n\n for patch in patches:\n patch.set_visible(False)\n\n # clockwise\n for color, text in zip(probs.colors[::-1], texts):\n if isinstance(label_rot, (int, float)):\n text.set_rotation(label_rot)\n elif label_rot == LabelRot.BEST:\n rot = text.get_rotation()\n text.set_rotation(rot + 90 + (1 - rot // 180) * 180)\n elif label_rot != LabelRot.DEFAULT:\n raise NotImplementedError(\n f\"Label rotation `{label_rot}` is not yet implemented.\"\n )\n text.set_color(color)\n\n if not show_edges:\n continue\n\n for i, color in enumerate(probs.colors):\n next = (i + 1) % n_lin\n x = 1.04 * np.linspace(angle_vec_sin[i], angle_vec_sin[next], _N)\n y = 1.04 * np.linspace(angle_vec_cos[i], angle_vec_cos[next], _N)\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n cmap = LinearSegmentedColormap.from_list(\n \"abs_prob_cmap\", [color, probs.colors[next]], N=_N\n )\n lc = LineCollection(segments, cmap=cmap, zorder=-1)\n lc.set_array(np.linspace(0, 1, _N))\n lc.set_linewidth(2)\n ax.add_collection(lc)\n\n for j in range(_i + 1, len(axes)):\n axes[j].remove()\n\n if save is not None:\n save_fig(fig, save)\n"
] | [
[
"numpy.sum",
"numpy.ones_like",
"numpy.asarray",
"numpy.cos",
"matplotlib.pyplot.subplots",
"numpy.ravel",
"numpy.arange",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.colors.LogNorm",
"matplotlib.collections.LineCollection",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.nan_to_num",
"numpy.linspace",
"numpy.mean",
"sklearn.metrics.pairwise_distances"
]
] |
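The core of circular_projection in the cellrank file above is a simplex-to-circle map: each lineage is pinned to a point on the unit circle and every cell lands at the probability-weighted average of those anchors. Below is a minimal sketch of just that step, assuming a row-stochastic fate matrix; the helper name project_to_circle and the Dirichlet-sampled toy data are illustrative, not taken from the repository:

    import numpy as np

    def project_to_circle(probs: np.ndarray) -> np.ndarray:
        """Map an (n_cells, n_lineages) fate-probability matrix onto 2D.

        Lineages are placed at evenly spaced angles on the unit circle; a
        cell's coordinates are the probability-weighted mean of those anchors.
        """
        n_lin = probs.shape[1]
        angles = np.linspace(0, 2 * np.pi, n_lin, endpoint=False)
        x = np.sum(probs * np.cos(angles), axis=1)
        y = np.sum(probs * np.sin(angles), axis=1)
        return np.c_[x, y]

    rng = np.random.default_rng(0)
    fate = rng.dirichlet(np.ones(4), size=1000)  # rows sum to 1, like absorption probabilities
    emb = project_to_circle(fate)
    print(emb.shape)  # (1000, 2)

Cells committed to a single lineage end up near that lineage's anchor, while cells with near-uniform probabilities collapse toward the origin, which is what makes the embedding readable as a priming plot.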
blnm/RSE | [
"6a3f0dd858ea4b6dafcfb1d97bb979e101d9911c"
] | [
"RAdam.py"
] | [
"import tensorflow as tf\r\nfrom tensorflow.python.eager import context\r\nfrom tensorflow.python.framework import ops\r\nfrom tensorflow.python.ops import clip_ops\r\nfrom tensorflow.python.ops import control_flow_ops\r\nfrom tensorflow.python.ops import math_ops\r\nfrom tensorflow.python.ops import resource_variable_ops\r\nfrom tensorflow.python.ops import state_ops\r\nfrom tensorflow.python.training import optimizer\r\n\r\n__all__ = ['RAdamOptimizer']\r\n\r\n\r\nclass RAdamOptimizer(optimizer.Optimizer):\r\n \"\"\"RAdam optimizer.\r\n\r\n According to the paper\r\n [On The Variance Of The Adaptive Learning Rate And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).\r\n \"\"\"\r\n\r\n def __init__(self,\r\n learning_rate=0.001,\r\n beta1=0.9,\r\n beta2=0.999,\r\n epsilon=1e-7,\r\n L2_decay=0.,\r\n amsgrad=False,\r\n total_steps=0,\r\n warmup_proportion=0.1,\r\n min_lr=0.,\r\n use_locking=False,\r\n name=\"RAdam\",\r\n decay_vars=None,\r\n L1_decay=0.0,\r\n clip_gradients=False, clip_multiplier=3.0, clip_epsilon=1e-2):\r\n r\"\"\"Construct a new Adam optimizer.\r\n\r\n Args:\r\n learning_rate: A Tensor or a floating point value. The learning rate.\r\n beta1: A float value or a constant float tensor. The exponential decay\r\n rate for the 1st moment estimates.\r\n beta2: A float value or a constant float tensor. The exponential decay\r\n rate for the 2nd moment estimates.\r\n epsilon: A small constant for numerical stability. This epsilon is\r\n \"epsilon hat\" in the Kingma and Ba paper (in the formula just before\r\n Section 2.1), not the epsilon in Algorithm 1 of the paper.\r\n L2_decay: A floating point value. Weight decay for each param.\r\n amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from\r\n the paper \"On the Convergence of Adam and beyond\".\r\n total_steps: An integer. Total number of training steps.\r\n Enable warmup by setting a positive value.\r\n warmup_proportion: A floating point value. The proportion of increasing steps.\r\n min_lr: A floating point value. Minimum learning rate after warmup.\r\n name: Optional name for the operations created when applying gradients.\r\n Defaults to \"Adam\". @compatibility(eager) When eager execution is\r\n enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be\r\n a callable that takes no arguments and returns the actual value to use.\r\n This can be useful for changing these values across different\r\n invocations of optimizer functions. @end_compatibility\r\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\r\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\r\n gradients by value, `decay` is included for backward compatibility to\r\n allow time inverse decay of learning rate. 
`lr` is included for backward\r\n compatibility, recommended to use `learning_rate` instead.\r\n \"\"\"\r\n super(RAdamOptimizer, self).__init__(use_locking, name)\r\n self._lr = learning_rate\r\n self._beta1 = beta1\r\n self._beta2 = beta2\r\n self._epsilon = epsilon\r\n self._weight_decay = L2_decay\r\n self._L1_decay = L1_decay\r\n self._amsgrad = amsgrad\r\n self._total_steps = float(total_steps)\r\n self._warmup_proportion = warmup_proportion\r\n self._min_lr = min_lr\r\n self._initial_weight_decay = L2_decay\r\n self._initial_total_steps = total_steps\r\n self.clip_multiplier = clip_multiplier\r\n self.clip_epsilon = clip_epsilon\r\n self.clip_gradients = clip_gradients\r\n self.clip_multiplier_t = ops.convert_to_tensor(self.clip_multiplier, name=\"clip_multiplier\")\r\n self.clip_epsilon_t = ops.convert_to_tensor(self.clip_epsilon, name=\"clip_epsilon\")\r\n\r\n self._lr_t = None\r\n self._step_t = None\r\n self._beta1_t = None\r\n self._beta2_t = None\r\n self._epsilon_t = None\r\n self._weight_decay_t = None\r\n self._total_steps_t = None\r\n self._warmup_proportion_t = None\r\n self._min_lr_t = None\r\n self.reg_vars = set(decay_vars) if decay_vars is not None else set()\r\n\r\n def _get_beta_accumulators(self):\r\n with ops.init_scope():\r\n if context.executing_eagerly():\r\n graph = None\r\n else:\r\n graph = ops.get_default_graph()\r\n return (self._get_non_slot_variable(\"step\", graph=graph),\r\n self._get_non_slot_variable(\"beta1_power\", graph=graph),\r\n self._get_non_slot_variable(\"beta2_power\", graph=graph))\r\n\r\n def _create_slots_internal(self, var_list):\r\n first_var = min(var_list, key=lambda x: x.name)\r\n self._create_non_slot_variable(initial_value=1.0, name=\"step\", colocate_with=first_var)\r\n self._create_non_slot_variable(initial_value=self._beta1, name=\"beta1_power\", colocate_with=first_var)\r\n self._create_non_slot_variable(initial_value=self._beta2, name=\"beta2_power\", colocate_with=first_var)\r\n for v in var_list:\r\n self._zeros_slot(v, \"m\", self._name)\r\n self._zeros_slot(v, \"v\", self._name)\r\n if self._amsgrad:\r\n self._zeros_slot(v, \"vhat\", self._name)\r\n\r\n def _prepare(self):\r\n lr = self._call_if_callable(self._lr)\r\n beta1 = self._call_if_callable(self._beta1)\r\n beta2 = self._call_if_callable(self._beta2)\r\n epsilon = self._call_if_callable(self._epsilon)\r\n weight_decay = self._call_if_callable(self._weight_decay)\r\n total_steps = self._call_if_callable(self._total_steps)\r\n warmup_proportion = self._call_if_callable(self._warmup_proportion)\r\n min_lr = self._call_if_callable(self._min_lr)\r\n\r\n self._lr_t = ops.convert_to_tensor(lr, name=\"learning_rate\")\r\n self._beta1_t = ops.convert_to_tensor(beta1, name=\"beta1\")\r\n self._beta2_t = ops.convert_to_tensor(beta2, name=\"beta2\")\r\n self._epsilon_t = ops.convert_to_tensor(epsilon, name=\"epsilon\")\r\n self._weight_decay_t = ops.convert_to_tensor(weight_decay, name=\"weight_decay\")\r\n self._total_steps_t = ops.convert_to_tensor(total_steps, name=\"total_steps\")\r\n self._warmup_proportion_t = ops.convert_to_tensor(warmup_proportion, name=\"warmup_proportion\")\r\n self._min_lr_t = ops.convert_to_tensor(min_lr, name=\"min_lr\")\r\n\r\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\r\n tvars = list(zip(*grads_and_vars))[1]\r\n self._create_slots_internal(tvars)\r\n\r\n return super().apply_gradients(grads_and_vars, global_step, name)\r\n\r\n def _apply_dense(self, grad, var):\r\n return self._resource_apply_dense(grad, 
var)\r\n\r\n def _resource_apply_dense(self, grad, var):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\r\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\r\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\r\n\r\n if self._initial_total_steps > 0:\r\n total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)\r\n warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)\r\n min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)\r\n warmup_steps = total_steps * warmup_proportion\r\n decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)\r\n decay_rate = (min_lr - lr_t) / decay_steps\r\n lr_t = tf.where(\r\n step <= warmup_steps,\r\n lr_t * (step / warmup_steps),\r\n lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),\r\n )\r\n\r\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\r\n beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\r\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\r\n\r\n v = self.get_slot(var, \"v\")\r\n\r\n if self.clip_gradients:\r\n clipVal = math_ops.sqrt(\r\n tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t\r\n grad = clip_ops.clip_by_norm(grad, clipVal)\r\n\r\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\r\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\r\n\r\n m = self.get_slot(var, \"m\")\r\n\r\n v_t = state_ops.assign(v, beta2_t * v + (1.0 - beta2_t) * math_ops.square(grad), use_locking=self._use_locking)\r\n v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t\r\n grad_corr = grad / v_corr_t\r\n\r\n m_t = state_ops.assign(m, beta1_t * m + (1.0 - beta1_t) * grad_corr, use_locking=self._use_locking)\r\n m_corr_t = m_t / (1.0 - beta1_power)\r\n\r\n r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *\r\n (sma_t - 2.0) / (sma_inf - 2.0) *\r\n sma_inf / sma_t)\r\n\r\n var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t, m_corr_t)\r\n\r\n if var in self.reg_vars:\r\n if self._initial_weight_decay > 0.0:\r\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\r\n if self._L1_decay > 0.0:\r\n var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)\r\n\r\n with tf.control_dependencies([var_t]):\r\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\r\n\r\n updates = [var_update, m_t, v_t]\r\n return control_flow_ops.group(*updates)\r\n\r\n def _apply_sparse_shared(self, grad, var, indices, scatter_add):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)\r\n beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)\r\n lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)\r\n\r\n if self._initial_total_steps > 0:\r\n total_steps = math_ops.cast(self._total_steps_t, var.dtype.base_dtype)\r\n warmup_proportion = math_ops.cast(self._warmup_proportion_t, var.dtype.base_dtype)\r\n min_lr = math_ops.cast(self._min_lr_t, var.dtype.base_dtype)\r\n warmup_steps = total_steps * warmup_proportion\r\n decay_steps = math_ops.maximum(total_steps - warmup_steps, 1)\r\n decay_rate = (min_lr - lr_t) / decay_steps\r\n lr_t = tf.where(\r\n step <= warmup_steps,\r\n lr_t * (step / warmup_steps),\r\n lr_t + decay_rate * math_ops.minimum(step - warmup_steps, decay_steps),\r\n )\r\n\r\n beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)\r\n 
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)\r\n epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)\r\n v = self.get_slot(var, \"v\")\r\n\r\n if self.clip_gradients:\r\n clipVal = math_ops.sqrt(\r\n tf.reduce_sum(v) / (1.0 - beta2_power)) * self.clip_multiplier_t + self.clip_epsilon_t\r\n grad = clip_ops.clip_by_norm(grad, clipVal)\r\n\r\n sma_inf = 2.0 / (1.0 - beta2_t) - 1.0\r\n sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)\r\n\r\n m = self.get_slot(var, \"m\")\r\n m_scaled_g_values = grad * (1 - beta1_t)\r\n m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)\r\n with ops.control_dependencies([m_t]):\r\n m_t = scatter_add(m, indices, m_scaled_g_values)\r\n m_corr_t = m_t / (1.0 - beta1_power)\r\n\r\n v_scaled_g_values = (grad * grad) * (1 - beta2_t)\r\n v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)\r\n with ops.control_dependencies([v_t]):\r\n v_t = scatter_add(v, indices, v_scaled_g_values)\r\n if self._amsgrad:\r\n vhat = self.get_slot(var, 'vhat')\r\n vhat_t = state_ops.assign(vhat, math_ops.maximum(vhat, v_t), use_locking=self._use_locking)\r\n v_corr_t = math_ops.sqrt(vhat_t / (1.0 - beta2_power)) + epsilon_t\r\n else:\r\n v_corr_t = math_ops.sqrt(v_t / (1.0 - beta2_power)) + epsilon_t\r\n\r\n r_t = math_ops.sqrt((sma_t - 4.0) / (sma_inf - 4.0) *\r\n (sma_t - 2.0) / (sma_inf - 2.0) *\r\n sma_inf / sma_t)\r\n\r\n var_t = tf.where(sma_t >= 5.0, r_t * m_corr_t / v_corr_t, m_corr_t)\r\n\r\n if var in self.reg_vars:\r\n if self._initial_weight_decay > 0.0:\r\n var_t += math_ops.cast(self._weight_decay_t, var.dtype.base_dtype) * var\r\n if self._L1_decay > 0.0:\r\n var_t += math_ops.cast(self._L1_decay, var.dtype.base_dtype) * math_ops.sign(var)\r\n\r\n var_update = state_ops.assign_sub(var, lr_t * var_t, use_locking=self._use_locking)\r\n\r\n updates = [var_update, m_t, v_t]\r\n if self._amsgrad:\r\n updates.append(vhat_t)\r\n return control_flow_ops.group(*updates)\r\n\r\n def _apply_sparse(self, grad, var):\r\n return self._apply_sparse_shared(\r\n grad.values,\r\n var,\r\n grad.indices,\r\n lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))\r\n\r\n def _resource_scatter_add(self, x, i, v):\r\n with ops.control_dependencies([resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\r\n return x.value()\r\n\r\n def _resource_apply_sparse(self, grad, var, indices):\r\n return self._apply_sparse_shared(grad, var, indices, self._resource_scatter_add)\r\n\r\n def _finish(self, update_ops, name_scope):\r\n with ops.control_dependencies(update_ops):\r\n step, beta1_power, beta2_power = self._get_beta_accumulators()\r\n with ops.colocate_with(beta1_power):\r\n update_step = step.assign(step + 1.0, use_locking=self._use_locking)\r\n update_beta1 = beta1_power.assign(beta1_power * self._beta1_t, use_locking=self._use_locking)\r\n update_beta2 = beta2_power.assign(beta2_power * self._beta2_t, use_locking=self._use_locking)\r\n return control_flow_ops.group(*update_ops + [update_step, update_beta1, update_beta2], name=name_scope)\r\n"
] | [
[
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.ops.state_ops.scatter_add",
"tensorflow.reduce_sum",
"tensorflow.python.ops.math_ops.sign",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_add",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.control_dependencies",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.where",
"tensorflow.python.ops.clip_ops.clip_by_norm"
]
] |
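The piece of RAdamOptimizer above that differs from plain Adam is the variance-rectification gate built from sma_inf and sma_t. A small numpy sketch of just that term, following the implementation's sma_t >= 5.0 switch; the function name radam_rectification is illustrative:

    import numpy as np

    def radam_rectification(step: int, beta2: float = 0.999):
        """Return (use_adaptive_lr, r_t) for a given step, mirroring the gate above."""
        sma_inf = 2.0 / (1.0 - beta2) - 1.0
        beta2_power = beta2 ** step
        sma_t = sma_inf - 2.0 * step * beta2_power / (1.0 - beta2_power)
        if sma_t >= 5.0:
            # Variance of the adaptive learning rate is tractable:
            # apply the rectified, Adam-style update.
            r_t = np.sqrt((sma_t - 4.0) / (sma_inf - 4.0)
                          * (sma_t - 2.0) / (sma_inf - 2.0)
                          * sma_inf / sma_t)
            return True, r_t
        # Early steps: fall back to an un-rectified update (no adaptive denominator).
        return False, 1.0

    for step in (1, 5, 100, 10_000):
        print(step, radam_rectification(step))

For the default beta2 = 0.999 the gate stays closed only for roughly the first handful of steps; after that r_t rises toward 1 and the update approaches standard Adam, which is the behaviour the warmup-free RAdam paper argues for.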
rhshadrach/pandas | [
"777c0f90c6067c636fcd76ce003a8fbfcc311d7b"
] | [
"pandas/core/generic.py"
] | [
"import collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n FrozenSet,\n Hashable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import config\n\nfrom pandas._libs import Timestamp, iNaT, lib\nfrom pandas._typing import (\n Axis,\n FilePathOrBuffer,\n FrameOrSeries,\n JSONSerializable,\n Label,\n Level,\n Renamer,\n)\nfrom pandas.compat import set_function_name\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_fillna_kwargs,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float,\n is_integer,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_re_compilable,\n is_scalar,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.indexes.api import (\n Index,\n InvalidIndexError,\n MultiIndex,\n RangeIndex,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.missing import find_valid_index\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\nif TYPE_CHECKING:\n from pandas.core.resample import Resampler\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs: Dict[str, str] = dict()\n_shared_doc_kwargs = dict(\n axes=\"keywords for axes\",\n klass=\"Series/DataFrame\",\n axes_single_arg=\"int or labels for object\",\n args_transpose=\"axes to permute (int or label for object)\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\",\n)\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError(\n f\"cannot replace {to_replace} with method {method} on a \"\n f\"{type(self).__name__}\"\n )\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, to_replace)\n values = fill_f(result.values, 
limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nbool_t = bool # Need alias because NDFrame has def bool:\n\n\nclass NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : bool, default False\n \"\"\"\n\n _internal_names: List[str] = [\n \"_data\",\n \"_cacher\",\n \"_item_cache\",\n \"_cache\",\n \"_is_copy\",\n \"_subtyp\",\n \"_name\",\n \"_index\",\n \"_default_kind\",\n \"_default_fill_value\",\n \"_metadata\",\n \"__array_struct__\",\n \"__array_interface__\",\n ]\n _internal_names_set: Set[str] = set(_internal_names)\n _accessors: Set[str] = set()\n _deprecations: FrozenSet[str] = frozenset([\"get_values\"])\n _metadata: List[str] = []\n _is_copy = None\n _data: BlockManager\n _attrs: Dict[Optional[Hashable], Any]\n _typ: str\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data: BlockManager,\n copy: bool = False,\n attrs: Optional[Mapping[Optional[Hashable], Any]] = None,\n ):\n # copy kwarg is retained for mypy compat, is not used\n\n object.__setattr__(self, \"_is_copy\", None)\n object.__setattr__(self, \"_data\", data)\n object.__setattr__(self, \"_item_cache\", {})\n if attrs is None:\n attrs = {}\n else:\n attrs = dict(attrs)\n object.__setattr__(self, \"_attrs\", attrs)\n\n @classmethod\n def _init_mgr(cls, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(\n axe, axis=cls._get_block_manager_axis(a), copy=False\n )\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def attrs(self) -> Dict[Optional[Hashable], Any]:\n \"\"\"\n Dictionary of global attributes on this object.\n\n .. 
warning::\n\n attrs is experimental and may change without warning.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:\n self._attrs = dict(value)\n\n @classmethod\n def _validate_dtype(cls, dtype):\n \"\"\" validate the passed dtype \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == \"V\":\n raise NotImplementedError(\n \"compound dtypes are not implemented \"\n f\"in the {cls.__name__} constructor\"\n )\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self: FrameOrSeries) -> Type[FrameOrSeries]:\n \"\"\"\n Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"\n Used when a manipulation result has one lower dimension(s) as the\n original, such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"\n Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n _AXIS_ALIASES = {\"rows\": 0}\n _AXIS_IALIASES = {0: \"rows\"}\n _stat_axis_number = 0\n _stat_axis_name = \"index\"\n _ix = None\n _AXIS_ORDERS: List[str]\n _AXIS_NUMBERS: Dict[str, int]\n _AXIS_NAMES: Dict[int, str]\n _AXIS_REVERSED: bool\n _info_axis_number: int\n _info_axis_name: str\n _AXIS_LEN: int\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @classmethod\n def _construct_axes_from_arguments(\n cls, args, kwargs, require_all: bool = False, sentinel=None\n ):\n \"\"\"\n Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n # construct the args\n args = list(args)\n for a in cls._AXIS_ORDERS:\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError as err:\n if require_all:\n raise TypeError(\n \"not enough/duplicate arguments specified!\"\n ) from err\n\n axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if isinstance(axis, str):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis 
to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]:\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = f\"{prefix}level_{i}\"\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self) -> Dict[str, ABCSeries]:\n from pandas.core.computation.parsing import clean_column_name\n\n d: Dict[str, ABCSeries] = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n\n return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)}\n\n def _get_cleaned_column_resolvers(self) -> Dict[str, ABCSeries]:\n \"\"\"\n Return the special character free column resolvers of a dataframe.\n\n Column names with special characters are 'cleaned up' so that they can\n be referred to by backtick quoting.\n Used in :meth:`DataFrame.eval`.\n \"\"\"\n from pandas.core.computation.parsing import clean_column_name\n\n if isinstance(self, ABCSeries):\n return {clean_column_name(self.name): self}\n\n return {\n clean_column_name(k): v for k, v in self.items() if not isinstance(k, int)\n }\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self) -> int:\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. 
Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for%(extended_summary_sub)s row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : %(axes_single_arg)s, default 0\n The axis to update. The value 0 identifies the rows%(axis_description_sub)s.\n\n inplace : bool, default False\n Whether to return a new %(klass)s instance.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of type %(klass)s if inplace=False, None otherwise.\n\n See Also\n --------\n %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.\n \"\"\"\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis: int, labels: Index) -> None:\n labels = ensure_index(labels)\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries:\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries:\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the level(s) is removed:\n\n * 0 or 'index': remove level(s) in column.\n * 1 or 'columns': remove level(s) in row.\n\n Returns\n -------\n DataFrame\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... 
], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level_2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self: FrameOrSeries, item) -> FrameOrSeries:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)\n return self.iloc[\n tuple(\n 0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes)\n )\n ]\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(\n self: FrameOrSeries,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n if mapper is None and index is None and columns is None:\n raise TypeError(\"must pass an index to rename\")\n\n if index is not None or columns is not None:\n if axis is not None:\n raise TypeError(\n \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n )\n elif mapper is not None:\n raise TypeError(\n \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n )\n else:\n # use the mapper argument\n if axis and self._get_axis_number(axis) == 1:\n columns = mapper\n else:\n index = mapper\n\n result = self if inplace else self.copy(deep=copy)\n\n for axis_no, replacements in enumerate((index, columns)):\n if replacements is None:\n continue\n\n ax = self._get_axis(axis_no)\n baxis = self._get_block_manager_axis(axis_no)\n f = com.get_rename_function(replacements)\n\n if level is not None:\n level = ax._get_level_number(level)\n\n # GH 13473\n if not callable(replacements):\n indexer = ax.get_indexer_for(replacements)\n if errors == \"raise\" and len(indexer[indexer == -1]):\n missing_labels = [\n label\n for index, label in enumerate(replacements)\n if indexer[index] == -1\n ]\n raise KeyError(f\"{missing_labels} not found in axis\")\n\n result._data = result._data.rename_axis(\n f, axis=baxis, copy=copy, level=level\n )\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n return None\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature(\"mapper\", [(\"copy\", True), (\"inplace\", False)])\n def rename_axis(self, mapper=lib.no_default, **kwargs):\n \"\"\"\n 
Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=lib.no_default\n )\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n axis = kwargs.pop(\"axis\", 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename_axis() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if mapper is not lib.no_default:\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (\n is_list_like(mapper) and not is_dict_like(mapper)\n )\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n raise ValueError(\"Use `.rename` to alter labels with a mapper.\")\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is lib.no_default:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com.get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis, inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other) -> bool:\n return all(\n self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS\n )\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n testing.assert_series_equal : Raises an AssertionError if left and\n right are not equal. Provides an easy interface to ignore\n inequality in dtypes, indexes and precision among others.\n testing.assert_frame_equal : Like assert_series_equal, but targets\n DataFrames.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.neg(values)\n else:\n raise TypeError(f\"Unary negative expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = self._values\n if is_bool_dtype(values):\n arr = values\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.pos(values)\n else:\n raise TypeError(f\"Unary plus expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n if not self.size:\n # inv fails with 0 len\n return self\n\n new_data = self._data.apply(operator.invert)\n result = self._constructor(new_data).__finalize__(self)\n return result\n\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n\n Returns\n -------\n bool\n Same single boolean value converted to bool type.\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\n \"bool cannot act on a non-boolean single element \"\n f\"{type(self).__name__}\"\n )\n\n self.__nonzero__()\n\n def __abs__(self: FrameOrSeries) -> FrameOrSeries:\n return self.abs()\n\n def __round__(self: FrameOrSeries, decimals: int = 0) -> FrameOrSeries:\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. 
All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n return (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and not self._is_label_reference(key, axis=axis)\n )\n\n def _is_label_reference(self, key, axis=0) -> bool_t:\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (\n key is not None\n and is_hashable(key)\n and any(key in self.axes[ax] for ax in other_axes)\n )\n\n def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(\n key, axis=axis\n )\n\n def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns).\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and any(key in self.axes[ax] for ax in other_axes)\n ):\n\n # Build an informative and grammatical warning\n level_article, level_type = (\n (\"an\", \"index\") if axis == 0 else (\"a\", \"column\")\n )\n\n label_article, label_type = (\n (\"a\", \"column\") if axis == 0 else (\"an\", \"index\")\n )\n\n msg = (\n f\"'{key}' is both {level_article} {level_type} level and \"\n f\"{label_article} {label_type} label, which is ambiguous.\"\n )\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:\n \"\"\"\n 
Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):\n multi_message = (\n \"\\n\"\n \"For a multi-index, the label must be a \"\n \"tuple with elements corresponding to each level.\"\n )\n else:\n multi_message = \"\"\n\n label_axis_name = \"column\" if axis == 0 else \"index\"\n raise ValueError(\n (\n f\"The {label_axis_name} label '{key}' \"\n f\"is not unique.{multi_message}\"\n )\n )\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis: int = 0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [\n k for k in keys if not self._is_label_or_level_reference(k, axis=axis)\n ]\n\n if invalid_keys:\n raise ValueError(\n (\n \"The following keys are not valid labels or \"\n f\"levels for axis {axis}: {invalid_keys}\"\n )\n )\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the specified 
levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError(\n f\"{repr(type(self).__name__)} objects are mutable, \"\n f\"thus they cannot be hashed\"\n )\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n \"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"\n Get the 'info axis' (see Indexing for more).\n\n This is index for Series, columns for DataFrame.\n\n Returns\n -------\n Index\n Info axis.\n \"\"\"\n return self._info_axis\n\n def items(self):\n \"\"\"\n Iterate over (label, values) on info axis\n\n This is index for Series and columns for DataFrame.\n\n Returns\n -------\n Generator\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n @Appender(items.__doc__)\n def iteritems(self):\n return self.items()\n\n def __len__(self) -> int:\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key) -> bool_t:\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self) -> bool_t:\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna : Return series without null values.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None) -> np.ndarray:\n return np.asarray(self._values, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n result = lib.item_from_zerodim(result)\n if is_scalar(result):\n # e.g. 
we get here with np.ptp(series)\n # ptp also requires the item_from_zerodim\n return result\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self) -> Dict[str, Any]:\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(\n _data=self._data,\n _typ=self._typ,\n _metadata=self._metadata,\n attrs=self.attrs,\n **meta,\n )\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get(\"_typ\")\n if typ is not None:\n attrs = state.get(\"_attrs\", {})\n object.__setattr__(self, \"_attrs\", attrs)\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n elif len(state) == 2:\n raise NotImplementedError(\"Pre-0.12 pickles are no longer supported\")\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n # string representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = f\"[{','.join(map(pprint_thing, self))}]\"\n return f\"{type(self).__name__}({prepr})\"\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option(\"display.latex.repr\"):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option(\"display.max_rows\"))\n payload = json.loads(\n data.to_json(orient=\"table\"), object_pairs_hook=collections.OrderedDict\n )\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs[\n \"to_markdown\"\n ] = \"\"\"\n Print %(klass)s in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n %(klass)s in Markdown-friendly format.\n \"\"\"\n\n _shared_docs[\n \"to_excel\"\n ] = \"\"\"\n Write %(klass)s to an Excel sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. 
To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... 
df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n ExcelWriter can also be used to append to an existing Excel file:\n\n >>> with pd.ExcelWriter('output.xlsx',\n ... mode='a') as writer: # doctest: +SKIP\n ... df.to_excel(writer, sheet_name='Sheet_name_3')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n freeze_panes=None,\n ) -> None:\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n df,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_json(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n orient: Optional[str] = None,\n date_format: Optional[str] = None,\n double_precision: int = 10,\n force_ascii: bool_t = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool_t = False,\n compression: Optional[str] = \"infer\",\n index: bool_t = True,\n indent: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : str or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : str\n Indication of expected JSON string format.\n\n * Series:\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}.\n\n * DataFrame:\n\n - default is 'columns'\n - allowed values are: {'split', 'records', 'index', 'columns',\n 'values', 'table'}.\n\n * The format of the JSON string:\n\n - 'split' : dict like {'index' -> [index], 'columns' -> [columns],\n 'data' -> [values]}\n - 'records' : list like [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n\n Describing the data, where data component is like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : str, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. 
One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n indent : int, optional\n Length of whitespace used to indent each record.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting json format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_json : Convert a JSON string to pandas object.\n\n Notes\n -----\n The behavior of ``indent=0`` varies from the stdlib, which does not\n indent the output but does insert newlines. Currently, ``indent=0``\n and the default ``indent=None`` are equivalent in pandas, though this\n may change in a future release.\n\n Examples\n --------\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n from pandas.io import json\n\n if date_format is None and orient == \"table\":\n date_format = \"iso\"\n elif date_format is None:\n date_format = \"epoch\"\n\n config.is_nonnegative_int(indent)\n indent = indent or 0\n\n return json.to_json(\n path_or_buf=path_or_buf,\n obj=self,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n 
force_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n lines=lines,\n compression=compression,\n index=index,\n indent=indent,\n )\n\n def to_hdf(\n self,\n path_or_buf,\n key: str,\n mode: str = \"a\",\n complevel: Optional[int] = None,\n complib: Optional[str] = None,\n append: bool_t = False,\n format: Optional[str] = None,\n index: bool_t = True,\n min_itemsize: Optional[Union[int, Dict[str, int]]] = None,\n nan_rep=None,\n dropna: Optional[bool_t] = None,\n data_columns: Optional[List[str]] = None,\n errors: str = \"strict\",\n encoding: str = \"UTF-8\",\n ) -> None:\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n append : bool, default False\n For Table formats, append the input data to the existing.\n format : {'fixed', 'table', None}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n - If None, pd.get_option('io.hdf.default_format') is checked,\n followed by fallback to \"fixed\"\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n encoding : str, default \"UTF-8\"\n min_itemsize : dict or int, optional\n Map column names to minimum string sizes for columns.\n nan_rep : Any, optional\n How to represent null values as str.\n Not allowed with append=True.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n\n pytables.to_hdf(\n path_or_buf,\n key,\n self,\n mode=mode,\n complevel=complevel,\n complib=complib,\n append=append,\n format=format,\n index=index,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n dropna=dropna,\n data_columns=data_columns,\n errors=errors,\n encoding=encoding,\n )\n\n def to_sql(\n self,\n name: str,\n con,\n schema=None,\n if_exists: str = \"fail\",\n index: bool_t = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n ) -> None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects. The user\n is responsible for engine disposal and connection closure for the SQLAlchemy\n connectable See `here \\\n <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.\n\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. 
If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] https://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n\n sql.to_sql(\n self,\n name,\n con,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n def to_pickle(\n self,\n path,\n compression: Optional[str] = \"infer\",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n ) -> None:\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values are 0, 1, 2, 3, 4. 
A negative value for the protocol\n parameter is equivalent to setting its value to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html.\n .. versionadded:: 0.21.0.\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n\n to_pickle(self, path, compression=compression, protocol=protocol)\n\n def to_clipboard(\n self, excel: bool_t = True, sep: Optional[str] = None, **kwargs\n ) -> None:\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n Produce output in a csv format for easy pasting into excel.\n\n - True, use the provided separator for csv pasting.\n - False, write a string representation of the object to the clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n \"\"\"\n from pandas.io import clipboards\n\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <https://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 
'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot',\n ... 'falcon', 'parrot'],\n ... 'speed': [350, 18, 361, 15]})\n >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])\n\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n xarray = import_optional_dependency(\"xarray\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n else:\n return xarray.Dataset.from_dataframe(self)\n\n @Substitution(returns=fmt.return_docstring)\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n caption=None,\n label=None,\n ):\n r\"\"\"\n Render object to a LaTeX tabular, longtable, or nested table/tabular.\n\n Requires ``\\usepackage{booktabs}``. The output can be copy/pasted\n into a main LaTeX document or read from an external file\n with ``\\input{table.tex}``.\n\n .. versionchanged:: 0.20.2\n Added to Series.\n\n .. versionchanged:: 1.0.0\n Added caption and label arguments.\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function or str, optional, default None\n Formatter for floating point numbers. For example\n ``float_format=\"%%.2f\"`` and ``float_format=\"{:0.2f}\".format`` will\n both result in 0.1234 being formatted as 0.12.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. 
By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n caption : str, optional\n The LaTeX caption to be placed inside ``\\caption{}`` in the output.\n\n .. versionadded:: 1.0.0\n\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n This is used with ``\\ref{}`` in the main ``.tex`` file.\n\n .. versionadded:: 1.0.0\n %(returns)s\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n )\n return formatter.to_latex(\n buf=buf,\n column_format=column_format,\n longtable=longtable,\n encoding=encoding,\n multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow,\n caption=caption,\n label=label,\n )\n\n def to_csv(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Label]] = None,\n header: Union[bool_t, List[str]] = True,\n index: bool_t = True,\n index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,\n mode: str = \"w\",\n encoding: Optional[str] = None,\n compression: Optional[Union[str, Mapping[str, str]]] = \"infer\",\n quoting: Optional[int] = None,\n quotechar: str = '\"',\n line_terminator: Optional[str] = None,\n chunksize: Optional[int] = None,\n date_format: Optional[str] = None,\n doublequote: bool_t = True,\n escapechar: Optional[str] = None,\n decimal: Optional[str] = \".\",\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string. If a file object is passed it should be opened with\n `newline=''`, disabling universal newlines.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n compression : str or dict, default 'infer'\n If str, represents compression mode. If dict, value at 'method' is\n the compression mode. Compression mode may be any of the following\n possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If\n compression mode is 'infer' and `path_or_buf` is path-like, then\n detect compression mode from the following extensions: '.gz',\n '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given\n and mode is 'zip' or inferred as 'zip', other entries passed as\n additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other entries as additional compression options if\n compression mode is 'zip'.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write DataFrame to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n\n Create 'out.zip' containing 'out.csv'\n\n >>> compression_opts = dict(method='zip',\n ... archive_name='out.csv') # doctest: +SKIP\n >>> df.to_csv('out.zip', index=False,\n ... 
compression=compression_opts) # doctest: +SKIP\n \"\"\"\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.csvs import CSVFormatter\n\n formatter = CSVFormatter(\n df,\n path_or_buf,\n line_terminator=line_terminator,\n sep=sep,\n encoding=encoding,\n compression=compression,\n quoting=quoting,\n na_rep=na_rep,\n float_format=float_format,\n cols=columns,\n header=header,\n index=index,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal,\n )\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n return None\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"\n Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self) -> None:\n \"\"\"\n Reset the cacher.\n \"\"\"\n if hasattr(self, \"_cacher\"):\n del self._cacher\n\n def _maybe_cache_changed(self, item, value) -> None:\n \"\"\"\n The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self) -> bool_t:\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _maybe_update_cacher(\n self, clear: bool_t = False, verify_is_copy: bool_t = True\n ) -> None:\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : bool, default False\n Clear the item cache.\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n # Note: we need to call ref._maybe_cache_changed even in the\n # case where it will raise. (Uh, not clear why)\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except AssertionError:\n # ref._data.setitem can raise\n # AssertionError because of shape mismatch\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t=\"referant\")\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self) -> None:\n self._item_cache.clear()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def take(\n self: FrameOrSeries, indices, axis=0, is_copy: Optional[bool_t] = None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. 
``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n is_copy : bool\n Before pandas 1.0, ``is_copy=False`` can be specified to ensure\n that the return value is an actual copy. Starting with pandas 1.0,\n ``take`` always returns a copy, and the keyword is therefore\n deprecated.\n\n .. deprecated:: 1.0.0\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n if is_copy is not None:\n warnings.warn(\n \"is_copy is deprecated and will be removed in a future version. \"\n \"'take' always returns a copy, so there is no need to specify this.\",\n FutureWarning,\n stacklevel=2,\n )\n\n nv.validate_take(tuple(), kwargs)\n\n self._consolidate_inplace()\n\n new_data = self._data.take(\n indices, axis=self._get_block_manager_axis(axis), verify=True\n )\n return self._constructor(new_data).__finalize__(self)\n\n def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries:\n \"\"\"\n Internal version of the `take` method that sets the `_is_copy`\n attribute to keep track of the parent dataframe (using in indexing\n for the SettingWithCopyWarning).\n\n See the docstring of `take` for full explanation of the parameters.\n \"\"\"\n result = self.take(indices=indices, axis=axis)\n # Maybe set copy if we didn't actually change the index.\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n return result\n\n def xs(self, key, axis=0, level=None, drop_level: bool_t = True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. 
Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)\n\n # create the tuple of the indexer\n _indexer = [slice(None)] * self.ndim\n _indexer[axis] = loc\n indexer = tuple(_indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n (inds,) = loc.nonzero()\n return self._take_with_is_copy(inds, axis=axis)\n else:\n return self._take_with_is_copy(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n # In this case loc should be an integer\n if self.ndim == 1:\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n return self._values[loc]\n\n new_values = self._data.fast_xs(loc)\n\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[loc],\n dtype=new_values.dtype,\n )\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs: Callable = xs\n\n def __getitem__(self, 
item):\n raise AbstractMethodError(self)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _slice(self: FrameOrSeries, slobj: slice, axis=0) -> FrameOrSeries:\n \"\"\"\n Construct a slice of this container.\n\n Slicing with this method is *always* positional.\n \"\"\"\n assert isinstance(slobj, slice), type(slobj)\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value) -> None:\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref, copy: bool_t = True) -> None:\n if not copy:\n self._is_copy = None\n else:\n assert ref is not None\n self._is_copy = weakref.ref(ref)\n\n def _check_is_chained_assignment_possible(self) -> bool_t:\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t=\"referant\", force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t=\"referant\")\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t=\"setting\", force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : int, default 4\n the level to show of the stack when the error is output\n t : str, the type of setting error\n force : bool, default False\n If True, then force showing an error.\n\n validate if we are doing a setitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. 
However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n # return early if the check is not needed\n if not (force or self._is_copy):\n return\n\n value = config.get_option(\"mode.chained_assignment\")\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n if self._is_copy is not None and not isinstance(self._is_copy, str):\n r = self._is_copy()\n if not gc.get_referents(r) or r.shape == self.shape:\n self._is_copy = None\n return\n\n # a custom message\n if isinstance(self._is_copy, str):\n t = self._is_copy\n\n elif t == \"referant\":\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n else:\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"https://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n if value == \"raise\":\n raise com.SettingWithCopyError(t)\n elif value == \"warn\":\n warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)\n\n def __delitem__(self, key) -> None:\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if self.ndim == 2 and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key,)\n for col in self.columns:\n if isinstance(col, tuple) and col[: len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (ex: DataFrame column).\n\n Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n @property\n def _is_view(self) -> bool_t:\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def reindex_like(\n self: FrameOrSeries,\n other,\n method: Optional[str] = None,\n copy: bool_t = True,\n limit=None,\n tolerance=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(\n axes=self._AXIS_ORDERS,\n method=method,\n copy=copy,\n limit=limit,\n tolerance=tolerance,\n )\n\n return self.reindex(**d)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace: bool_t = False,\n errors: str = \"raise\",\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', 'index' or 'columns'\"\n )\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(\n self: FrameOrSeries, labels, axis, level=None, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == \"raise\" and indexer.all():\n raise KeyError(f\"{labels} not found in axis\")\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == \"raise\" and labels_missing:\n raise KeyError(f\"{labels} not found in axis\")\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, \"_data\", result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self: 
FrameOrSeries, prefix: str) -> FrameOrSeries:\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{prefix}{}\".format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries:\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{}{suffix}\".format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper) # type: ignore\n\n def sort_values(\n self,\n axis=0,\n ascending=True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool_t = False,\n ):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise AbstractMethodError(self)\n\n def reindex(self: FrameOrSeries, *args, **kwargs) -> FrameOrSeries:\n \"\"\"\n Conform %(klass)s to new index with optional filling logic.\n\n Places NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. Preferably an Index object to avoid duplicating data.\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: Propagate last valid observation forward to next\n valid.\n * backfill / bfill: Use next valid observation to fill gap.\n * nearest: Use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... 
index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
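For example (an illustrative\n        aside, reusing ``df2`` and ``date_index2`` from above), the label that was\n        already missing in ``df2`` remains ``NaN`` after the backfill:\n\n        >>> df2.reindex(date_index2, method='bfill').loc['2010-01-03']  # doctest: +SKIP\n        prices   NaN\n        Name: 2010-01-03 00:00:00, dtype: float64\n\n        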
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop(\"method\", None))\n level = kwargs.pop(\"level\", None)\n copy = kwargs.pop(\"copy\", True)\n limit = kwargs.pop(\"limit\", None)\n tolerance = kwargs.pop(\"tolerance\", None)\n fill_value = kwargs.pop(\"fill_value\", None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError(\n \"reindex() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(\n self._get_axis(axis).identical(ax)\n for axis, ax in axes.items()\n if ax is not None\n ):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n return self._reindex_multi(axes, copy, fill_value)\n\n # perform the reindex on the axes\n return self._reindex_axes(\n axes, level, limit, tolerance, method, fill_value, copy\n ).__finalize__(self)\n\n def _reindex_axes(\n self: FrameOrSeries, axes, level, limit, tolerance, method, fill_value, copy\n ) -> FrameOrSeries:\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(\n labels, level=level, limit=limit, tolerance=tolerance, method=method\n )\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers(\n {axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy,\n allow_dups=False,\n )\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level) -> bool_t:\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return (\n (com.count_not_none(*axes.values()) == self._AXIS_LEN)\n and method is None\n and level is None\n and not self._is_mixed_type\n )\n\n def _reindex_multi(self, axes, copy, fill_value):\n raise AbstractMethodError(self)\n\n def _reindex_with_indexers(\n self: FrameOrSeries,\n reindexers,\n fill_value=None,\n copy: bool_t = False,\n allow_dups: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"allow_dups indicates an internal call here \"\"\"\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(\n index,\n indexer,\n axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy,\n )\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(\n self: FrameOrSeries,\n items=None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Subset the dataframe rows or 
columns according to the specified index labels.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : str\n Keep labels from axis for which \"like in label == True\".\n regex : str (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n The axis to filter on, expressed either as an index (int)\n or axis name (str). By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \"\n \"are mutually exclusive\"\n )\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(**{name: [r for r in items if r in labels]})\n elif like:\n\n def f(x):\n return like in ensure_str(x)\n\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n\n def f(x):\n return matcher.search(ensure_str(x)) is not None\n\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n For negative values of `n`, this function returns all rows except\n the last `n` rows, equivalent to ``df[:-n]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 
'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n\n For negative values of `n`\n\n >>> df.head(-3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n \"\"\"\n return self.iloc[:n]\n\n def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3)\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(\n self: FrameOrSeries,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Allow or disallow sampling of the same row more than once.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n Axis to sample. Accepts axis number or name. 
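(An illustrative aside: ``df.sample(n=2, axis='columns')`` draws two\n            random columns instead of rows.)\n            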
Default is stat axis\n for given data type (0 for Series and DataFrames).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Notes\n -----\n If `frac` > 1, `replacement` should be set to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n An upsample sample of the ``DataFrame`` with replacement:\n Note that `replace` parameter has to be `True` for `frac` parameter > 1.\n\n >>> df.sample(frac=2, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n falcon 2 2 10\n falcon 2 2 10\n fish 0 0 8\n dog 4 0 2\n fish 0 0 8\n dog 4 0 2\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, ABCSeries):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, str):\n if isinstance(self, ABCDataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError as err:\n raise KeyError(\n \"String passed to weights not a valid column\"\n ) from err\n else:\n raise ValueError(\n \"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\"\n )\n else:\n raise ValueError(\n \"Strings cannot be passed as weights \"\n \"when sampling from a Series.\"\n )\n\n weights = pd.Series(weights, dtype=\"float64\")\n\n if len(weights) != axis_length:\n raise ValueError(\n \"Weights and axis to be sampled must be of same length\"\n )\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif frac is not None and frac > 1 and not replace:\n raise ValueError(\n \"Replace has to be set to `True` when \"\n 
\"upsampling the population `frac` > 1.\"\n )\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError(\"Please enter a value for `frac` OR `n`, not both\")\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\n \"A negative number of rows requested. Please provide positive value.\"\n )\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis)\n\n _shared_docs[\n \"pipe\"\n ] = r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n Function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n Positional arguments passed into ``func``.\n kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\"\n\n @Appender(_shared_docs[\"pipe\"] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com.pipe(self, func, *args, **kwargs)\n\n _shared_docs[\"aggregate\"] = dedent(\n \"\"\"\n Aggregate using one or more operations over the specified axis.\n %(versionadded)s\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n %(see_also)s\n Notes\n -----\n `agg` is an alias for `aggregate`. 
Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n %(examples)s\"\"\"\n )\n\n _shared_docs[\n \"transform\"\n ] = \"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values.\n\n Produced %(klass)s will have same axis length as self.\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(\n self: FrameOrSeries, other, method=None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in other.attrs:\n self.attrs[name] = other.attrs[name]\n # For subclasses using _metadata.\n for name in self._metadata:\n assert isinstance(name, str)\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name: str):\n \"\"\"\n After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (\n name in self._internal_names_set\n or name in self._metadata\n or name in self._accessors\n ):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name: str, value) -> None:\n \"\"\"\n After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\n \"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2,\n )\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\"\n add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {\n c\n for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n return super()._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"\n Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self) -> None:\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace: bool_t = False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : bool, default False\n If False return new object, otherwise modify existing object.\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self) -> bool_t:\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value) -> bool_t:\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n if is_float(value) and np.isnan(value):\n return True\n\n raise TypeError(\n \"Cannot do inplace boolean setting on \"\n \"mixed-types with a non np.nan value\"\n )\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # 
Internal Interface Methods\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self) -> np.ndarray:\n \"\"\"internal implementation\"\"\"\n return self.values\n\n def _internal_get_values(self) -> np.ndarray:\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n \"\"\"\n return self.values\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 
'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n\n return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)\n\n def _to_dict_of_blocks(self, copy: bool_t = True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {\n k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()\n }\n\n def astype(\n self: FrameOrSeries, dtype, copy: bool_t = True, errors: str = \"raise\"\n ) -> FrameOrSeries:\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n Create a DataFrame:\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n Cast all columns to int32:\n\n >>> df.astype('int32').dtypes\n col1 int32\n col2 int32\n dtype: object\n\n Cast col1 to int32 using a dictionary:\n\n >>> df.astype({'col1': 'int32'}).dtypes\n col1 int32\n col2 int64\n dtype: object\n\n Create a series:\n\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1, 2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. 
Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors)\n\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n results = []\n for col_name, col in self.items():\n if col_name in dtype:\n results.append(\n col.astype(dtype=dtype[col_name], copy=copy, errors=errors)\n )\n else:\n results.append(col.copy() if copy else col)\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = [\n self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns))\n ]\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series or DataFrame\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. 
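(An illustrative aside: after ``c = s.copy()``, ``c.index is s.index`` is\n        ``False`` because a new ``Index`` object is created, even though the\n        label data itself is shared.)\n        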
Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n return self.copy(deep=deep)\n\n def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n def _convert(\n self: FrameOrSeries,\n datetime: bool_t = False,\n numeric: bool_t = False,\n timedelta: bool_t = False,\n coerce: bool_t = False,\n copy: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : bool, default False\n If True, convert to date where possible.\n numeric : bool, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : bool, default False\n If True, convert to timedelta where possible.\n coerce : bool, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT).\n copy : bool, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(coerce, \"coerce\")\n validate_bool_kwarg(copy, \"copy\")\n return self._constructor(\n self._data.convert(\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n ).__finalize__(self)\n\n def infer_objects(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. 
versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n convert_dtypes : Convert argument to best possible dtype.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(\n datetime=True, numeric=False, timedelta=True, coerce=False, copy=True\n )\n ).__finalize__(self)\n\n def convert_dtypes(\n self: FrameOrSeries,\n infer_objects: bool_t = True,\n convert_string: bool_t = True,\n convert_integer: bool_t = True,\n convert_boolean: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n infer_objects : bool, default True\n Whether object dtypes should be converted to the best possible types.\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n\n Returns\n -------\n Series or DataFrame\n Copy of input object with new dtype.\n\n See Also\n --------\n infer_objects : Infer dtypes of objects.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n\n Notes\n -----\n By default, ``convert_dtypes`` will attempt to convert a Series (or each\n Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options\n ``convert_string``, ``convert_integer``, and ``convert_boolean``, it is\n possible to turn off individual conversions to ``StringDtype``, the integer\n extension types or ``BooleanDtype``, respectively.\n\n For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference\n rules as during normal Series/DataFrame construction. Then, if possible,\n convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer extension\n type, otherwise leave as ``object``.\n\n If the dtype is integer, convert to an appropriate integer extension type.\n\n If the dtype is numeric, and consists of all integers, convert to an\n appropriate integer extension type.\n\n In the future, as new dtypes are added that support ``pd.NA``, the results\n of this method will change to support those new dtypes.\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {\n ... \"a\": pd.Series([1, 2, 3], dtype=np.dtype(\"int32\")),\n ... \"b\": pd.Series([\"x\", \"y\", \"z\"], dtype=np.dtype(\"O\")),\n ... \"c\": pd.Series([True, False, np.nan], dtype=np.dtype(\"O\")),\n ... \"d\": pd.Series([\"h\", \"i\", np.nan], dtype=np.dtype(\"O\")),\n ... \"e\": pd.Series([10, np.nan, 20], dtype=np.dtype(\"float\")),\n ... \"f\": pd.Series([np.nan, 100.5, 200], dtype=np.dtype(\"float\")),\n ... }\n ... 
)\n\n Start with a DataFrame with default dtypes.\n\n >>> df\n a b c d e f\n 0 1 x True h 10.0 NaN\n 1 2 y False i NaN 100.5\n 2 3 z NaN NaN 20.0 200.0\n\n >>> df.dtypes\n a int32\n b object\n c object\n d object\n e float64\n f float64\n dtype: object\n\n Convert the DataFrame to use best possible dtypes.\n\n >>> dfn = df.convert_dtypes()\n >>> dfn\n a b c d e f\n 0 1 x True h 10 NaN\n 1 2 y False i <NA> 100.5\n 2 3 z <NA> <NA> 20 200.0\n\n >>> dfn.dtypes\n a Int32\n b string\n c boolean\n d string\n e Int64\n f float64\n dtype: object\n\n Start with a Series of strings and missing data represented by ``np.nan``.\n\n >>> s = pd.Series([\"a\", \"b\", np.nan])\n >>> s\n 0 a\n 1 b\n 2 NaN\n dtype: object\n\n Obtain a Series with dtype ``StringDtype``.\n\n >>> s.convert_dtypes()\n 0 a\n 1 b\n 2 <NA>\n dtype: string\n \"\"\"\n if self.ndim == 1:\n return self._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n else:\n results = [\n col._convert_dtypes(\n infer_objects, convert_string, convert_integer, convert_boolean\n )\n for col_name, col in self.items()\n ]\n result = pd.concat(results, axis=1, copy=False)\n return result\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n @doc(**_shared_doc_kwargs)\n def fillna(\n self: FrameOrSeries,\n value=None,\n method=None,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : {axes_single_arg}\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n {klass} or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {{'A': 0, 'B': 1, 'C': 2, 'D': 3}}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n new_data = self._data.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n coerce=True,\n downcast=downcast,\n )\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n value = create_series_with_explicit_dtype(\n value, dtype_if_empty=object\n )\n elif not is_list_like(value):\n pass\n else:\n raise TypeError(\n '\"value\" parameter must be a scalar, dict '\n \"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError(\n \"Currently only can fill \"\n \"with dict/Series column \"\n \"by column\"\n )\n\n result = self if inplace else self.copy()\n for k, v in value.items():\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n elif isinstance(value, ABCDataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(f\"invalid fill value with a {type(value)}\")\n\n if inplace:\n self._update_inplace(new_data)\n return None\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"ffill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n def bfill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n 
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"bfill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n _shared_docs[\n \"replace\"\n ] = \"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. 
a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use for replacement when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n\n TypeError\n * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` do not match the type of the\n value being replaced\n\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\"\n\n @Appender(_shared_docs[\"replace\"] % _shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n if not (\n is_scalar(to_replace)\n or isinstance(to_replace, pd.Series)\n or is_re_compilable(to_replace)\n or is_list_like(to_replace)\n ):\n raise TypeError(\n \"Expecting 'to_replace' to be either a scalar, array-like, \"\n \"dict or None, got invalid type \"\n f\"{repr(type(to_replace).__name__)}\"\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, ABCDataFrame):\n return self.apply(\n _single_replace, args=(to_replace, method, inplace, limit)\n )\n return _single_replace(self, to_replace, method, inplace, limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError(\n 'If \"to_replace\" and \"value\" are both None '\n 'and \"to_replace\" is not a list, then '\n \"regex must be a mapping\"\n )\n to_replace = regex\n regex = True\n\n items = list(to_replace.items())\n keys, values = zip(*items) if items 
else ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\n \"If a nested mapping is passed, all values \"\n \"of the top level mapping must be mappings\"\n )\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = {}\n\n for k, v in items:\n keys, values = list(zip(*v.items())) or ([], [])\n\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(\n to_replace, value, inplace=inplace, limit=limit, regex=regex\n )\n else:\n\n # need a non-zero len on all axes\n if not self.size:\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in to_replace.items():\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursively\n res[c] = res[c].replace(\n to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex,\n )\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in to_replace.items() if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(\n to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert,\n )\n else:\n raise TypeError(\"value argument must be scalar, dict, or Series\")\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError(\n f\"Replacement lists must match in length. \"\n f\"Expecting {len(to_replace)} got {len(value)} \"\n )\n\n new_data = self._data.replace_list(\n src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex,\n )\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n elif to_replace is None:\n if not (\n is_re_compilable(regex)\n or is_list_like(regex)\n or is_dict_like(regex)\n ):\n raise TypeError(\n f\"'regex' must be a string or a compiled regular expression \"\n f\"or a list or dict of strings or regular expressions, \"\n f\"you passed a {repr(type(regex).__name__)}\"\n )\n return self.replace(\n regex, value, inplace=inplace, limit=limit, regex=True\n )\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in value.items():\n if k in self:\n new_data = new_data.replace(\n to_replace=to_replace,\n value=v,\n filter=[k],\n inplace=inplace,\n regex=regex,\n )\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n else:\n raise TypeError(\n f'Invalid \"to_replace\" type: {repr(type(to_replace).__name__)}'\n )\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"interpolate\"\n ] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. 
This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 
4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry before it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs[\"interpolate\"] % _shared_doc_kwargs)\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n limit_area=None,\n downcast=None,\n **kwargs,\n ):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = self._get_axis_number(axis)\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if isinstance(_maybe_transposed_self.index, MultiIndex) and method != \"linear\":\n raise ValueError(\n \"Only `method=linear` interpolation is supported on MultiIndexes.\"\n )\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\"object\") == len(\n _maybe_transposed_self.T\n ):\n raise TypeError(\n \"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\"\n )\n\n # create/use the index\n if method == \"linear\":\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n methods = {\"index\", \"values\", \"nearest\", \"time\"}\n is_numeric_or_datetime = (\n is_numeric_dtype(index)\n or is_datetime64_any_dtype(index)\n or is_timedelta64_dtype(index)\n )\n if method not in methods and not is_numeric_or_datetime:\n raise ValueError(\n \"Index column must be numeric or datetime type when \"\n f\"using {method} method other than linear. \"\n \"Try setting a numeric or datetime index column before \"\n \"interpolating.\"\n )\n\n if isna(index).any():\n raise NotImplementedError(\n \"Interpolation with NaNs in the index \"\n \"has not been implemented. 
Try filling \"\n \"those NaNs before interpolating.\"\n )\n data = _maybe_transposed_self._data\n new_data = data.interpolate(\n method=method,\n axis=ax,\n index=index,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN is\n taken, considering only the subset of columns (if not `None`).\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame.\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, str):\n where = Timestamp(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq)\n\n if where < start:\n if not is_series:\n from pandas import Series\n\n return Series(index=self.columns, name=where, dtype=np.float64)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side=\"right\")\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs[\n \"isna\"\n ] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isna(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isnull(self: FrameOrSeries) -> FrameOrSeries:\n return isna(self).__finalize__(self)\n\n _shared_docs[\n \"notna\"\n ] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notna(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notnull(self: FrameOrSeries) -> FrameOrSeries:\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):\n if (lower is not None and np.any(isna(lower))) or (\n upper is not None and np.any(isna(upper))\n ):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all=\"ignore\"):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == \"le\":\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = self._constructor(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1]\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(\n self: FrameOrSeries,\n lower=None,\n upper=None,\n axis=None,\n inplace: bool_t = False,\n *args,\n **kwargs,\n ) -> FrameOrSeries:\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. 
versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(isna(lower)):\n lower = None\n if not is_list_like(upper) and np.any(isna(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if (lower is None or (is_scalar(lower) and is_number(lower))) and (\n upper is None or (is_scalar(upper) and is_number(upper))\n ):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(\n lower, method=self.ge, axis=axis, inplace=inplace\n )\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(\n upper, method=self.le, axis=axis, inplace=inplace\n )\n\n return result\n\n _shared_docs[\n \"groupby\"\n ] = \"\"\"\n Group %(klass)s using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is to determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted as a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. 
Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n %(klass)sGroupBy\n Returns a groupby object that contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n \"\"\"\n\n def asfreq(\n self: FrameOrSeries,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool_t = False,\n fill_value=None,\n ) -> FrameOrSeries:\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset or str\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill.\n how : {'start', 'end'}, default end\n For PeriodIndex only (see PeriodIndex.asfreq).\n normalize : bool, default False\n Whether to reset output index to midnight.\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample 
import asfreq\n\n return asfreq(\n self,\n freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n def at_time(\n self: FrameOrSeries, time, asof: bool_t = False, axis=None\n ) -> FrameOrSeries:\n \"\"\"\n Select values at particular time of day (e.g., 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def between_time(\n self: FrameOrSeries,\n start_time,\n end_time,\n include_start: bool_t = True,\n include_end: bool_t = True,\n axis=None,\n ) -> FrameOrSeries:\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time,\n end_time,\n include_start=include_start,\n include_end=include_end,\n )\n except AttributeError as err:\n raise TypeError(\"Index must be DatetimeIndex\") from err\n\n return self._take_with_is_copy(indexer, axis=axis)\n\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: int = 0,\n on=None,\n level=None,\n ) -> \"Resampler\":\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : DateOffset, Timedelta or str\n The offset string or object representing target conversion.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.\n\n Examples\n --------\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... 
))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n \"\"\"\n from pandas.core.resample import get_resampler\n\n axis = self._get_axis_number(axis)\n return get_resampler(\n self,\n freq=rule,\n label=label,\n closed=closed,\n axis=axis,\n kind=kind,\n loffset=loffset,\n convention=convention,\n base=base,\n key=on,\n level=level,\n )\n\n def first(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Method to subset initial periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 
3 weeks\n if not offset.is_anchored() and hasattr(offset, \"_inc\"):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side=\"left\")\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self: FrameOrSeries, offset) -> FrameOrSeries:\n \"\"\"\n Method to subset final periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side=\"right\")\n return self.iloc[start:]\n\n def rank(\n self: FrameOrSeries,\n axis=0,\n method: str = \"average\",\n numeric_only: Optional[bool_t] = None,\n na_option: str = \"keep\",\n ascending: bool_t = True,\n pct: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"\n Compute numerical data ranks (1 through n) along axis.\n\n By default, equal values are assigned a rank that is the average of the\n ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Index to direct ranking.\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups.\n\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n How to rank NaN values:\n\n * keep: assign NaN rank to NaN values\n * top: assign smallest rank to NaN values if ascending\n * bottom: assign highest rank to NaN values if ascending.\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n\n Returns\n -------\n same type as caller\n Return a Series or DataFrame with data ranks as values.\n\n See Also\n --------\n core.groupby.GroupBy.rank : Rank of values within each group.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',\n ... 'spider', 'snake'],\n ... 
'Number_legs': [4, 2, 4, 8, np.nan]})\n >>> df\n Animal Number_legs\n 0 cat 4.0\n 1 penguin 2.0\n 2 dog 4.0\n 3 spider 8.0\n 4 snake NaN\n\n The following example shows how the method behaves with the above\n parameters:\n\n * default_rank: this is the default behaviour obtained without using\n any parameter.\n * max_rank: setting ``method = 'max'`` the records that have the\n same values are ranked using the highest rank (e.g.: since 'cat'\n and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)\n * NA_bottom: choosing ``na_option = 'bottom'``, if there are records\n with NaN values they are placed at the bottom of the ranking.\n * pct_rank: when setting ``pct = True``, the ranking is expressed as\n percentile rank.\n\n >>> df['default_rank'] = df['Number_legs'].rank()\n >>> df['max_rank'] = df['Number_legs'].rank(method='max')\n >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')\n >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)\n >>> df\n Animal Number_legs default_rank max_rank NA_bottom pct_rank\n 0 cat 4.0 2.5 3.0 2.5 0.625\n 1 penguin 2.0 1.0 1.0 1.0 0.250\n 2 dog 4.0 2.5 3.0 2.5 0.625\n 3 spider 8.0 4.0 4.0 4.0 1.000\n 4 snake NaN NaN NaN 5.0 NaN\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(\n data.values,\n axis=axis,\n method=method,\n ascending=ascending,\n na_option=na_option,\n pct=pct,\n )\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs[\n \"align\"\n ] = \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series:\n\n - pad / ffill: propagate last valid observation forward to next valid.\n - backfill / bfill: use NEXT valid observation to fill gap.\n\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit.\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions.\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects.\n \"\"\"\n\n @Appender(_shared_docs[\"align\"] % _shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, ABCSeries):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons(\n {c: self for c in other.columns}, **other._construct_axes_dict()\n )\n return df._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons(\n {c: other for c in self.columns}, **self._construct_axes_dict()\n )\n return self._align_frame(\n df,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, ABCDataFrame):\n return self._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n return self._align_series(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def _align_frame(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True\n )\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(\n reindexers, copy=copy, fill_value=fill_value, allow_dups=True\n )\n # other must be always DataFrame\n right = other._reindex_with_indexers(\n {0: [join_index, iridx], 1: [join_columns, cridx]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=True,\n )\n\n if method is not None:\n left = self._ensure_type(\n left.fillna(method=method, axis=fill_axis, limit=limit)\n )\n right = right.fillna(method=method, axis=fill_axis, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is 
not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _align_series(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError(\"cannot align series to a series other than axis 0\")\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError(\"Must specify axis=0 or 1\")\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join=\"right\", broadcast_axis=1)\n else:\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, ABCDataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, \"align\"):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(\n other, join=\"left\", axis=axis, level=level, fill_value=np.nan\n )\n\n # if we are NOT aligned, raise as we cannot where index\n if axis is None and not all(\n other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)\n ):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\n \"cannot align with a higher dimensional NDFrame\"\n )\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n new_other = np.asarray(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n\n else:\n raise ValueError(\n \"Length of replacements must equal series length\"\n )\n\n else:\n raise ValueError(\n \"other must be the same shape as self when an ndarray\"\n )\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, \"ndim\", 0):\n align = True\n else:\n align = self._get_axis_number(axis) == 1\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(\n mask=cond,\n new=other,\n align=align,\n inplace=True,\n axis=block_axis,\n transpose=self._AXIS_REVERSED,\n )\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(\n other=other,\n cond=cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=block_axis,\n )\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"where\"\n ] = \"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : bool %(klass)s, array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. 
The callable must\n not change input %(klass)s (though pandas doesn't check it).\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default 'raise'\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - 'raise' : allow exceptions to be raised.\n - 'ignore' : suppress exceptions. On error return original object.\n\n try_cast : bool, default False\n Try to cast the result back to the input type (if possible).\n\n Returns\n -------\n Same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 2 3\n 2 4 5\n 3 6 7\n 4 8 9\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\"\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"True\",\n cond_rev=\"False\",\n name=\"where\",\n name_other=\"mask\",\n )\n )\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n other = com.apply_if_callable(other, self)\n return self._where(\n cond, other, inplace, axis, level, errors=errors, try_cast=try_cast\n )\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"False\",\n cond_rev=\"True\",\n name=\"mask\",\n name_other=\"where\",\n )\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(\n ~cond,\n other=other,\n inplace=inplace,\n axis=axis,\n level=level,\n try_cast=try_cast,\n errors=errors,\n )\n\n _shared_docs[\n \"shift\"\n ] = \"\"\"\n Shift index by desired number of periods with an 
optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\"\n\n @Appender(_shared_docs[\"shift\"] % _shared_doc_kwargs)\n def shift(\n self: FrameOrSeries, periods=1, freq=None, axis=0, fill_value=None\n ) -> FrameOrSeries:\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(\n periods=periods, axis=block_axis, fill_value=fill_value\n )\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:\n \"\"\"\n Equivalent to `shift` without copying data.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(\n self: FrameOrSeries, periods: int = 1, freq=None, axis=0\n ) -> FrameOrSeries:\n \"\"\"\n Shift the time index, using the index's frequency if 
available.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n freq : DateOffset, timedelta, or str, default None\n Increment to use from the tseries module\n or time rule expressed as a string (e.g. 'EOM').\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0\n Corresponds to the axis that contains the Index.\n\n Returns\n -------\n shifted : Series/DataFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, \"freq\", None)\n\n if freq is None:\n freq = getattr(index, \"inferred_freq\", None)\n\n if freq is None:\n msg = \"Freq was not given and was not set in the index\"\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, str):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n elif orig_freq is not None:\n raise ValueError(\n f\"Given freq {freq.rule_code} does not match \"\n f\"PeriodIndex freq {orig_freq.rule_code}\"\n )\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(\n self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... 
after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError(f\"Truncate: {after} must be after {before}\")\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis), ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(\n self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : str or tzinfo object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. 
Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n\n Returns\n -------\n %(klass)s\n Object with time zone converted axis.\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, \"tz_convert\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_convert(ax, tz)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n def tz_localize(\n self: FrameOrSeries,\n tz,\n axis=0,\n level=None,\n copy: bool_t = True,\n ambiguous=\"raise\",\n nonexistent: str = \"raise\",\n ) -> FrameOrSeries:\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : str or tzinfo\n axis : the axis to localize\n level : int, str, default None\n If axis is a MultiIndex, localize a specific level. Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise a NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. 
When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7),\n ... index=pd.DatetimeIndex(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly.\n\n >>> s = pd.Series(range(3),\n ... index=pd.DatetimeIndex(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backward'`.\n\n >>> s = pd.Series(range(2),\n ... index=pd.DatetimeIndex(['2015-03-29 02:30:00',\n ... '2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, \"tz_localize\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n 
numpy.absolute : Calculate the absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(\n self: FrameOrSeries, percentiles=None, include=None, exclude=None\n ) -> FrameOrSeries:\n \"\"\"\n Generate descriptive statistics.\n\n Descriptive statistics include those that summarize the central\n tendency, dispersion and shape of a\n dataset's distribution, excluding ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). 
To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... 
})\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (\n [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n )\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = [\"count\", \"unique\"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n dtype = None\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n names += [\"top\", \"freq\"]\n result += [top, freq]\n\n # If the DataFrame is empty, set 'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += [\"top\", \"freq\"]\n result += [np.nan, np.nan]\n dtype = \"object\"\n\n return pd.Series(result, index=names, name=data.name, dtype=dtype)\n\n def describe_timestamp_1d(data):\n # GH-30164\n stat_index = [\"count\", \"mean\", \"min\"] + formatted_percentiles + [\"max\"]\n d = (\n [data.count(), data.mean(), data.min()]\n + data.quantile(percentiles).tolist()\n 
+ [data.max()]\n )\n return pd.Series(d, index=stat_index, name=data.name)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_datetime64_any_dtype(data):\n return describe_timestamp_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.items()]\n # set a convenient order for rows\n names: List[Label] = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)\n d.columns = data.columns.copy()\n return d\n\n _shared_docs[\n \"pct_change\"\n ] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or str, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... 
index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs[\"pct_change\"] % _shared_doc_kwargs)\n def pct_change(\n self: FrameOrSeries,\n periods=1,\n fill_method=\"pad\",\n limit=None,\n freq=None,\n **kwargs,\n ) -> FrameOrSeries:\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop(\"axis\", self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self._ensure_type(\n self.fillna(method=fill_method, axis=axis, limit=limit)\n )\n\n rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1\n if freq is not None:\n # Shift method is implemented differently when freq is not None\n # We want to restore the original index\n rs = rs.loc[~rs.index.duplicated()]\n rs = rs.reindex_like(data)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n axis_descr, name1, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls,\n \"any\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_any_desc,\n func=nanops.nanany,\n see_also=_any_see_also,\n examples=_any_examples,\n empty_value=False,\n )\n cls.all = _make_logical_function(\n cls,\n \"all\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=_all_desc,\n func=nanops.nanall,\n see_also=_all_see_also,\n examples=_all_examples,\n empty_value=True,\n )\n\n @Substitution(\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=\"\",\n examples=\"\",\n )\n @Appender(_num_doc_mad)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\"mad\", axis=axis, level=level, skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls,\n \"sem\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased standard 
error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. This can be changed \"\n \"using the ddof argument\",\n func=nanops.nansem,\n )\n cls.var = _make_stat_function_ddof(\n cls,\n \"var\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n func=nanops.nanvar,\n )\n cls.std = _make_stat_function_ddof(\n cls,\n \"std\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. This can be changed using the \"\n \"ddof argument\",\n func=nanops.nanstd,\n )\n\n cls.cummin = _make_cum_function(\n cls,\n \"cummin\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"minimum\",\n accum_func=np.minimum.accumulate,\n accum_func_name=\"min\",\n mask_a=np.inf,\n mask_b=np.nan,\n examples=_cummin_examples,\n )\n cls.cumsum = _make_cum_function(\n cls,\n \"cumsum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"sum\",\n accum_func=np.cumsum,\n accum_func_name=\"sum\",\n mask_a=0.0,\n mask_b=np.nan,\n examples=_cumsum_examples,\n )\n cls.cumprod = _make_cum_function(\n cls,\n \"cumprod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"product\",\n accum_func=np.cumprod,\n accum_func_name=\"prod\",\n mask_a=1.0,\n mask_b=np.nan,\n examples=_cumprod_examples,\n )\n cls.cummax = _make_cum_function(\n cls,\n \"cummax\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"maximum\",\n accum_func=np.maximum.accumulate,\n accum_func_name=\"max\",\n mask_a=-np.inf,\n mask_b=np.nan,\n examples=_cummax_examples,\n )\n\n cls.sum = _make_min_count_stat_function(\n cls,\n \"sum\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the sum of the values for the requested axis.\\n\\n\"\n \"This is equivalent to the method ``numpy.sum``.\",\n func=nanops.nansum,\n see_also=_stat_func_see_also,\n examples=_sum_examples,\n )\n cls.mean = _make_stat_function(\n cls,\n \"mean\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the mean of the values for the requested axis.\",\n func=nanops.nanmean,\n )\n cls.skew = _make_stat_function(\n cls,\n \"skew\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased skew over requested axis.\\n\\nNormalized by N-1.\",\n func=nanops.nanskew,\n )\n cls.kurt = _make_stat_function(\n cls,\n \"kurt\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return unbiased kurtosis over requested axis.\\n\\n\"\n \"Kurtosis obtained using Fisher's definition of\\n\"\n \"kurtosis (kurtosis of normal == 0.0). 
Normalized \"\n \"by N-1.\",\n func=nanops.nankurt,\n )\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls,\n \"prod\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the product of the values for the requested axis.\",\n func=nanops.nanprod,\n examples=_prod_examples,\n )\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls,\n \"median\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the median of the values for the requested axis.\",\n func=nanops.nanmedian,\n )\n cls.max = _make_stat_function(\n cls,\n \"max\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the maximum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the maximum, use ``idxmax``. This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmax``.\",\n func=nanops.nanmax,\n see_also=_stat_func_see_also,\n examples=_max_examples,\n )\n cls.min = _make_stat_function(\n cls,\n \"min\",\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n desc=\"Return the minimum of the values for the requested axis.\\n\\n\"\n \"If you want the *index* of the minimum, use ``idxmin``. This is\"\n \"the equivalent of the ``numpy.ndarray`` method ``argmin``.\",\n func=nanops.nanmin,\n see_also=_stat_func_see_also,\n examples=_min_examples,\n )\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n from pandas.core.window import EWM, Expanding, Rolling, Window\n\n @Appender(Rolling.__doc__)\n def rolling(\n self,\n window,\n min_periods=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n axis = self._get_axis_number(axis)\n\n if win_type is not None:\n return Window(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n cls.rolling = rolling\n\n @Appender(Expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return Expanding(self, min_periods=min_periods, center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(EWM.__doc__)\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n axis = self._get_axis_number(axis)\n return EWM(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n )\n\n cls.ewm = ewm\n\n @Appender(_shared_docs[\"transform\"] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs[\n \"valid_index\"\n ] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n -------\n scalar : type of index\n\n Notes\n -----\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n 
Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n idxpos = find_valid_index(self._values, how)\n if idxpos is None:\n return None\n return self.index[idxpos]\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"first\", \"klass\": \"Series/DataFrame\"}\n )\n def first_valid_index(self):\n return self._find_valid_index(\"first\")\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"last\", \"klass\": \"Series/DataFrame\"}\n )\n def last_valid_index(self):\n return self._find_valid_index(\"last\")\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = (\n f\"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}\"\n )\n name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else \"scalar\"\n name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_doc_mad = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default None\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. 
If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. 
This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs[\n \"stat_func_example\"\n] = \"\"\"\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\"\"\"\n\n_sum_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"sum\", verb=\"Sum\", default_output=14, level_output_0=6, level_output_1=8\n)\n\n_sum_examples += \"\"\"\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\"\"\"\n\n_max_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"max\", verb=\"Max\", default_output=8, level_output_0=4, level_output_1=8\n)\n\n_min_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"min\", verb=\"Min\", default_output=0, level_output_0=2, level_output_1=0\n)\n\n_stat_func_see_also = \"\"\"\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\"\"\"\n\n_prod_examples = \"\"\"\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded:: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=_min_count_stub,\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ):\n if name == \"sum\":\n nv.validate_sum(tuple(), kwargs)\n elif name == \"prod\":\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, min_count=min_count\n )\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n min_count=min_count,\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str = \"\",\n examples: str = \"\",\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs\n ):\n if name == \"median\":\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(\n cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable\n) -> Callable:\n @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, ddof=ddof\n )\n return self._reduce(\n func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n accum_func: Callable,\n accum_func_name: str,\n mask_a: float,\n mask_b: float,\n examples: str,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n accum_func_name=accum_func_name,\n examples=examples,\n )\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n if axis == 1:\n return cum_func(self.T, axis=0, skipna=skipna, 
*args, **kwargs).T\n\n def na_accum_func(blk_values):\n # We will be applying this function to block values\n if blk_values.dtype.kind in [\"m\", \"M\"]:\n # GH#30460, GH#29058\n # numpy 1.18 started sorting NaTs at the end instead of beginning,\n # so we need to work around to maintain backwards-consistency.\n orig_dtype = blk_values.dtype\n\n # We need to define mask before masking NaTs\n mask = isna(blk_values)\n\n if accum_func == np.minimum.accumulate:\n # Note: the accum_func comparison fails as an \"is\" comparison\n y = blk_values.view(\"i8\")\n y[mask] = np.iinfo(np.int64).max\n changed = True\n else:\n y = blk_values\n changed = False\n\n result = accum_func(y.view(\"i8\"), axis)\n if skipna:\n np.putmask(result, mask, iNaT)\n elif accum_func == np.minimum.accumulate:\n # Restore NaTs that we masked previously\n nz = (~np.asarray(mask)).nonzero()[0]\n if len(nz):\n # everything up to the first non-na entry stays NaT\n result[: nz[0]] = iNaT\n\n if changed:\n # restore NaT elements\n y[mask] = iNaT # TODO: could try/finally for this?\n\n if isinstance(blk_values, np.ndarray):\n result = result.view(orig_dtype)\n else:\n # DatetimeArray\n result = type(blk_values)._from_sequence(result, dtype=orig_dtype)\n\n elif skipna and not issubclass(\n blk_values.dtype.type, (np.integer, np.bool_)\n ):\n vals = blk_values.copy().T\n mask = isna(vals)\n np.putmask(vals, mask, mask_a)\n result = accum_func(vals, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(blk_values.T, axis)\n\n # transpose back for ndarray, not for EA\n return result.T if hasattr(result, \"T\") else result\n\n result = self._data.apply(na_accum_func)\n\n d = self._construct_axes_dict()\n d[\"copy\"] = False\n return self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(\n cls,\n name: str,\n name1: str,\n name2: str,\n axis_descr: str,\n desc: str,\n func: Callable,\n see_also: str,\n examples: str,\n empty_value: bool,\n) -> Callable:\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=see_also,\n examples=examples,\n empty_value=empty_value,\n )\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\n \"Option bool_only is not implemented with option level.\"\n )\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n func,\n name=name,\n axis=axis,\n skipna=skipna,\n numeric_only=bool_only,\n filter_type=\"bool\",\n )\n\n return set_function_name(logical_func, name, cls)\n"
] | [
[
"pandas.core.common.pipe",
"numpy.any",
"numpy.asarray",
"pandas.core.window.Expanding",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"numpy.abs",
"pandas.core.indexes.api.Index",
"pandas.core.ops._align_method_FRAME",
"numpy.unique",
"pandas.core.window.Window",
"numpy.asanyarray",
"pandas.io.sql.to_sql",
"pandas.core.indexes.api.RangeIndex",
"numpy.putmask",
"pandas.core.dtypes.common.is_scalar",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.ensure_str",
"pandas.core.resample.get_resampler",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.DataFrame",
"pandas.core.dtypes.inference.is_hashable",
"numpy.errstate",
"pandas.core.resample.asfreq",
"numpy.array",
"pandas.core.algorithms.rank",
"pandas.io.json.to_json",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_integer",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.missing.get_fill_func",
"pandas.io.clipboards.to_clipboard",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.core.window.Rolling",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.Series",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.missing.find_valid_index",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.missing.clean_fill_method",
"pandas.core.common.count_not_none",
"pandas.core.tools.datetimes.to_datetime",
"pandas.io.pytables.to_hdf",
"pandas.io.formats.format.format_percentiles",
"pandas.core.common.SettingWithCopyError",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.core.dtypes.common.is_re_compilable",
"numpy.isnan",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.util._validators.validate_percentile",
"pandas.core.common.random_state",
"pandas.core.common.apply_if_callable",
"pandas.concat",
"numpy.prod",
"pandas.core.dtypes.common.is_bool",
"pandas.core.common.maybe_make_list",
"pandas.core.missing.mask_missing",
"pandas.io.pickle.to_pickle",
"pandas.util._validators.validate_bool_kwarg",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.indexes.period.Period",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.core.dtypes.common.is_float",
"pandas.core.indexes.api.ensure_index",
"pandas._config.config.is_nonnegative_int",
"pandas.core.common.index_labels_to_array",
"pandas.core.dtypes.missing.notna",
"pandas.core.common.get_rename_function",
"pandas._config.config.get_option",
"pandas.core.computation.parsing.clean_column_name",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.tseries.frequencies.to_offset",
"pandas.core.window.EWM",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.util._decorators.Substitution",
"pandas.compat._optional.import_optional_dependency",
"numpy.iinfo",
"pandas.errors.AbstractMethodError"
]
] |
dajtmullaj/example_conda_pkg | [
"7c2bf657d14c714608e653d7218fa3cd658a6297"
] | [
"example_conda_pkg/descriptors.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 3 21:21:19 2020\n\nProject: chemplot (Chemical Space Visualization)\nContent: Descriptor operation methods\n\n@author: murat cihan sorkun\n\"\"\"\n\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport pandas as pd\nimport math\nimport mordred\nfrom mordred import Calculator, descriptors #Dont remove these imports\nfrom sklearn.linear_model import Lasso, LogisticRegression\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.preprocessing import StandardScaler\n\ndef get_mordred_descriptors(smiles_list):\n \"\"\"\n Calculates the Mordred descriptors for given smiles list\n \n :param smiles_list: List of smiles\n :type smiles_list: list\n :returns: The calculated descriptors list for the given smiles\n :rtype: Dataframe\n \"\"\" \n \n return generate_mordred_descriptors(smiles_list, Chem.MolFromSmiles, 'SMILES')\n\n\ndef get_mordred_descriptors_from_inchi(inchi_list):\n \"\"\"\n Calculates the Mordred descriptors for given InChi list\n \n :param inchi_list: List of InChi\n :type inchi_list: list\n :returns: The calculated descriptors list for the given smiles\n :rtype: Dataframe\n \"\"\" \n \n return generate_mordred_descriptors(inchi_list, Chem.MolFromInchi, 'InChi')\n\n \ndef generate_mordred_descriptors(encoding_list, encoding_function, encoding_name):\n \"\"\"\n Calculates the Mordred descriptors for list of molecules encodings\n \n :param smiles_list: List of molecules encodings\n :type smiles_list: list\n :returns: The calculated descriptors list for the given molecules encodings\n :rtype: Dataframe\n \"\"\" \n \n calc = mordred.Calculator() \n \n calc.register(mordred.AtomCount) #16\n calc.register(mordred.RingCount) #139\n calc.register(mordred.BondCount) #9 \n calc.register(mordred.HydrogenBond) #2 \n calc.register(mordred.CarbonTypes) #10\n calc.register(mordred.SLogP) #2\n calc.register(mordred.Constitutional) #16 \n calc.register(mordred.TopoPSA) #2\n calc.register(mordred.Weight) #2\n calc.register(mordred.Polarizability) #2\n calc.register(mordred.McGowanVolume) #1\n \n name_list=[]\n for desc_name in calc.descriptors:\n name_list.append(str(desc_name))\n \n descriptors_list=[] \n erroneous_encodings=[]\n encodings_none_descriptors=[]\n for encoding in encoding_list:\n mol=encoding_function(encoding)\n if mol is None:\n descriptors_list.append([None]*len(name_list))\n erroneous_encodings.append(encoding)\n else:\n mol=Chem.AddHs(mol)\n calculated_descriptors = calc(mol)\n for i in range(len(calculated_descriptors._values)):\n if math.isnan(calculated_descriptors._values[i]):\n calculated_descriptors._values = [None]*len(name_list)\n encodings_none_descriptors.append(encoding)\n break\n descriptors_list.append(calculated_descriptors._values) \n \n if len(erroneous_encodings)>0:\n print(\"The following erroneous {} have been found in the data:\\n{}.\\nThe erroneous {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, erroneous_encodings)), encoding_name))\n\n if len(encodings_none_descriptors)>0:\n print(\"For the following {} not all descriptors can be computed:\\n{}.\\nThese {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, encodings_none_descriptors)), encoding_name))\n \n df_descriptors=pd.DataFrame(descriptors_list,columns=name_list)\n df_descriptors = df_descriptors.select_dtypes(exclude=['object']) \n return df_descriptors\n \ndef select_descriptors_lasso(df_descriptors,target_list, R_select=0.05, C_select=0.05, kind=\"R\"):\n \"\"\"\n 
Selects descriptors by LASSO \n \n :param df_descriptors: descriptors of molecules \n :type df_descriptors: Dataframe\n :param target_list: list of target values \n :type target_list: list\n :param R_select: alpha value for Lasso \n :type R_select: float\n :param C_select: C value for LogisticRegression \n :type C_select: float\n :param kind: kind of target R->Regression C->Classification \n :type kind: string\n :returns: The selected descriptors\n :rtype: Dataframe\n \"\"\" \n \n # Remove erroneous data\n df_descriptors = df_descriptors.assign(target=target_list.values)\n df_descriptors = df_descriptors.dropna(how='any')\n target_list = df_descriptors['target'].to_list()\n df_descriptors = df_descriptors.drop(columns=['target'])\n \n df_descriptors_scaled = StandardScaler().fit_transform(df_descriptors)\n \n if(kind==\"C\"): \n model = LogisticRegression(C=C_select,penalty='l1', solver='liblinear',random_state=1).fit(df_descriptors_scaled, target_list)\n else:\n model = Lasso(alpha=R_select,max_iter=10000,random_state=1).fit(df_descriptors_scaled, target_list)\n \n \n selected = SelectFromModel(model, prefit=True)\n X_new_lasso = selected.transform(df_descriptors)\n # Get back the kept features as a DataFrame with dropped columns as all 0s\n selected_features = pd.DataFrame(selected.inverse_transform(X_new_lasso), index=df_descriptors.index, columns=df_descriptors.columns)\n # Dropped columns have values of all 0s, keep other columns \n selected_columns_lasso = selected_features.columns[selected_features.var() != 0] \n selected_data = df_descriptors[selected_columns_lasso] \n \n return selected_data, target_list\n\n\ndef get_ecfp(smiles_list, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given SMILES list\n \n :param smiles_list: List of SMILES\n :type smiles_list: list\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given SMILES\n :rtype: Dataframe\n \"\"\" \n \n return generate_ecfp(smiles_list, Chem.MolFromSmiles, 'SMILES', target_list, radius=2, nBits=2048)\n\n\ndef get_ecfp_from_inchi(inchi_list, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given InChi list\n \n :param inchi_list: List of InChi\n :type inchi_list: list\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given InChi\n :rtype: Dataframe\n \"\"\" \n \n return generate_ecfp(inchi_list, Chem.MolFromInchi, 'InChi', target_list, radius=2, nBits=2048)\n\n\ndef generate_ecfp(encoding_list, encoding_function, encoding_name, target_list, radius=2, nBits=2048):\n \"\"\"\n Calculates the ECFP fingerprint for given list of molecules encodings\n \n :param encoding_list: List of molecules encodings\n :type encoding_list: list\n :param encoding_function: Function used to extract the molecules from the encodings\n :type encoding_function: fun\n :param radius: The ECPF fingerprints radius.\n :type radius: int\n :param nBits: The number of bits of the fingerprint vector.\n :type nBits: int\n :returns: The calculated ECPF fingerprints for the given molecules encodings\n :rtype: Dataframe\n \"\"\" \n \n # Generate ECFP fingerprints\n ecfp_fingerprints=[]\n erroneous_encodings=[]\n for encoding in encoding_list:\n mol=encoding_function(encoding)\n if mol is None:\n 
ecfp_fingerprints.append([None]*nBits)\n erroneous_encodings.append(encoding)\n else:\n mol=Chem.AddHs(mol)\n list_bits_fingerprint = []\n list_bits_fingerprint[:0] = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString()\n ecfp_fingerprints.append(list_bits_fingerprint) \n \n # Create dataframe of fingerprints\n df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = encoding_list)\n # Remove erroneous data\n if len(erroneous_encodings)>0:\n print(\"The following erroneous {} have been found in the data:\\n{}.\\nThe erroneous {} will be removed from the data.\".format(encoding_name, '\\n'.join(map(str, erroneous_encodings)), encoding_name))\n \n if len(target_list)>0:\n df_ecfp_fingerprints = df_ecfp_fingerprints.assign(target=target_list.values)\n \n df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any')\n \n if len(target_list)>0:\n target_list = df_ecfp_fingerprints['target'].to_list()\n df_ecfp_fingerprints = df_ecfp_fingerprints.drop(columns=['target'])\n \n # Remove bit columns with no variablity (all \"0\" or all \"1\")\n df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 0).any(axis=0)]\n df_ecfp_fingerprints = df_ecfp_fingerprints.loc[:, (df_ecfp_fingerprints != 1).any(axis=0)]\n \n return df_ecfp_fingerprints, target_list"
] | [
[
"pandas.DataFrame",
"sklearn.feature_selection.SelectFromModel",
"sklearn.linear_model.LogisticRegression",
"sklearn.linear_model.Lasso",
"sklearn.preprocessing.StandardScaler"
]
] |
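The `select_descriptors_lasso` helper in the row above standardizes the descriptor matrix, fits an L1 model (Lasso for regression, L1 logistic regression for classification), and keeps only the columns that `SelectFromModel` leaves with non-zero coefficients. Below is a minimal, self-contained sketch of that selection step on synthetic data; the descriptor names, alpha value, and data are illustrative stand-ins, not values from the dataset.

```python
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler

# Synthetic "descriptor" table: three informative columns, two noise columns.
rng = np.random.RandomState(0)
X = pd.DataFrame(rng.normal(size=(100, 5)),
                 columns=["MW", "SLogP", "TopoPSA", "noise_a", "noise_b"])
y = 2.0 * X["MW"] - 1.5 * X["SLogP"] + 0.5 * X["TopoPSA"] + rng.normal(scale=0.1, size=100)

# Scale, fit an L1-penalized model, and keep the columns whose coefficients
# survive, mirroring the regression branch of select_descriptors_lasso.
X_scaled = StandardScaler().fit_transform(X)
model = Lasso(alpha=0.05, max_iter=10000, random_state=1).fit(X_scaled, y)
selector = SelectFromModel(model, prefit=True)
selected_columns = X.columns[selector.get_support()]
print(list(selected_columns))  # typically the three informative descriptors
```

The classification branch in the row follows the same pattern, swapping the Lasso for `LogisticRegression(penalty='l1', solver='liblinear')`.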
pmarshwx/matplotlib | [
"12be528dbf2114f7c25abf60de8100cb2d4494af"
] | [
"lib/matplotlib/backends/qt_compat.py"
] | [
"\"\"\" A Qt API selector that can be used to switch between PyQt and PySide.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os\nfrom matplotlib import rcParams, verbose\n\n# Available APIs.\nQT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1\nQT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API\nQT_API_PYSIDE = 'PySide' # only supports Version 2 API\nQT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim\n\nETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),\n pyqt5=(QT_API_PYQT5, 5))\n# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)\n# If the ETS QT_API environment variable is set, use it, but only\n# if the varible if of the same major QT version. Note that\n# ETS requires the version 2 of PyQt4, which is not the platform\n# default for Python 2.x.\n\nQT_API_ENV = os.environ.get('QT_API')\n\nif rcParams['backend'] == 'Qt5Agg':\n QT_RC_MAJOR_VERSION = 5\nelse:\n QT_RC_MAJOR_VERSION = 4\n\nQT_API = None\n\nif (QT_API_ENV is not None):\n try:\n QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]\n except KeyError:\n raise RuntimeError(\n ('Unrecognized environment variable %r, valid values are:'\n ' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))\n if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:\n # Only if backend and env qt major version are\n # compatible use the env variable.\n QT_API = ETS[QT_API_ENV][0]\n\nif QT_API is None:\n # No ETS environment or incompatible so use rcParams.\n if rcParams['backend'] == 'Qt5Agg':\n QT_API = rcParams['backend.qt5']\n else:\n QT_API = rcParams['backend.qt4']\n\n# We will define an appropriate wrapper for the differing versions\n# of file dialog.\n_getSaveFileName = None\n\n# Flag to check if sip could be imported\n_sip_imported = False\n\n# Now perform the imports.\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):\n try:\n import sip\n _sip_imported = True\n except ImportError:\n # Try using PySide\n QT_API = QT_API_PYSIDE\n cond = (\"Could not import sip; falling back on PySide\\n\"\n \"in place of PyQt4 or PyQt5.\\n\")\n verbose.report(cond, 'helpful')\n\nif _sip_imported:\n if QT_API == QT_API_PYQTv2:\n if QT_API_ENV == 'pyqt':\n cond = (\"Found 'QT_API=pyqt' environment variable. \"\n \"Setting PyQt4 API accordingly.\\n\")\n else:\n cond = \"PyQt API v2 specified.\"\n try:\n sip.setapi('QString', 2)\n except:\n res = 'QString API v2 specification failed. Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n # condition has now been reported, no need to repeat it:\n cond = \"\"\n try:\n sip.setapi('QVariant', 2)\n except:\n res = 'QVariant API v2 specification failed. 
Defaulting to v1.'\n verbose.report(cond + res, 'helpful')\n\n if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API\n\n from PyQt4 import QtCore, QtGui\n\n try:\n if sip.getapi(\"QString\") > 1:\n # Use new getSaveFileNameAndFilter()\n _getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter\n else:\n\n # Use old getSaveFileName()\n def _getSaveFileName(*args, **kwargs):\n return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),\n None)\n\n except (AttributeError, KeyError):\n\n # call to getapi() can fail in older versions of sip\n def _getSaveFileName(*args, **kwargs):\n return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None\n\n else: # PyQt5 API\n from PyQt5 import QtCore, QtGui, QtWidgets\n _getSaveFileName = QtWidgets.QFileDialog.getSaveFileName\n\n # Alias PyQt-specific functions for PySide compatibility.\n QtCore.Signal = QtCore.pyqtSignal\n try:\n QtCore.Slot = QtCore.pyqtSlot\n except AttributeError:\n # Not a perfect match but works in simple cases\n QtCore.Slot = QtCore.pyqtSignature\n\n QtCore.Property = QtCore.pyqtProperty\n __version__ = QtCore.PYQT_VERSION_STR\n\nelse: # try importing pyside\n try:\n from PySide import QtCore, QtGui, __version__, __version_info__\n except ImportError:\n raise ImportError(\n \"Matplotlib qt-based backends require an external PyQt4, PyQt5,\\n\"\n \"or PySide package to be installed, but it was not found.\")\n\n if __version_info__ < (1, 0, 3):\n raise ImportError(\n \"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3\")\n\n _getSaveFileName = QtGui.QFileDialog.getSaveFileName\n\n\n# Apply shim to Qt4 APIs to make them look like Qt5\nif QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):\n '''Import all used QtGui objects into QtWidgets\n\n Here I've opted to simple copy QtGui into QtWidgets as that\n achieves the same result as copying over the objects, and will\n continue to work if other objects are used.\n\n '''\n QtWidgets = QtGui\n"
] | [
[
"matplotlib.verbose.report"
]
] |
DaulPavid/pyturbo | [
"878e0b1b514c043f1b4ea5cd5268b23c0df5192e"
] | [
"turbo/turbo_encoder.py"
] | [
"#\n# Turbo Encoder\n#\n\nimport numpy as np\n\nfrom .rsc import RSC\n\n\nclass TurboEncoder:\n def __init__(self, interleaver):\n self.interleaver = interleaver\n self.block_size = len(self.interleaver)\n self.encoders = 2 * [RSC()]\n\n def reset(self):\n for e in self.encoders:\n e.reset()\n\n def interleave(self, vector):\n interleaved = np.zeros(self.block_size, dtype=int)\n for i in range(0, self.block_size):\n interleaved[i] = vector[self.interleaver[i]]\n\n return interleaved\n\n def execute(self, vector):\n output_size = 3 * (len(vector) + len(self.encoders[0].registers))\n output = np.zeros(output_size, dtype=int)\n interleaved = self.interleave(vector)\n\n output[1::3], output[::3] = self.encoders[0].execute(vector)\n output[2::3], _ = self.encoders[1].execute(interleaved)\n\n return output\n"
] | [
[
"numpy.zeros"
]
] |
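`TurboEncoder.interleave` in the row above is a plain index permutation: output slot `i` takes the input bit at position `interleaver[i]`. A small numpy-only sketch of that step, using an arbitrary illustrative interleaver, and showing that the explicit loop is equivalent to fancy indexing:

```python
import numpy as np

# TurboEncoder.interleave copies vector[interleaver[i]] into output slot i.
bits = np.array([1, 0, 1, 1, 0, 0], dtype=int)
interleaver = np.array([3, 0, 5, 2, 4, 1])  # illustrative permutation of 0..5

interleaved = np.zeros(len(interleaver), dtype=int)
for i in range(len(interleaver)):
    interleaved[i] = bits[interleaver[i]]

# The loop is equivalent to numpy fancy indexing with the permutation.
assert np.array_equal(interleaved, bits[interleaver])
print(interleaved)  # [1 1 0 1 0 0]
```

In `execute`, the two RSC outputs are then packed into the strided slots `output[::3]`, `output[1::3]`, and `output[2::3]`, giving three output bits per input bit.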
wmcnally/evopose2d | [
"ea05b818044d8d84e9cbbee778bc465be59ebd59"
] | [
"inference_speed.py"
] | [
"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport tensorflow as tf\nfrom dataset.dataloader import load_tfds\nfrom time import time\nimport argparse\nfrom nets.simple_basline import SimpleBaseline\nfrom nets.evopose2d import EvoPose\nfrom nets.hrnet import HRNet\nfrom utils import detect_hardware\n\n\ndef speed_test(strategy, cfg, split='val', n=1000):\n with strategy.scope():\n if cfg.MODEL.TYPE == 'simple_baseline':\n model = SimpleBaseline(cfg)\n elif cfg.MODEL.TYPE == 'hrnet':\n model = HRNet(cfg)\n elif cfg.MODEL.TYPE == 'evopose':\n model = EvoPose(cfg)\n\n cfg.DATASET.OUTPUT_SHAPE = model.output_shape[1:]\n\n ds = load_tfds(cfg, split, det=cfg.VAL.DET,\n predict_kp=True, drop_remainder=cfg.VAL.DROP_REMAINDER)\n ds = strategy.experimental_distribute_dataset(ds)\n\n @tf.function\n def predict(imgs, flip=False):\n if flip:\n imgs = imgs[:, :, ::-1, :]\n return model(imgs, training=False)\n\n for count, batch in enumerate(ds):\n if count == 1: # skip first pass\n ti = time()\n\n _, imgs, _, _, scores = batch\n\n hms = strategy.run(predict, args=(imgs,)).numpy()\n\n if cfg.VAL.FLIP:\n flip_hms = strategy.run(predict, args=(imgs, True,)).numpy()\n flip_hms = flip_hms[:, :, ::-1, :]\n tmp = flip_hms.copy()\n for i in range(len(cfg.DATASET.KP_FLIP)):\n flip_hms[:, :, :, i] = tmp[:, :, :, cfg.DATASET.KP_FLIP[i]]\n # shift to align features\n flip_hms[:, :, 1:, :] = flip_hms[:, :, 0:-1, :].copy()\n hms = (hms + flip_hms) / 2.\n\n if count == n:\n break\n\n print('FPS: {:.5f}'.format((n * cfg.VAL.BATCH_SIZE) / (time() - ti)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--cpu', action='store_true')\n parser.add_argument('--gpu', action='store_true')\n parser.add_argument('--tpu', default='')\n parser.add_argument('-c', '--cfg', required=True) # yaml\n parser.add_argument('-bs', '--batch-size', type=int, default=1)\n parser.add_argument('-n', type=int, default=1000)\n args = parser.parse_args()\n\n from dataset.coco import cn as cfg\n cfg.merge_from_file('configs/' + args.cfg)\n cfg.MODEL.NAME = args.cfg.split('.')[0]\n cfg.VAL.BATCH_SIZE = args.batch_size\n\n if args.cpu:\n strategy = tf.distribute.OneDeviceStrategy('/CPU:0')\n elif args.gpu:\n strategy = tf.distribute.OneDeviceStrategy('/GPU:0')\n else:\n tpu, strategy = detect_hardware(args.tpu)\n\n tf.config.optimizer.set_experimental_options({'disable_meta_optimizer': True})\n speed_test(strategy, cfg, split='val', n=args.n)\n\n\n\n"
] | [
[
"tensorflow.config.optimizer.set_experimental_options",
"tensorflow.distribute.OneDeviceStrategy"
]
] |
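The `cfg.VAL.FLIP` branch of `speed_test` in the row above averages each prediction with a flip-augmented one: heatmaps predicted from the mirrored image are mirrored back along the width axis, their left/right keypoint channels are swapped via `KP_FLIP`, and they are shifted one pixel to re-align features before averaging. A numpy-only sketch of that merging step; the batch shape and the `kp_flip` permutation here are illustrative assumptions.

```python
import numpy as np

def merge_flip_heatmaps(hms, flip_hms, kp_flip):
    """Average plain heatmaps with heatmaps predicted from a mirrored input,
    following the flip branch of speed_test in the row above."""
    flip_hms = flip_hms[:, :, ::-1, :]              # mirror back along width
    flip_hms = flip_hms[:, :, :, kp_flip]           # swap left/right channels
    flip_hms[:, :, 1:, :] = flip_hms[:, :, :-1, :].copy()  # shift to re-align
    return (hms + flip_hms) / 2.0

# Dummy batch: 2 images, 64x48 heatmaps, 4 keypoints (left/right pairs 0<->1, 2<->3).
hms = np.random.rand(2, 64, 48, 4)
flip_hms = np.random.rand(2, 64, 48, 4)
kp_flip = [1, 0, 3, 2]
print(merge_flip_heatmaps(hms, flip_hms, kp_flip).shape)  # (2, 64, 48, 4)
```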
ourDirection/ourDirection | [
"b99ed67a8cc0fe5016e03fe3b5ad083b7f8bbdc0"
] | [
"momus/VHRED/split-examples-by-token.py"
] | [
"\"\"\"\nTakes as input a binarized dialogue corpus, splits the examples by a certain token and shuffles it\n\nExample run:\n\n python split-examples-by-token.py Training.dialogues.pkl 2 Training_SplitByDialogues.dialogues --join_last_two_examples\n\n@author Iulian Vlad Serban\n\"\"\"\n\nimport collections\nimport numpy\nimport math\nimport operator\nimport os\nimport sys\nimport logging\nimport cPickle\n\nfrom collections import Counter\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger('text2dict')\n\ndef safe_pickle(obj, filename):\n if os.path.isfile(filename):\n logger.info(\"Overwriting %s.\" % filename)\n else:\n logger.info(\"Saving to %s.\" % filename)\n \n with open(filename, 'wb') as f:\n cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)\n\n# Thanks to Emile on Stackoverflow:\n# http://stackoverflow.com/questions/4322705/split-a-list-into-nested-lists-on-a-value\n\ndef _itersplit(l, splitters):\n current = []\n for item in l:\n if item in splitters:\n yield current\n current = []\n else:\n current.append(item)\n yield current\n\ndef magicsplit(l, *splitters):\n return [subl for subl in _itersplit(l, splitters) if subl]\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\", type=str, help=\"Binarized dialogue corpus (pkl file)\")\nparser.add_argument(\"token_id\", type=int, help=\"Token index to split examples by (e.g. to split by end-of-dialogue set this to 2)\")\nparser.add_argument(\"consecutive_examples_to_merge\", type=int, default='1', help=\"After splitting these number of examples will be merged.\")\nparser.add_argument(\"--join_last_two_examples\",\n action=\"store_true\", default=False,\n help=\"If on, will join the last two splits generated from each example. This is useful to handle empty or very short last samples\")\n\n\nparser.add_argument(\"output\", type=str, help=\"Filename of processed binarized dialogue corpus (pkl file)\")\nargs = parser.parse_args()\n\nif not os.path.isfile(args.input):\n raise Exception(\"Input file not found!\")\n\nlogger.info(\"Loading dialogue corpus\")\ndata = cPickle.load(open(args.input, 'r'))\ndata_len = len(data)\n\nlogger.info('Corpus loaded... Data len is %d' % data_len)\n\n# Count number of tokens\ntokens_count = 0\nfor i in range(data_len):\n tokens_count += len(data[i])\nlogger.info('Tokens count %d' % tokens_count)\n\n\nlogger.info(\"Splitting corpus examples by token id... 
\")\nprocessed_binarized_corpus = []\nfor i in range(data_len):\n logger.info(' Example %d ' % i)\n new_examples = magicsplit(data[i], int(args.token_id))\n\n # If option is specified, we append the last new example to the second last one\n if args.join_last_two_examples and len(new_examples) > 1:\n new_examples[len(new_examples)-2] += new_examples[len(new_examples)-1]\n del new_examples[len(new_examples)-1]\n\n # Simpler version of the two for loops, which does not allow merging together samples\n #for new_example in new_examples:\n # processed_binarized_corpus.append(new_example + [int(args.token_id)])\n\n s = int(math.floor(len(new_examples) / args.consecutive_examples_to_merge))\n for j in range(1, s):\n start_index = j*args.consecutive_examples_to_merge\n merged_example = []\n for k in reversed(range(args.consecutive_examples_to_merge)):\n merged_example += new_examples[start_index-k-1] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n\n if s > 0:\n merged_example = []\n for k in range((s-1)*args.consecutive_examples_to_merge, len(new_examples)):\n merged_example += new_examples[k] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n else:\n merged_example = []\n for k in range(len(new_examples)):\n merged_example += new_examples[k] + [int(args.token_id)]\n processed_binarized_corpus.append(merged_example)\n\n\nlogger.info('New data len is %d' % len(processed_binarized_corpus))\n\n# Count number of tokens\nprocessed_tokens_count = 0\nfor i in range(len(processed_binarized_corpus)):\n processed_tokens_count += len(processed_binarized_corpus[i])\nlogger.info('New tokens count %d' % processed_tokens_count)\n\n# When splitting by end-of-utterance token </s>, there are some instances with multiple </s> at the end of each example. Our splitting method will effectively remove these, but it is not of any concern to us.\n# assert(processed_tokens_count == tokens_count)\n\nlogger.info(\"Reshuffling corpus.\")\nrng = numpy.random.RandomState(13248)\nrng.shuffle(processed_binarized_corpus)\n\nlogger.info(\"Saving corpus.\")\nsafe_pickle(processed_binarized_corpus, args.output + \".pkl\")\n\nlogger.info(\"Corpus saved. All done!\")\n"
] | [
[
"numpy.random.RandomState"
]
] |
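The split script in the row above cuts each binarized dialogue into sub-examples wherever a chosen token id occurs (`magicsplit`, which drops empty pieces) and then optionally merges consecutive pieces. A tiny stand-alone sketch of that splitting behaviour, with an illustrative token list and end-of-dialogue id 2:

```python
# Split a token list on an end-of-dialogue token id (2), matching what
# magicsplit in the row above produces (empty pieces are dropped).
tokens = [5, 7, 2, 9, 4, 2, 8]
splits, current = [], []
for t in tokens:
    if t == 2:
        if current:
            splits.append(current)
        current = []
    else:
        current.append(t)
if current:
    splits.append(current)
print(splits)  # [[5, 7], [9, 4], [8]]
```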
YosefLab/SingleCellLineageTracing | [
"010072b307f7eadbf10dc4af8b2165e48f1736a7"
] | [
"test/simulator_tests/birth_death_simulator_test.py"
] | [
"import unittest\n\nimport networkx as nx\nimport numpy as np\n\nfrom typing import List, Tuple\n\n\nfrom cassiopeia.data.CassiopeiaTree import CassiopeiaTree\nfrom cassiopeia.mixins import TreeSimulatorError\nfrom cassiopeia.simulator.BirthDeathFitnessSimulator import (\n BirthDeathFitnessSimulator,\n)\n\nimport cassiopeia.data.utilities as utilities\n\n\ndef extract_tree_statistics(\n tree: CassiopeiaTree,\n) -> Tuple[List[float], int, bool]:\n \"\"\"A helper function for testing simulated trees.\n\n Outputs the total lived time for each extant lineage, the number of extant\n lineages, and whether the tree has the expected node degrees (to ensure\n unifurcations were collapsed).\n\n Args:\n tree: The tree to test\n\n Returns:\n The total time lived for each leaf, the number of leaves, and if the\n degrees only have degree 0 or 2\n \"\"\"\n\n times = []\n out_degrees = []\n for i in tree.nodes:\n if tree.is_leaf(i):\n times.append(tree.get_time(i))\n out_degrees.append(len(tree.children(i)))\n out_degrees.pop(0)\n\n correct_degrees = all(x == 2 or x == 0 for x in out_degrees)\n\n return times, len(times), correct_degrees\n\n\nclass BirthDeathSimulatorTest(unittest.TestCase):\n def test_bad_waiting_distributions(self):\n \"\"\"Ensures errors when invalid distributions are given.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: -1, 1, experiment_time=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(lambda _: 0, 1, num_extant=4)\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: -1, num_extant=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 0, experiment_time=1\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1,\n 1,\n lambda: 0,\n mutation_distribution=lambda: -1,\n fitness_distribution=lambda: 1,\n experiment_time=1,\n )\n tree = bd_sim.simulate_tree()\n\n def test_bad_stopping_conditions(self):\n \"\"\"Ensures errors when an invalid stopping conditions are given.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, lambda: 2)\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=0.5\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=-1\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, num_extant=0\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, experiment_time=-1\n )\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 1, 1, lambda: 2, experiment_time=0\n )\n\n def test_dead_at_start(self):\n \"\"\"Ensures errors in base case where all lineages die on first event.\"\"\"\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 2, 1, lambda: 1, num_extant=4\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n lambda _: 2, 1, lambda: 1, experiment_time=4\n )\n tree = 
bd_sim.simulate_tree()\n\n def test_dead_before_end(self):\n \"\"\"Ensures errors when all lineages die before stopping condition.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(0.6)\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, num_extant=8, random_seed=5\n )\n tree = bd_sim.simulate_tree()\n\n with self.assertRaises(TreeSimulatorError):\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, experiment_time=2, random_seed=5\n )\n tree = bd_sim.simulate_tree()\n\n def test_single_lineage(self):\n \"\"\"Tests base case that stopping conditions work before divisions.\"\"\"\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=1)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertEqual(results[1], 1)\n self.assertEqual(tree.get_branch_length(\"0\", \"1\"), 1.0)\n self.assertEqual(results[0], [1])\n\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=1)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertEqual(results[1], 1)\n self.assertEqual(tree.get_branch_length(\"0\", \"1\"), 1.0)\n self.assertEqual(results[0], [1])\n\n def test_constant_yule(self):\n \"\"\"Tests small case without death with constant waiting times.\"\"\"\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, num_extant=32)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 6)\n self.assertEqual(results[1], 32)\n self.assertTrue(results[2])\n\n bd_sim = BirthDeathFitnessSimulator(lambda _: 1, 1, experiment_time=6)\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 6)\n self.assertEqual(results[1], 32)\n self.assertTrue(results[2])\n\n def test_nonconstant_yule(self):\n \"\"\"Tests case without death with variable waiting times.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 1, num_extant=16, random_seed=54\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 16)\n self.assertTrue(results[2])\n self.assertEqual(max([int(i) for i in tree.nodes]), 31)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 1, experiment_time=2, random_seed=54\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertEqual(i, 2)\n self.assertTrue(results[2])\n\n def test_nonconstant_birth_death(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Also, tests pruning dead lineages and unifurcation collapsing.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, num_extant=8, random_seed=1234\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n self.assertNotIn(\"9\", tree.nodes)\n self.assertNotIn(\"2\", tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd, 0.5, death_wd, experiment_time=2, random_seed=1234\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n 
self.assertTrue(np.isclose(i, 2))\n self.assertTrue(results[2])\n self.assertNotIn(\"9\", tree.nodes)\n self.assertNotIn(\"2\", tree.nodes)\n\n def test_nonconstant_birth_death_no_unifurcation_collapsing(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Checks that unifurcations are not collapsed.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n collapse_unifurcations=False,\n random_seed=12,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertFalse(results[2])\n self.assertNotIn(\"3\", tree.nodes)\n self.assertIn(\"2\", tree.nodes)\n self.assertIn(\"6\", tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n experiment_time=1.3,\n collapse_unifurcations=False,\n random_seed=12,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 1.3))\n self.assertFalse(results[2])\n self.assertNotIn(\"3\", tree.nodes)\n self.assertIn(\"2\", tree.nodes)\n self.assertIn(\"6\", tree.nodes)\n\n def test_nonconstant_birth_death_both_stopping_conditions(self):\n \"\"\"Tests case with with variable birth and death waiting times.\n Checks that using both stopping conditions works fine.\"\"\"\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(1.5)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n experiment_time=2,\n random_seed=17,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertTrue(all(x > 1 for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n num_extant=8,\n experiment_time=1,\n random_seed=17,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 1))\n self.assertEqual(results[1], 3)\n self.assertTrue(results[2])\n\n def test_nonconstant_yule_with_predictable_fitness(self):\n \"\"\"Tests case with birth and death with constant fitness.\"\"\"\n\n def check_fitness_values_as_expected(tree: nx.DiGraph):\n \"\"\"Checks if the fitness value stored at each node is what we\n expect given deterministic fitness evolution\"\"\"\n tree = tree.copy()\n for u, v in tree.edges:\n tree[u][v][\"val\"] = 1\n tree.nodes[\"0\"][\"depth\"] = 0\n for u, v in nx.dfs_edges(tree, source=\"0\"):\n tree.nodes[v][\"depth\"] = (\n tree.nodes[u][\"depth\"] + tree[u][v][\"val\"]\n )\n leaves = [n for n in tree if tree.out_degree(n) == 0]\n for i in tree.nodes:\n if i in leaves:\n self.assertTrue(\n np.isclose(\n tree.nodes[i][\"birth_scale\"],\n 0.5 * 0.98 ** (2 * (tree.nodes[i][\"depth\"] - 1)),\n )\n )\n else:\n self.assertTrue(\n np.isclose(\n tree.nodes[i][\"birth_scale\"],\n 0.5 * 0.98 ** (2 * tree.nodes[i][\"depth\"]),\n )\n )\n\n birth_wd = lambda scale: np.random.exponential(scale)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n mutation_distribution=lambda: 2,\n fitness_distribution=lambda: 1,\n fitness_base=0.98,\n num_extant=8,\n random_seed=1234,\n )\n tree = bd_sim.simulate_tree()\n results = 
extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n check_fitness_values_as_expected(tree.get_tree_topology())\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n mutation_distribution=lambda: 2,\n fitness_distribution=lambda: 1,\n fitness_base=0.98,\n experiment_time=0.6,\n random_seed=1234,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 0.6))\n self.assertTrue(results[2])\n check_fitness_values_as_expected(tree.get_tree_topology())\n\n def test_nonconstant_birth_death_with_variable_fitness(self):\n \"\"\"Tests a case with variable birth and death waiting times, as well\n as variable fitness evolution. Also tests pruning and collapsing.\"\"\"\n\n birth_wd = lambda scale: np.random.exponential(scale)\n death_wd = lambda: np.random.exponential(0.6)\n mut_dist = lambda: 1 if np.random.uniform() < 0.2 else 0\n fit_dist = lambda: np.random.uniform(-1, 1)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n mut_dist,\n fit_dist,\n 1.5,\n num_extant=8,\n random_seed=12364,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n self.assertTrue(all(np.isclose(x, results[0][0]) for x in results[0]))\n self.assertEqual(results[1], 8)\n self.assertTrue(results[2])\n self.assertNotIn(2, tree.nodes)\n self.assertNotIn(3, tree.nodes)\n\n bd_sim = BirthDeathFitnessSimulator(\n birth_wd,\n 0.5,\n death_wd,\n mut_dist,\n fit_dist,\n 1.5,\n experiment_time=3,\n random_seed=12364,\n )\n tree = bd_sim.simulate_tree()\n results = extract_tree_statistics(tree)\n for i in results[0]:\n self.assertTrue(np.isclose(i, 3))\n self.assertTrue(results[2])\n self.assertNotIn(2, tree.nodes)\n self.assertNotIn(3, tree.nodes)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.random.uniform",
"numpy.isclose",
"numpy.random.exponential"
]
] |
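The row above stores a unit-test module whose simulators are driven by waiting-time callables built from numpy random draws (the APIs listed for this row). A minimal, hypothetical sketch of what those callables encode, using plain numpy only; the scale values and seed below are assumptions for illustration, not values taken from the stored tests:

import numpy as np

# Assumed seed, mirroring the random_seed arguments used in the stored tests.
np.random.seed(54)

# Birth waits are exponential with a per-lineage scale; death waits use a fixed scale.
birth_waiting = lambda scale: np.random.exponential(scale)
death_waiting = lambda: np.random.exponential(1.5)

# A lineage divides only if its birth wait is shorter than its death wait.
birth_t = birth_waiting(0.5)
death_t = death_waiting()
print(birth_t, death_t, birth_t < death_t)

# The stored tests compare leaf times with np.isclose rather than exact equality.
print(np.isclose(birth_t, birth_t + 1e-12))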
ssccutyy/KWS-Transformer | [
"7ae6d2e8fce1a293d88eedc0dbfacae726151a08"
] | [
"kws_streaming/train/train.py"
] | [
"# coding=utf-8\n# Copyright (c) 2021, Arm Limited and Contributors.\n# SPDX-License-Identifier: Apache-2.0\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train utility functions, based on tensorflow/examples/speech_commands.\n\n It consists of several steps:\n 1. Creates model.\n 2. Reads data\n 3. Trains model\n 4. Select the best model and evaluates it\n\"\"\"\n\nimport json\nfrom types import SimpleNamespace\nimport os.path\nimport pprint\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport tensorflow_addons as tfa\nimport kws_streaming.data.input_data as input_data\nfrom kws_streaming.models import models\nfrom kws_streaming.models import utils\n\nimport math\n\nfrom transformers import AdamWeightDecay\n\n\nfrom kws_streaming.models import model_flags\n\n\ndef train(flags):\n \"\"\"Model training.\"\"\"\n\n flags.training = True\n\n # Set the verbosity based on flags (default is INFO, so we see all messages)\n logging.set_verbosity(flags.verbosity)\n\n # Start a new TensorFlow session.\n tf.reset_default_graph()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n tf.keras.backend.set_session(sess)\n\n audio_processor = input_data.AudioProcessor(flags)\n\n time_shift_samples = int((flags.time_shift_ms * flags.sample_rate) / 1000)\n\n # Figure out the learning rates for each training phase. Since it's often\n # effective to have high learning rates at the start of training, followed by\n # lower levels towards the end, the number of steps and learning rates can be\n # specified as comma-separated lists to define the rate at each stage. 
For\n # example --how_many_training_steps=10000,3000 --learning_rate=0.001,0.0001\n # will run 13,000 training loops in total, with a rate of 0.001 for the first\n # 10,000, and 0.0001 for the final 3,000.\n training_steps_list = list(map(int, flags.how_many_training_steps.split(',')))\n learning_rates_list = list(map(float, flags.learning_rate.split(',')))\n if len(training_steps_list) != len(learning_rates_list):\n raise Exception(\n '--how_many_training_steps and --learning_rate must be equal length '\n 'lists, but are %d and %d long instead' % (len(training_steps_list),\n len(learning_rates_list)))\n logging.info(flags)\n\n model = models.MODELS[flags.model_name](flags)\n if flags.distill_teacher_json:\n with open(flags.distill_teacher_json, 'r') as f:\n teacher_flags = json.load(f, object_hook=lambda d: SimpleNamespace(\n **{ k: v for k, v in flags.__dict__.items() if not k in d },\n **d))\n teacher_base = models.MODELS[teacher_flags.model_name](teacher_flags)\n hard_labels = tf.keras.layers.Lambda(lambda logits: tf.one_hot(tf.math.argmax(logits, axis=-1), depth=flags.label_count))\n teacher = tf.keras.models.Sequential([teacher_base, hard_labels])\n teacher_base.trainable = False\n teacher.trainable = False\n else:\n teacher = None\n teacher_flags = None\n\n base_model = model\n\n logging.info(model.summary())\n\n # save model summary\n utils.save_model_summary(model, flags.train_dir)\n\n # save model and data flags\n with open(os.path.join(flags.train_dir, 'flags.txt'), 'wt') as f:\n pprint.pprint(flags, stream=f)\n\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=flags.label_smoothing)\n metrics = ['accuracy']\n\n if flags.optimizer == 'adam':\n optimizer = tf.keras.optimizers.Adam(epsilon=flags.optimizer_epsilon)\n elif flags.optimizer == 'momentum':\n optimizer = tf.keras.optimizers.SGD(momentum=0.9)\n elif flags.optimizer == 'novograd':\n optimizer = tfa.optimizers.NovoGrad(\n lr=0.05,\n beta_1=flags.novograd_beta_1,\n beta_2=flags.novograd_beta_2,\n weight_decay=flags.novograd_weight_decay,\n grad_averaging=bool(flags.novograd_grad_averaging))\n elif flags.optimizer == 'adamw':\n # Exclude some layers for weight decay\n exclude = [\"pos_emb\", \"class_emb\", \"layer_normalization\", \"bias\"]\n optimizer = AdamWeightDecay(learning_rate=0.05, weight_decay_rate=flags.l2_weight_decay, exclude_from_weight_decay=exclude)\n else:\n raise ValueError('Unsupported optimizer:%s' % flags.optimizer)\n\n loss_weights = [ 0.5, 0.5, 0.0 ] if teacher else [ 1. 
] # equally weight losses form label and teacher, ignore ensemble output\n model.compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights, metrics=metrics)\n\n train_writer = tf.summary.FileWriter(flags.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.summary.FileWriter(flags.summaries_dir + '/validation')\n\n sess.run(tf.global_variables_initializer())\n\n if flags.start_checkpoint:\n model.load_weights(flags.start_checkpoint).expect_partial()\n logging.info('Weights loaded from %s', flags.start_checkpoint)\n\n if teacher_flags and teacher_flags.start_checkpoint:\n # Load weights into teacher base as this is the actual model that was saved, teacher includes hard label head\n teacher_base.load_weights(teacher_flags.start_checkpoint).assert_existing_objects_matched()\n logging.info('Distillation teacher weights loaded from %s', teacher_flags.start_checkpoint)\n\n start_step = 0\n\n logging.info('Training from step: %d ', start_step)\n\n # Save graph.pbtxt.\n tf.train.write_graph(sess.graph_def, flags.train_dir, 'graph.pbtxt')\n\n # Save list of words.\n with tf.io.gfile.GFile(os.path.join(flags.train_dir, 'labels.txt'), 'w') as f:\n f.write('\\n'.join(audio_processor.words_list))\n\n best_accuracy = 0.0\n\n # prepare parameters for exp learning rate decay\n training_steps_max = np.sum(training_steps_list)\n lr_init = learning_rates_list[0]\n exp_rate = -np.log(learning_rates_list[-1] / lr_init)/training_steps_max\n mode = 'training'\n\n if flags.lr_schedule == 'cosine':\n # Currently, no restarts are performed, so it is just a cosine decay over the entire\n # training process. I think this is how DeiT does it.\n lr_init = lr_init * flags.batch_size / 512\n num_train = audio_processor.set_size(mode)\n warmup_steps = int((num_train / flags.batch_size) * flags.warmup_epochs)\n first_decay_steps=training_steps_max\n\n # Training loop.\n for training_step in range(start_step, training_steps_max + 1):\n if training_step > 0:\n offset = (training_step -\n 1) * flags.batch_size if flags.pick_deterministically else 0\n\n # Pull the audio samples we'll use for training.\n train_fingerprints, train_ground_truth = audio_processor.get_data(\n flags.batch_size, offset, flags, flags.background_frequency,\n flags.background_volume, time_shift_samples, mode,\n flags.resample, flags.volume_resample, sess)\n\n if flags.lr_schedule == 'exp':\n learning_rate_value = lr_init * np.exp(-exp_rate * training_step)\n elif flags.lr_schedule == 'linear':\n # Figure out what the current learning rate is.\n training_steps_sum = 0\n for i in range(len(training_steps_list)):\n training_steps_sum += training_steps_list[i]\n if training_step <= training_steps_sum:\n learning_rate_value = learning_rates_list[i]\n break\n elif flags.lr_schedule == 'cosine':\n learning_rate_value = lr_init * min(1, float(training_step) / max(1, warmup_steps)) * (math.cos(math.pi * training_step / training_steps_max) + 1) / 2.\n else:\n raise ValueError('Wrong lr_schedule: %s' % flags.lr_schedule)\n\n tf.keras.backend.set_value(model.optimizer.learning_rate, learning_rate_value)\n\n one_hot_labels = tf.keras.utils.to_categorical(train_ground_truth, num_classes=flags.label_count)\n\n if teacher:\n teacher_labels = teacher.predict_on_batch(train_fingerprints)\n one_hot_labels = [ one_hot_labels, teacher_labels, one_hot_labels ] # third is for the ensemble output, gradient is unused\n\n result = model.train_on_batch(train_fingerprints, one_hot_labels)\n\n if teacher:\n loss_total, loss_label, loss_teacher, loss_average, 
acc_label, acc_teacher, acc_ensemble = result\n differences = (teacher_labels != one_hot_labels).astype(dtype=int).sum()\n logging.info(\n 'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f, teacher acc %.2f%% (%d diff), teacher cross entropy %f, ensemble acc %.2f%%',\n *(training_step, learning_rate_value, acc_label * 100, loss_total, acc_teacher * 100, differences, loss_teacher, acc_ensemble * 100))\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),\n tf.Summary.Value(tag='teacher_accuracy', simple_value=acc_teacher),\n tf.Summary.Value(tag='ensemble_accuracy', simple_value=acc_ensemble),\n ])\n else:\n loss_label, acc_label = result\n logging.info(\n 'Step #%d: rate %f, accuracy %.2f%%, cross entropy %f',\n *(training_step, learning_rate_value, acc_label * 100, loss_label))\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),\n ])\n\n train_writer.add_summary(summary, training_step)\n\n is_last_step = (training_step == training_steps_max)\n if (training_step % flags.eval_step_interval) == 0 or is_last_step:\n set_size = audio_processor.set_size('validation')\n set_size = int(set_size / flags.batch_size) * flags.batch_size\n total_accuracy = 0.0\n count = 0.0\n for i in range(0, set_size, flags.batch_size):\n validation_fingerprints, validation_ground_truth = audio_processor.get_data(\n flags.batch_size, i, flags, 0.0,\n 0.0, 0, 'validation',\n 0.0, 0.0, sess)\n\n one_hot_labels = tf.keras.utils.to_categorical(validation_ground_truth, num_classes=flags.label_count)\n if teacher:\n one_hot_labels = [ one_hot_labels, one_hot_labels, one_hot_labels ]\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n result = model.test_on_batch(validation_fingerprints,\n one_hot_labels)\n\n if teacher:\n loss_total, loss_label, loss_teacher, loss_average, acc_label, acc_teacher, acc_ensemble = result\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_ensemble),\n tf.Summary.Value(tag='label_head_accuracy', simple_value=acc_label),\n tf.Summary.Value(tag='distill_head_accuracy', simple_value=acc_teacher),\n ])\n accuracy = acc_ensemble\n else:\n loss_label, acc_label = result\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='accuracy', simple_value=acc_label),])\n accuracy = acc_label\n\n validation_writer.add_summary(summary, training_step)\n\n total_accuracy += accuracy\n count = count + 1.0\n\n total_accuracy = total_accuracy / count\n logging.info('Step %d: Validation accuracy = %.2f%% (N=%d)',\n *(training_step, total_accuracy * 100, set_size))\n\n # Save the model checkpoint when validation accuracy improves\n if total_accuracy >= best_accuracy:\n best_accuracy = total_accuracy\n # overwrite the best model weights\n model.save_weights(flags.train_dir + 'best_weights')\n logging.info('So far the best validation accuracy is %.2f%%',\n (best_accuracy * 100))\n\n tf.keras.backend.set_learning_phase(0)\n set_size = audio_processor.set_size('testing')\n set_size = int(set_size / flags.batch_size) * flags.batch_size\n logging.info('set_size=%d', set_size)\n total_accuracy = 0.0\n count = 0.0\n\n for i in range(0, set_size, flags.batch_size):\n test_fingerprints, test_ground_truth = audio_processor.get_data(\n flags.batch_size, i, flags, 0.0, 0.0, 0, 'testing', 0.0, 0.0, sess)\n\n one_hot_labels = tf.keras.utils.to_categorical(test_ground_truth, num_classes=flags.label_count)\n if teacher:\n one_hot_labels = [ one_hot_labels, one_hot_labels, 
one_hot_labels ]\n result = model.test_on_batch(test_fingerprints, one_hot_labels)\n\n total_accuracy += result[-1] if teacher else result[1]\n count = count + 1.0\n total_accuracy = total_accuracy / count\n\n logging.info('Final test accuracy = %.2f%% (N=%d)',\n *(total_accuracy * 100, set_size))\n with open(os.path.join(flags.train_dir, 'accuracy_last.txt'), 'wt') as fd:\n fd.write(str(total_accuracy * 100))\n model.save_weights(flags.train_dir + 'last_weights')\n\nif __name__ == '__main__':\n flags = model_flags.update_flags(None)\n train(flags)"
] | [
[
"numpy.sum",
"tensorflow.compat.v1.math.argmax",
"tensorflow.compat.v1.keras.backend.set_learning_phase",
"numpy.log",
"tensorflow.compat.v1.keras.utils.to_categorical",
"tensorflow.compat.v1.Summary.Value",
"tensorflow.compat.v1.keras.models.Sequential",
"tensorflow.compat.v1.keras.optimizers.Adam",
"tensorflow.compat.v1.train.write_graph",
"tensorflow.compat.v1.keras.backend.set_session",
"tensorflow.compat.v1.keras.optimizers.SGD",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.keras.losses.CategoricalCrossentropy",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.keras.backend.set_value",
"numpy.exp"
]
] |
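The training script in the row above computes its per-step learning rate in plain Python before setting it on the Keras optimizer. A minimal sketch of the two schedules it implements, exponential decay and cosine decay with linear warmup, with assumed step counts and rates (no TensorFlow required to reproduce the arithmetic):

import math
import numpy as np

# Assumed illustrative values; the real ones come from the script's flags.
training_steps_max = 20000
lr_init, lr_final = 0.001, 0.0001
warmup_steps = 1000

# Exponential schedule: decays from lr_init at step 0 to lr_final at the last step.
exp_rate = -np.log(lr_final / lr_init) / training_steps_max
def lr_exp(step):
    return lr_init * np.exp(-exp_rate * step)

# Cosine schedule with linear warmup, matching the formula used in the script.
def lr_cosine(step):
    warmup = min(1.0, float(step) / max(1, warmup_steps))
    return lr_init * warmup * (math.cos(math.pi * step / training_steps_max) + 1) / 2.0

print(lr_exp(0), lr_exp(training_steps_max))          # lr_init -> lr_final
print(lr_cosine(500), lr_cosine(training_steps_max))  # warms up, then decays to 0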
paudetseis/OBStools | [
"c6c02d8864c25a14f22d1fae17ff5ad911b9ff00"
] | [
"obstools/scripts/atacr_clean_spectra.py"
] | [
"#!/usr/bin/env python\n\n# Copyright 2019 Pascal Audet & Helen Janiszewski\n#\n# This file is part of OBStools.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# Import modules and functions\nimport numpy as np\nimport pickle\nimport stdb\nfrom obstools.atacr import StaNoise, Power, Cross, Rotation\nfrom obstools.atacr import utils, plotting\nfrom pathlib import Path\n\nfrom argparse import ArgumentParser\nfrom os.path import exists as exist\nfrom obspy import UTCDateTime\nfrom numpy import nan\n\n\ndef get_cleanspec_arguments(argv=None):\n \"\"\"\n Get Options from :class:`~optparse.OptionParser` objects.\n\n Calling options for the script `obs_clean_spectra.py` that accompany this\n package.\n\n \"\"\"\n\n parser = ArgumentParser(\n usage=\"%(prog)s [options] <indb>\",\n description=\"Script used \"\n \"to extract daily spectra calculated from \" +\n \"`obs_daily_spectra.py` and flag days for outlier \" +\n \"PSDs and calculate spectral averages of the \" +\n \"corresponding Fourier transforms over the entire \" +\n \"time period specified. The stations are processed \" +\n \"one by one and the data are stored to disk.\")\n parser.add_argument(\n \"indb\",\n help=\"Station Database to process from.\",\n type=str)\n\n # General Settings\n parser.add_argument(\n \"--keys\",\n action=\"store\",\n type=str,\n dest=\"stkeys\",\n default=\"\",\n help=\"Specify a comma separated list of station \" +\n \"keys for which to perform the analysis. These must \" +\n \"be contained within the station database. Partial \" +\n \"keys will be used to match against those in the \" +\n \"dictionary. For instance, providing IU will match \" +\n \"with all stations in the IU network. \" +\n \"[Default processes all stations in the database]\")\n parser.add_argument(\n \"-O\", \"--overwrite\",\n action=\"store_true\",\n dest=\"ovr\",\n default=False,\n help=\"Force the overwriting of pre-existing data. \" +\n \"[Default False]\")\n\n # Event Selection Criteria\n DaysGroup = parser.add_argument_group(\n title=\"Time Search Settings\",\n description=\"Time settings associated with \" +\n \"searching for day-long seismograms\")\n DaysGroup.add_argument(\n \"--start\",\n action=\"store\",\n type=str,\n dest=\"startT\",\n default=\"\",\n help=\"Specify a UTCDateTime compatible string \" +\n \"representing the start day for the data search. \" +\n \"This will override any station start times. 
\" +\n \"[Default start date of each station in database]\")\n DaysGroup.add_argument(\n \"--end\",\n action=\"store\",\n type=str,\n dest=\"endT\",\n default=\"\",\n help=\"Specify a UTCDateTime compatible string \" +\n \"representing the start time for the data search. \" +\n \"This will override any station end times. \" +\n \"[Default end date of each station in database]\")\n\n # Constants Settings\n ConstGroup = parser.add_argument_group(\n title='Parameter Settings',\n description=\"Miscellaneous default values \" +\n \"and settings\")\n ConstGroup.add_argument(\n \"--freq-band\",\n action=\"store\",\n type=str,\n dest=\"pd\",\n default=None,\n help=\"Specify comma-separated frequency limits \" +\n \"(float, in Hz) over which to calculate spectral \" +\n \"features used in flagging the days/windows. \" +\n \"[Default 0.004,2.0]\")\n ConstGroup.add_argument(\n \"--tolerance\",\n action=\"store\",\n type=float,\n dest=\"tol\",\n default=1.5,\n help=\"Specify parameter for tolerance threshold. \" +\n \"If spectrum > std*tol, window is flagged as bad. \" +\n \"[Default 1.5]\")\n ConstGroup.add_argument(\n \"--alpha\",\n action=\"store\",\n type=float,\n dest=\"alpha\",\n default=0.05,\n help=\"Confidence level for f-test, for iterative \" +\n \"flagging of windows. [Default 0.05, or 95 percent confidence]\")\n\n # Constants Settings\n FigureGroup = parser.add_argument_group(\n title='Figure Settings',\n description=\"Flags for plotting figures\")\n FigureGroup.add_argument(\n \"--figQC\",\n action=\"store_true\",\n dest=\"fig_QC\",\n default=False,\n help=\"Plot Quality-Control figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=\"Plot intermediate steps for debugging. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figAverage\",\n action=\"store_true\",\n dest=\"fig_average\",\n default=False,\n help=\"Plot daily average figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figCoh\",\n action=\"store_true\",\n dest=\"fig_coh_ph\",\n default=False,\n help=\"Plot Coherence and Phase figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--figCross\",\n action=\"store_true\",\n dest=\"fig_av_cross\",\n default=False,\n help=\"Plot cross-spectra figure. \" +\n \"[Default does not plot figure]\")\n FigureGroup.add_argument(\n \"--save-fig\",\n action=\"store_true\",\n dest=\"saveplot\",\n default=False,\n help=\"Set this option if you wish to save the figure(s). [Default \" +\n \"does not save figure]\")\n FigureGroup.add_argument(\n \"--format\",\n action=\"store\",\n type=str,\n dest=\"form\",\n default=\"png\",\n help=\"Specify format of figure. Can be any one of the valid\" +\n \"matplotlib formats: 'png', 'jpg', 'eps', 'pdf'. 
[Default 'png']\")\n\n args = parser.parse_args(argv)\n\n # Check inputs\n if not exist(args.indb):\n parser.error(\"Input file \" + args.indb + \" does not exist\")\n\n # create station key list\n if len(args.stkeys) > 0:\n args.stkeys = args.stkeys.split(',')\n\n # construct start time\n if len(args.startT) > 0:\n try:\n args.startT = UTCDateTime(args.startT)\n except Exception:\n parser.error(\n \"Error: Cannot construct UTCDateTime from start time: \" +\n args.startT)\n else:\n args.startT = None\n\n # construct end time\n if len(args.endT) > 0:\n try:\n args.endT = UTCDateTime(args.endT)\n except Exception:\n parser.error(\n \"Error: Cannot construct UTCDateTime from end time: \" +\n args.endT)\n else:\n args.endT = None\n\n if args.pd is None:\n args.pd = [0.004, 2.0]\n else:\n args.pd = [float(val) for val in args.pd.split(',')]\n args.pd = sorted(args.pd)\n if (len(args.pd)) != 2:\n raise(Exception(\n \"Error: --freq-band should contain 2 \" +\n \"comma-separated floats\"))\n\n return args\n\n\ndef main(args=None):\n\n if args is None:\n # Run Input Parser\n args = get_cleanspec_arguments()\n\n # Load Database\n # stdb>0.1.3\n try:\n db, stkeys = stdb.io.load_db(fname=args.indb, keys=args.stkeys)\n\n # stdb=0.1.3\n except Exception:\n db = stdb.io.load_db(fname=args.indb)\n\n # Construct station key loop\n allkeys = db.keys()\n sorted(allkeys)\n\n # Extract key subset\n if len(args.stkeys) > 0:\n stkeys = []\n for skey in args.stkeys:\n stkeys.extend([s for s in allkeys if skey in s])\n else:\n stkeys = db.keys()\n sorted(stkeys)\n\n # Loop over station keys\n for stkey in list(stkeys):\n\n # Extract station information from dictionary\n sta = db[stkey]\n\n # Path where spectra are located\n specpath = Path('SPECTRA') / stkey\n if not specpath.is_dir():\n raise(Exception(\n \"Path to \" + str(specpath) +\n \" doesn`t exist - aborting\"))\n\n # Path where average spectra will be saved\n avstpath = Path('AVG_STA') / stkey\n if not avstpath.is_dir():\n print(\"Path to \"+str(avstpath)+\" doesn`t exist - creating it\")\n avstpath.mkdir(parents=True)\n\n # Path where plots will be saved\n if args.saveplot:\n plotpath = avstpath / 'PLOTS'\n if not plotpath.is_dir():\n plotpath.mkdir(parents=True)\n else:\n plotpath = False\n\n # Get catalogue search start time\n if args.startT is None:\n tstart = sta.startdate\n else:\n tstart = args.startT\n\n # Get catalogue search end time\n if args.endT is None:\n tend = sta.enddate\n else:\n tend = args.endT\n\n if tstart > sta.enddate or tend < sta.startdate:\n continue\n\n # Temporary print locations\n tlocs = sta.location\n if len(tlocs) == 0:\n tlocs = ['']\n for il in range(0, len(tlocs)):\n if len(tlocs[il]) == 0:\n tlocs[il] = \"--\"\n sta.location = tlocs\n\n # Update Display\n print(\"\\n|===============================================|\")\n print(\"|===============================================|\")\n print(\"| {0:>8s} |\".format(\n sta.station))\n print(\"|===============================================|\")\n print(\"|===============================================|\")\n print(\"| Station: {0:>2s}.{1:5s} |\".format(\n sta.network, sta.station))\n print(\"| Channel: {0:2s}; Locations: {1:15s} |\".format(\n sta.channel, \",\".join(tlocs)))\n print(\"| Lon: {0:7.2f}; Lat: {1:6.2f} |\".format(\n sta.longitude, sta.latitude))\n print(\"| Start time: {0:19s} |\".format(\n sta.startdate.strftime(\"%Y-%m-%d %H:%M:%S\")))\n print(\"| End time: {0:19s} |\".format(\n sta.enddate.strftime(\"%Y-%m-%d %H:%M:%S\")))\n 
print(\"|-----------------------------------------------|\")\n\n # Filename for output average spectra\n dstart = str(tstart.year).zfill(4)+'.'+str(tstart.julday).zfill(3)+'-'\n dend = str(tend.year).zfill(4)+'.'+str(tend.julday).zfill(3)+'.'\n fileavst = avstpath / (dstart+dend+'avg_sta.pkl')\n\n if fileavst.exists():\n if not args.ovr:\n print(\"* -> file \"+str(fileavst)+\" exists - continuing\")\n continue\n\n # Containers for power and cross spectra\n coh_all = []\n ph_all = []\n coh_12_all = []\n coh_1Z_all = []\n coh_1P_all = []\n coh_2Z_all = []\n coh_2P_all = []\n coh_ZP_all = []\n ph_12_all = []\n ph_1Z_all = []\n ph_1P_all = []\n ph_2Z_all = []\n ph_2P_all = []\n ph_ZP_all = []\n ad_12_all = []\n ad_1Z_all = []\n ad_1P_all = []\n ad_2Z_all = []\n ad_2P_all = []\n ad_ZP_all = []\n nwins = []\n\n t1 = tstart\n\n # Initialize StaNoise object\n stanoise = StaNoise()\n\n # Loop through each day withing time range\n while t1 < tend:\n\n year = str(t1.year).zfill(4)\n jday = str(t1.julday).zfill(3)\n\n tstamp = year+'.'+jday+'.'\n filespec = specpath / (tstamp + 'spectra.pkl')\n\n # Load file if it exists\n if filespec.exists():\n print(\"\\n\"+\"*\"*60)\n print('* Calculating noise spectra for key ' +\n stkey+' and day '+year+'.'+jday)\n print(\"* -> file \"+str(filespec)+\" found - loading\")\n file = open(filespec, 'rb')\n daynoise = pickle.load(file)\n file.close()\n stanoise += daynoise\n else:\n t1 += 3600.*24.\n continue\n\n coh_all.append(daynoise.rotation.coh)\n ph_all.append(daynoise.rotation.ph)\n\n # Coherence\n coh_12_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c12,\n daynoise.power.c11,\n daynoise.power.c22), 50))\n coh_1Z_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c1Z,\n daynoise.power.c11,\n daynoise.power.cZZ), 50))\n coh_1P_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c1P,\n daynoise.power.c11,\n daynoise.power.cPP), 50))\n coh_2Z_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c2Z,\n daynoise.power.c22,\n daynoise.power.cZZ), 50))\n coh_2P_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.c2P,\n daynoise.power.c22,\n daynoise.power.cPP), 50))\n coh_ZP_all.append(\n utils.smooth(\n utils.coherence(\n daynoise.cross.cZP,\n daynoise.power.cZZ,\n daynoise.power.cPP), 50))\n\n # Phase\n try:\n ph_12_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c12))\n except Exception:\n ph_12_all.append(None)\n try:\n ph_1Z_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c1Z))\n except Exception:\n ph_1Z_all.append(None)\n try:\n ph_1P_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c1P))\n except Exception:\n ph_1P_all.append(None)\n try:\n ph_2Z_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c2Z))\n except Exception:\n ph_2Z_all.append(None)\n try:\n ph_2P_all.append(\n 180./np.pi*utils.phase(daynoise.cross.c2P))\n except Exception:\n ph_2P_all.append(None)\n try:\n ph_ZP_all.append(\n 180./np.pi*utils.phase(daynoise.cross.cZP))\n except Exception:\n ph_ZP_all.append(None)\n\n # Admittance\n ad_12_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c12, daynoise.power.c11), 50))\n ad_1Z_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c1Z, daynoise.power.c11), 50))\n ad_1P_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c1P, daynoise.power.c11), 50))\n ad_2Z_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c2Z, daynoise.power.c22), 50))\n ad_2P_all.append(utils.smooth(utils.admittance(\n daynoise.cross.c2P, daynoise.power.c22), 50))\n 
ad_ZP_all.append(utils.smooth(utils.admittance(\n daynoise.cross.cZP, daynoise.power.cZZ), 50))\n\n t1 += 3600.*24.\n\n # Convert to numpy arrays\n coh_all = np.array(coh_all)\n ph_all = np.array(ph_all)\n coh_12_all = np.array(coh_12_all)\n coh_1Z_all = np.array(coh_1Z_all)\n coh_1P_all = np.array(coh_1P_all)\n coh_2Z_all = np.array(coh_2Z_all)\n coh_2P_all = np.array(coh_2P_all)\n coh_ZP_all = np.array(coh_ZP_all)\n ph_12_all = np.array(ph_12_all)\n ph_1Z_all = np.array(ph_1Z_all)\n ph_1P_all = np.array(ph_1P_all)\n ph_2Z_all = np.array(ph_2Z_all)\n ph_2P_all = np.array(ph_2P_all)\n ph_ZP_all = np.array(ph_ZP_all)\n ad_12_all = np.array(ad_12_all)\n ad_1Z_all = np.array(ad_1Z_all)\n ad_1P_all = np.array(ad_1P_all)\n ad_2Z_all = np.array(ad_2Z_all)\n ad_2P_all = np.array(ad_2P_all)\n ad_ZP_all = np.array(ad_ZP_all)\n\n # Store transfer functions as objects for plotting\n coh = Cross(coh_12_all, coh_1Z_all, coh_1P_all,\n coh_2Z_all, coh_2P_all, coh_ZP_all)\n ph = Cross(ph_12_all, ph_1Z_all, ph_1P_all,\n ph_2Z_all, ph_2P_all, ph_ZP_all)\n ad = Cross(ad_12_all, ad_1Z_all, ad_1P_all,\n ad_2Z_all, ad_2P_all, ad_ZP_all)\n\n # Quality control to identify outliers\n stanoise.QC_sta_spectra(pd=args.pd, tol=args.tol, alpha=args.alpha,\n fig_QC=args.fig_QC, debug=args.debug,\n save=plotpath, form=args.form)\n\n # Average spectra for good days\n stanoise.average_sta_spectra(\n fig_average=args.fig_average,\n save=plotpath, form=args.form)\n\n if args.fig_av_cross:\n fname = stkey + '.' + 'av_coherence'\n plot = plotting.fig_av_cross(\n stanoise.f, coh, stanoise.gooddays,\n 'Coherence', stanoise.ncomp, key=stkey, lw=0.5)\n # if plotpath.is_dir():\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n fname = stkey + '.' + 'av_admittance'\n plot = plotting.fig_av_cross(\n stanoise.f, ad, stanoise.gooddays,\n 'Admittance', stanoise.ncomp, key=stkey, lw=0.5)\n\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n fname = stkey + '.' + 'av_phase'\n plot = plotting.fig_av_cross(\n stanoise.f, ph, stanoise.gooddays,\n 'Phase', stanoise.ncomp, key=stkey, marker=',', lw=0)\n\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n if args.fig_coh_ph and stanoise.direc is not None:\n fname = stkey + '.' + 'coh_ph'\n plot = plotting.fig_coh_ph(coh_all, ph_all, stanoise.direc)\n if plotpath:\n plot.savefig(\n str(plotpath / (fname + '.' + args.form)),\n dpi=300, bbox_inches='tight', format=args.form)\n else:\n plot.show()\n\n # Save to file\n stanoise.save(fileavst)\n\n\nif __name__ == \"__main__\":\n\n # Run main program\n main()\n"
] | [
[
"numpy.array"
]
] |
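The argument parser stored in the row above accepts --freq-band as a comma-separated pair of floats and falls back to 0.004-2.0 Hz when the flag is omitted. A minimal standalone sketch of that parsing and validation logic; the helper name parse_freq_band is hypothetical and only isolates the relevant lines:

def parse_freq_band(pd_string, default=(0.004, 2.0)):
    # None means the flag was not given: use the documented default band.
    if pd_string is None:
        return list(default)
    # Split, convert to floats, and sort so the band is always (low, high).
    pd = sorted(float(val) for val in pd_string.split(','))
    if len(pd) != 2:
        raise Exception("--freq-band should contain 2 comma-separated floats")
    return pd

print(parse_freq_band(None))         # [0.004, 2.0]
print(parse_freq_band("2.0,0.004"))  # [0.004, 2.0]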
tempoCollaboration/OQuPy | [
"a389a161991a59259e5df47d8e0f405fcac75fe5"
] | [
"oqupy/backends/tempo_backend.py"
] | [
"# Copyright 2020 The TEMPO Collaboration\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule for tempo and mean-field tempo backend.\n\"\"\"\n\nfrom typing import Callable, Dict, Optional, Tuple\nfrom copy import copy\n\nfrom numpy import ndarray, moveaxis, dot\n\nfrom oqupy import operators\nfrom oqupy.config import TEMPO_BACKEND_CONFIG\nfrom oqupy.backends import node_array as na\nfrom oqupy.util import create_delta\n\nclass BaseTempoBackend:\n \"\"\"\n Backend class for TEMPO.\n\n Parameters\n ----------\n initial_state: ndarray\n The initial density matrix (as a vector).\n influence: callable(int) -> ndarray\n Callable that takes an integer `step` and returns the influence super\n operator of that `step`.\n unitary_transform: ndarray\n Unitary that transforms the coupling operator into a diagonal form.\n sum_north: ndarray\n The summing vector for the north legs.\n sum_west: ndarray\n The summing vector for the west legs.\n dkmax: int\n Number of influences to include. If ``dkmax == None`` then all\n influences are included.\n epsrel: float\n Maximal relative SVD truncation error.\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Optional[Dict] = None):\n \"\"\"Create a TempoBackend object. \"\"\"\n self._initial_state = initial_state\n self._influence = influence\n self._unitary_transform = unitary_transform\n self._sum_north = sum_north\n self._sum_west = sum_west\n self._dkmax = dkmax\n self._epsrel = epsrel\n self._step = None\n self._state = None\n self._config = TEMPO_BACKEND_CONFIG if config is None else config\n self._mps = None\n self._mpo = None\n self._super_u = None\n self._super_u_dagg = None\n self._sum_north_na = None\n\n @property\n def step(self) -> int:\n \"\"\"The current step in the TEMPO computation. 
\"\"\"\n return self._step\n\n def _initialize_mps_mpo(self) :\n \"\"\"ToDo\"\"\"\n self._initial_state = copy(self._initial_state).reshape(-1)\n\n self._super_u = operators.left_right_super(\n self._unitary_transform,\n self._unitary_transform.conjugate().T)\n self._super_u_dagg = operators.left_right_super(\n self._unitary_transform.conjugate().T,\n self._unitary_transform)\n\n self._sum_north_na = na.NodeArray([self._sum_north],\n left=False,\n right=False,\n name=\"Sum north\")\n influences = []\n if self._dkmax is None:\n dkmax_pre_compute = 1\n else:\n dkmax_pre_compute = self._dkmax + 1\n\n for i in range(dkmax_pre_compute):\n infl = self._influence(i)\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n if i == 0:\n tmp = dot(moveaxis(infl_four_legs, 1, -1),\n self._super_u_dagg)\n tmp = moveaxis(tmp, -1, 1)\n tmp = dot(tmp, self._super_u.T)\n infl_four_legs = tmp\n influences.append(infl_four_legs)\n\n self._mps = na.NodeArray([self._initial_state],\n left=False,\n right=False,\n name=\"Thee MPS\")\n self._mpo = na.NodeArray(list(reversed(influences)),\n left=True,\n right=True,\n name=\"Thee Time Evolving MPO\")\n\n\n def _compute_system_step(self, current_step, prop_1, prop_2) -> ndarray:\n \"\"\"\n Takes a step in the TEMPO tensor network computation.\n\n For example, for at step 4, we start with:\n\n A ... self._mps\n B ... self._mpo\n w ... self._sum_west\n n ... self._sum_north_array\n p1 ... prop_1\n p2 ... prop_2\n\n n n n n\n | | | |\n\n | | | | |\n w~~ ~~B~~B~~B~~B~~ ~~p2\n | | | |\n p1\n | | | |\n A~~A~~A~~A\n\n return:\n step = 4\n state = contraction of A,B,w,n,p1\n\n effects:\n self._mpo will grow to the left with the next influence functional\n self._mps will be contraction of A,B,w,p1,p2\n\n Returns\n -------\n step: int\n The current step count.\n state: ndarray\n Density matrix at the current step.\n\n \"\"\"\n prop_1_na = na.NodeArray([prop_1.T],\n left=False,\n right=False,\n name=\"first half-step\")\n prop_2_na = na.NodeArray([prop_2.T],\n left=True,\n right=False,\n name=\"second half-step\")\n\n if self._dkmax is None:\n mpo = self._mpo.copy()\n infl = self._influence(len(mpo))\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n infl_na = na.NodeArray([infl_four_legs],\n left=True,\n right=True)\n self._mpo = na.join(infl_na,\n self._mpo,\n name=\"The Time Evolving MPO\",\n copy=False)\n elif current_step <= self._dkmax:\n _, mpo = na.split(self._mpo,\n int(0 - current_step),\n copy=True)\n else: # current_step > self._dkmax\n mpo = self._mpo.copy()\n infl = self._influence(self._dkmax-current_step)\n if infl is not None:\n infl_four_legs = create_delta(infl, [1, 0, 0, 1])\n infl_na = na.NodeArray([infl_four_legs],\n left=True,\n right=True)\n _, mpo = na.split(self._mpo,\n index=1,\n copy=True)\n mpo = na.join(infl_na,\n mpo,\n name=\"Thee Time Evolving MPO\",\n copy=False)\n\n mpo.name = \"temporary MPO\"\n mpo.apply_vector(self._sum_west, left=True)\n\n self._mps.zip_up(prop_1_na,\n axes=[(0,0)],\n left_index=-1,\n right_index=-1,\n direction=\"left\",\n max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True,\n copy=False)\n\n if len(self._mps) != len(mpo):\n self._mps.contract(self._sum_north_na,\n axes=[(0,0)],\n left_index=0,\n right_index=0,\n direction=\"right\",\n copy=True)\n\n self._mps.zip_up(mpo,\n axes=[(0, 0)],\n left_index=0,\n right_index=-1,\n direction=\"right\",\n max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True,\n copy=False)\n\n self._mps.svd_sweep(from_index=-1,\n to_index=0,\n 
max_singular_values=None,\n max_truncation_err=self._epsrel,\n relative=True)\n\n self._mps = na.join(self._mps,\n prop_2_na,\n copy=False,\n name=f\"The MPS ({current_step})\")\n\n tmp_mps = self._mps.copy()\n for _ in range(len(tmp_mps)-1):\n tmp_mps.contract(self._sum_north_na,\n axes=[(0,0)],\n left_index=0,\n right_index=0,\n direction=\"right\",\n copy=True)\n\n assert len(tmp_mps) == 1\n assert not tmp_mps.left\n assert not tmp_mps.right\n assert tmp_mps.rank == 1\n state = tmp_mps.nodes[0].get_tensor()\n\n return state\n\nclass TempoBackend(BaseTempoBackend):\n \"\"\"\n ToDo\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n propagators: Callable[[int], Tuple[ndarray, ndarray]],\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Optional[Dict] = None):\n \"\"\"Create a TempoBackend object. \"\"\"\n super().__init__(\n initial_state,\n influence,\n unitary_transform,\n sum_north,\n sum_west,\n dkmax,\n epsrel,\n config)\n self._propagators = propagators\n\n def initialize(self)-> Tuple[int, ndarray]:\n \"\"\"\n ToDo\n \"\"\"\n self._step = 0\n self._initialize_mps_mpo()\n self._state = self._initial_state\n return self._step, copy(self._state)\n\n def compute_step(self) -> Tuple[int, ndarray]:\n \"\"\"\n ToDo\n \"\"\"\n self._step += 1\n prop_1, prop_2 = self._propagators(self._step-1)\n self._state = self._compute_system_step(self._step, prop_1, prop_2)\n return self._step, copy(self._state)\n\n\nclass TempoWithFieldBackend(BaseTempoBackend):\n \"\"\"\n backend for tensor network tempo with coherent field evolution.\n Note the only difference from TensorNetworkTempoBackend in the\n signature is the addition of the initial_field and compute_field\n parameters, and the change of the propagator signature.\n\n Parameters\n ----------\n initial_state: ndarray\n The initial density matrix (as a vector).\n initial_field: complex\n The initial field value.\n influence: callable(int) -> ndarray\n Callable that takes an integer `step` and returns the influence super\n operator of that `step`.\n unitary_transform: ndarray\n Unitary that transforms the coupling operator into a diagonal form.\n propagators: callable(int, ndarray, complex) -> ndarray, ndarray\n Callable that takes an integer `step`, an ndarray `state` and a complex\n `field` and returns the first and second half of the system propagator\n of that `step`.\n compute_field: callable(int, ndarray, complex, ndarray) -> complex\n Callable that takes an integer `step`, a complex `field` (the current\n value of the field) and two ndarrays for (respectively) the current and\n next density matrix as vectors, and returns the next field value.\n sum_north: ndarray\n The summing vector for the north legs.\n sum_west: ndarray\n The summing vector for the west legs.\n dkmax: int\n Number of influences to include. 
If ``dkmax == -1`` then all influences\n are included.\n epsrel: float\n Maximal relative SVD truncation error.\n \"\"\"\n def __init__(\n self,\n initial_state: ndarray,\n initial_field: ndarray,\n influence: Callable[[int], ndarray],\n unitary_transform: ndarray,\n propagators: Callable[[int, ndarray, complex],\n Tuple[ndarray, ndarray]],\n compute_field: Callable[[float, ndarray, complex], complex],\n sum_north: ndarray,\n sum_west: ndarray,\n dkmax: int,\n epsrel: float,\n config: Dict):\n # Field specific variables\n self._initial_field = initial_field\n self._compute_field = compute_field\n self._field = initial_field\n self._propagators = propagators\n \"\"\"Create a TempoWithFieldBackend object. \"\"\"\n super().__init__(initial_state,\n influence,\n unitary_transform,\n sum_north,\n sum_west,\n dkmax,\n epsrel,\n config)\n\n def initialize(self) -> Tuple[int, ndarray, complex]:\n \"\"\"See BaseBackend.initialize() for main docstring.\"\"\"\n self._step = 0\n self._initialize_mps_mpo()\n self._state = self._initial_state\n self._field = self._initial_field\n return self._step, copy(self._state), self._field\n\n def compute_step(self) -> Tuple[int, ndarray, complex]:\n \"\"\"\n ToDo\n \"\"\"\n current_step = self._step\n next_step = current_step + 1\n current_state = copy(self._state)\n current_field = self._field\n prop_1, prop_2 = self._propagators(current_step, current_state,\n current_field)\n next_state = self._compute_system_step(next_step, prop_1, prop_2)\n next_field = self._compute_field(current_step, current_state,\n current_field, next_state)\n self._state = next_state\n self._field = next_field\n self._step = next_step\n\n return self._step, copy(self._state), self._field\n"
] | [
[
"numpy.dot",
"numpy.moveaxis"
]
] |
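The backend in the row above transforms the first influence tensor with numpy.moveaxis and numpy.dot (the two APIs listed for this row): the leg to be contracted is moved to the last axis, contracted against the superoperator matrix, and then moved back. A minimal sketch of that axis-shuffling pattern on random stand-in tensors; the dimension d and the matrices are assumptions, not OQuPy objects:

import numpy as np

d = 4  # hypothetical Liouville-space dimension
infl_four_legs = np.random.rand(d, d, d, d)
super_u = np.random.rand(d, d)
super_u_dagg = super_u.conj().T

tmp = np.dot(np.moveaxis(infl_four_legs, 1, -1), super_u_dagg)  # contract leg 1
tmp = np.moveaxis(tmp, -1, 1)                                   # restore the leg order
tmp = np.dot(tmp, super_u.T)                                    # contract the last leg
print(tmp.shape)  # (d, d, d, d): same rank-4 tensor, with two legs transformed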
j-chan-hkust/deep_testing_of_advanced_learning_systems | [
"ec535e2b4dc489d407b664a138d3f5262b71d21e"
] | [
"2_data_collection/CIFAR_10/vgg16_CIFAR10.py"
] | [
"from __future__ import print_function\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras import optimizers\nimport numpy as np\nfrom keras.layers.core import Lambda\nfrom keras import backend as K\nfrom keras import regularizers\n\nclass cifar10vgg:\n def __init__(self,train=True):\n self.num_classes = 10\n self.weight_decay = 0.0005\n self.x_shape = [32,32,3]\n\n self.model = self.build_model()\n if train:\n self.model = self.train(self.model)\n else:\n self.model.load_weights('cifar10vgg.h5')\n\n\n def build_model(self):\n # Build the network of vgg for 10 classes with massive dropout and weight decay as described in the paper.\n\n model = Sequential()\n weight_decay = self.weight_decay\n\n model.add(Conv2D(64, (3, 3), padding='same',\n input_shape=self.x_shape,kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 
2)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(512,kernel_regularizer=regularizers.l2(weight_decay)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(Dropout(0.5))\n model.add(Dense(self.num_classes))\n model.add(Activation('softmax'))\n return model\n\n\n def normalize(self,X_train,X_test):\n #this function normalize inputs for zero mean and unit variance\n # it is used when training a model.\n # Input: training set and test set\n # Output: normalized training set and test set according to the trianing set statistics.\n mean = np.mean(X_train,axis=(0,1,2,3))\n std = np.std(X_train, axis=(0, 1, 2, 3))\n X_train = (X_train-mean)/(std+1e-7)\n X_test = (X_test-mean)/(std+1e-7)\n return X_train, X_test\n\n def normalize_production(self,x):\n #this function is used to normalize instances in production according to saved training set statistics\n # Input: X - a training set\n # Output X - a normalized training set according to normalization constants.\n\n #these values produced during first training and are general for the standard cifar10 training set normalization\n mean = 120.707\n std = 64.15\n return (x-mean)/(std+1e-7)\n\n def predict(self,x,normalize=True,batch_size=50):\n if normalize:\n x = self.normalize_production(x)\n return self.model.predict(x,batch_size)\n\n def train(self,model):\n\n model.load_weights(\"cifar10vgg.h5\")\n #training parameters\n batch_size = 128\n maxepoches = 250\n learning_rate = 0.01\n lr_decay = 1e-6\n lr_drop = 20\n # The data, shuffled and split between train and test sets:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train, x_test = self.normalize(x_train, x_test)\n\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n y_test = keras.utils.to_categorical(y_test, self.num_classes)\n\n def lr_scheduler(epoch):\n return learning_rate * (0.5 ** (epoch // lr_drop))\n reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)\n\n #data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n\n\n #optimization details\n sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])\n\n\n # training process in a for loop with learning rate drop every 25 epoches.\n\n historytemp = model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=batch_size),\n steps_per_epoch=x_train.shape[0] // batch_size,\n epochs=maxepoches,\n validation_data=(x_test, y_test),callbacks=[reduce_lr],verbose=2)\n model.save_weights('cifar10vgg.h5')\n return model\n\nif __name__ == '__main__':\n\n\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train = 
x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n\n model = cifar10vgg()\n\n predicted_x = model.predict(x_test)\n residuals = np.argmax(predicted_x,1)!=np.argmax(y_test,1)\n\n loss = sum(residuals)/len(residuals)\n print(\"the validation 0/1 loss is: \",loss)\n"
] | [
[
"numpy.std",
"numpy.argmax",
"numpy.mean"
]
] |
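The CIFAR-10 script in the row above standardizes inputs with training-set statistics and reports a 0/1 loss from argmax mismatches, which is where the listed numpy.mean, numpy.std, and numpy.argmax calls appear. A minimal numpy-only sketch of those two steps on random stand-in arrays (no Keras model is built here; shapes and data are assumed):

import numpy as np

# Stand-in images with CIFAR-10 shapes; the real script loads cifar10.load_data().
x_train = np.random.rand(8, 32, 32, 3).astype('float32')
x_test = np.random.rand(4, 32, 32, 3).astype('float32')

# Normalize both splits with statistics computed on the training split only.
mean = np.mean(x_train, axis=(0, 1, 2, 3))
std = np.std(x_train, axis=(0, 1, 2, 3))
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)

# 0/1 loss: fraction of samples whose predicted class differs from the label.
predicted = np.random.rand(4, 10)                 # stand-in for model.predict(x_test)
y_test = np.eye(10)[np.random.randint(0, 10, 4)]  # stand-in one-hot labels
residuals = np.argmax(predicted, 1) != np.argmax(y_test, 1)
print("the validation 0/1 loss is:", sum(residuals) / len(residuals))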
kul-group/MAZE-sim | [
"0f85e74bf93f9242a73bcfaa20a593ae966f38fa"
] | [
"scraps/forcefield_v2.py"
] | [
"from maze.extra_framework_maker import ExtraFrameworkMaker, ExtraFrameworkAnalyzer\nfrom maze.io_zeolite import read_vasp\nfrom maze.zeolite import PerfectZeolite, Zeolite\nfrom ase.neighborlist import natural_cutoffs, NeighborList\nimport os\nfrom pathlib import Path\nfrom ase.io import write, read, gromacs, proteindatabank\nfrom ase.visualize import view\nimport copy\nimport shutil\nfrom glob import glob\nfrom ase.constraints import FixAtoms\nfrom simtk.openmm.app import *\nfrom simtk.openmm import *\nfrom simtk.unit import *\nfrom sys import stdout\nfrom ase.geometry.analysis import Analysis\nimport numpy as np\nfrom itertools import permutations\nfrom lxml import etree\nfrom contextlib import closing\nfrom collections import OrderedDict\nfrom scipy.optimize import least_squares, minimize\nimport matplotlib.pyplot as plt\nfrom statistics import mode\nimport pickle\nimport time\nfrom ase.data import atomic_masses, atomic_numbers\n\n\ndef get_EF_atom_indices(atoms):\n \"\"\"\n for index tracking, to ensure we are comparing the DFT and FF forces on the same EF atoms after before and after\n scooping out the smaller cluster.\n alse used for recentering the cluster based on the EF-O atom\n \"\"\"\n TM_list = ['Pt', 'Cu', 'Co', 'Pd', 'Fe', 'Cr', 'Rh', 'Ru']\n index_EF_TM = [a.index for a in atoms if a.symbol in TM_list]\n index_Al = [a.index for a in atoms if a.symbol == 'Al']\n nl = NeighborList(natural_cutoffs(atoms), bothways=True, self_interaction=False)\n nl.update(atoms)\n Al_neigh_list = np.concatenate((nl.get_neighbors(index_Al[0])[0], nl.get_neighbors(index_Al[1])[0]))\n Al_neigh_list = [x for x in Al_neigh_list if atoms[x].symbol == 'O']\n\n TM_neigh_list = np.concatenate((nl.get_neighbors(index_EF_TM[0])[0], nl.get_neighbors(index_EF_TM[1])[0]))\n centering_o = [[x for x in TM_neigh_list if list(TM_neigh_list).count(x) > 1 and x not in Al_neigh_list][0]]\n return index_EF_TM + centering_o\n\n\ndef get_capped_cluster(atoms, folder_path, file_name, save_traj, EF_O_index):\n \"\"\" #TODO: check whether capping is necessary\n Inconsistent capping (remove all caps for now, does not need this cluster to be physical)\n Possible fix: change mult in neighbor list\n\n Extract smaller cluster containing the extra-framework atoms and cap all the O. Then the capped cluster is moved\n to the center of the cell to avoid boundary issue.\n Save cluster in both .traj file and .pdb format.\n :param atoms:\n :param folder_path:\n :param file_name:\n :param save_traj: if True, save clusters into .traj as well, for later comparison and trouble shooting\n :param EF_O_index: if not none, will use this value, else, will find the index using Extraframework code\n :return: 1. EF-cluster including 13 atoms, index of the EF atoms in original zeolite, index of the EF atoms in\n the current cluster (the later two output index lists share the ordering)\n \"\"\"\n EFMaker = ExtraFrameworkAnalyzer(atoms)\n cluster = atoms[[index for index in EFMaker.get_extraframework_cluster(EF_O_index)]]\n\n cluster_EF_index = get_EF_atom_indices(cluster)\n centering_pos = cluster.get_positions()[cluster_EF_index[-1]]\n recentered_cluster = EFMaker.recentering_atoms(cluster, centering_pos)[0]\n # FIXME: recentering doesn't work well for very small unit cells. eg. 
SOD\n # cluster = Zeolite(cluster).cap_atoms()\n\n proteindatabank.write_proteindatabank(folder_path + '/%s.pdb' % file_name, recentered_cluster)\n if save_traj is True:\n write(folder_path + '/%s.traj' % file_name, recentered_cluster)\n\n return cluster, EFMaker.get_extraframework_cluster(EF_O_index), cluster_EF_index\n\n\ndef label_pdb(folder_path, file_name, del_unlabeled_pdb):\n \"\"\"\n Relabeling the Atom name in proteindatabank file. (required step for openMM)\n The same atom type connecting to different neighboring types are treated differently due to differences in their\n chemical environments, and is therefore named separately.\n :param folder_path:\n :param file_name:\n :param del_unlabeled_pdb:\n \"\"\"\n filein = open(folder_path + '/%s.pdb' % file_name, 'r')\n fileout = open(folder_path + '/%s_labeled.pdb' % file_name, 'w')\n\n name_list = []\n for line in filein.readlines():\n if line.startswith('ATOM') or line.startswith('HETATM'):\n name = line[12:16].strip()\n name_list.append(name)\n name = name + str(name_list.count(name))\n name = name.rjust(4)\n line = line.replace(line[12:16], name, 1)\n # only replacing the first occurrence of line[12:16], atomic symbols are maintained\n fileout.writelines(line)\n\n filein.close()\n fileout.close()\n if del_unlabeled_pdb is True:\n os.remove(folder_path + '/%s.pdb' % file_name)\n\n\ndef get_bonds(cluster, mult=1, excluded_index=None, excluded_pair=None):\n \"\"\"\n Using ase.geometry.analysis.Analysis to get all bonds, then remove the repeated ones.\n Function also allows removing certain bonding pair defined by user (excluded_pair).\n Or removing pairs including certain atomic indices (excluded_index).\n :param cluster:\n :param mult:\n :param excluded_index: list of integers\n :param excluded_pair: list of lists\n :return: full bonding list, shortened list.\n If both excluded_index and excluded_pair are None, bonding list == shortened list\n \"\"\"\n if excluded_index is None:\n excluded_index = []\n if excluded_pair is None:\n excluded_pair = []\n\n nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n bond_list, shortened_list = [], []\n for count, indices in enumerate(Analysis(cluster, nl=nl).all_bonds[0]):\n for index in indices:\n if [count, index] not in bond_list and [index, count] not in bond_list:\n bond_list.append([count, index])\n\n for bond in bond_list:\n if all(single_index not in bond for single_index in excluded_index) and \\\n all(tuple(bond) not in list(permutations(pair)) for pair in excluded_pair):\n shortened_list.append(bond)\n\n return bond_list, shortened_list\n\n\ndef get_angles(cluster, mult=1, excluded_index=None, excluded_pair=None):\n \"\"\"\n #TODO: consider combining get_bonds and get_angles function\n ase.geometry.analysis.Analysis.unique_angles function does not work, return all angles.\n three-body interactions.\n :param excluded_pair: excluding all [particle1, particle2, particle3] lists involving the excluded pair\n \"\"\"\n if excluded_index is None:\n excluded_index = []\n if excluded_pair is None:\n excluded_pair = []\n\n nl = NeighborList(natural_cutoffs(cluster, mult=mult), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n angle_list, shortened_list = [], []\n for count, indices in enumerate(Analysis(cluster, nl=nl).all_angles[0]):\n for index in indices:\n if all(list(val) not in angle_list for val in list(permutations([count, index[0], index[1]]))):\n angle_list.append([count, index[0], index[1]])\n\n for 
angle in angle_list:\n if all(single_index not in angle for single_index in excluded_index) and \\\n all(list(value) not in excluded_pair for value in list(permutations(angle, 2))):\n shortened_list.append(angle)\n\n return angle_list, shortened_list\n\n\ndef write_xml(atoms, bonds, save_as):\n # on-the-fly generation of force field xml file, matching atoms and bonds with pdb file\n root = etree.Element('ForceField')\n\n xml_section = etree.SubElement(root, \"AtomTypes\")\n for atom in atoms:\n element_type = ''.join(filter(lambda x: not x.isdigit(), atom.name))\n # properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}\n if element_type == 'Cu' or atom.name == 'O9':\n atomic_mass = atomic_masses[atomic_numbers[element_type]]\n else:\n atomic_mass = 0.0\n properties = {'name': atom.name, 'class': atom.name, 'element': element_type, 'mass': str(atomic_mass)}\n etree.SubElement(xml_section, 'Type', **properties)\n\n xml_section = etree.SubElement(root, 'Residues')\n xml_residue = etree.SubElement(xml_section, 'Residue', name='MOL')\n for atom in atoms:\n etree.SubElement(xml_residue, 'Atom', name=atom.name, type=atom.name)\n for bond in bonds:\n etree.SubElement(xml_residue, 'Bond', atomName1=bond[0].name, atomName2=bond[1].name)\n\n tree = etree.ElementTree(root)\n xml = etree.tostring(tree, pretty_print=True).decode('utf-8')\n\n with closing(open(save_as, 'w')) as f:\n f.write(xml)\n\n\ndef check_atom_types(cluster, index):\n \"\"\" assign atom types, same element connected to different neighbors are assigned into different classes.\n For example, extra-framework O (in Cu-O-Cu) is in a different class from framework O (Si-O-Si). Each class\n assignment is unique (each atom belongs to one class and one class only).\n O_EF: extra-framework O\n O-Cu: framework O, connecting to one T-site(Al) and Cu\n O-H: framework O, connecting to one T-site(Al) and H (capping)\n \"\"\"\n nl = NeighborList(natural_cutoffs(cluster), bothways=True, self_interaction=False)\n nl.update(cluster)\n\n class_Al = [atom.index for atom in cluster if atom.symbol == 'Al']\n class_Cu = [atom.index for atom in cluster if atom.symbol == 'Cu']\n class_H = [atom.index for atom in cluster if atom.symbol == 'H']\n class_O_EF = [get_EF_atom_indices(cluster)[-1]]\n class_O_Cu = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF and\n all(val not in class_H for val in nl.get_neighbors(atom.index)[0])]\n class_O_H = [atom.index for atom in cluster if atom.symbol == 'O' and atom.index not in class_O_EF + class_O_Cu]\n\n if index in class_Al:\n return 'Al'\n if index in class_Cu:\n return 'Cu'\n if index in class_H:\n return 'H'\n if index in class_O_EF:\n return 'O-EF'\n if index in class_O_Cu:\n return 'O-Cu'\n if index in class_O_H:\n return 'O-H'\n else:\n return 'None'\n\n\ndef get_property_types(cluster, property_list):\n \"\"\" assign all bonding pairs or angles into different types based on differences in atom types. For example,\n O(extra-framework)-Cu is different from O(framework)-Cu.\n :param property_list: bond or angle index list of the cluster of interests\n :return type_dict: return a dictionary of all unique bond-pairs or angle types, with \"keys\" being integers starting\n from 0, and \"values\" being a list of two atom types string for bonds or three atom types string for angles.\n eg. 
{0: [AtomClass1, AtomClass2], 1: [AtomClass1, AtomClass3], ...} for bonds\n Note: Bond types such as [AtomClass1, AtomClass2] and [AtomClass2, AtomClass1] are considered the same. Same rules\n also apply for angles.\n :return whole_type_list: return the entire list of bond or angle types assignment of the input.\n len(whole_type_list) = len(my_list)\n \"\"\"\n type_dict, repeated_list, whole_type_list, count = {}, [], [], 0\n\n for items in property_list:\n my_list = []\n for val in items:\n my_list.append(check_atom_types(cluster, val))\n whole_type_list.append(my_list)\n if all(list(pair) not in repeated_list for pair in list(permutations(my_list))):\n repeated_list.append(my_list)\n type_dict[count] = my_list\n count += 1\n\n return type_dict, whole_type_list\n\n\ndef _get_index_dict(type_dict, whole_type_list, index_list):\n \"\"\" assign bond pairs or angles indices into different bond or angle types, all the pairs or angles within the same\n types will share the same set of force field parameters.\n :param type_dict:\n :param whole_type_list:\n :param index_list:\n :return index_dict: return a dictionary of all bond-pairs or angle indices for each unique bond or angle type,\n using the the same keys as type_dict.\n \"\"\"\n index_dict = {}\n for key, value in type_dict.items():\n temp_list = []\n for count, items in enumerate(whole_type_list):\n if any(list(pair) == value for pair in list(permutations(items))):\n temp_list.append(index_list[count])\n index_dict[key] = temp_list\n\n return index_dict\n\n\ndef get_type_index_pair(type_dict, whole_type_list, index_list):\n \"\"\" write bond_type and bond_index into a single dictionary; can use tuples as dictionary key, not lists\n :param type_dict:\n :param whole_type_list:\n :param index_list:\n \"\"\"\n bond_index_dict = _get_index_dict(type_dict, whole_type_list, index_list)\n type_index_dict = {}\n for key, value in type_dict.items():\n type_index_dict[tuple(value)] = bond_index_dict[key]\n return type_index_dict\n\n\ndef pretty_print(my_dict):\n \"\"\" for better visualization of the bond (or angle) types and bond (or angle) indices that belong to certain types.\n \"\"\"\n for key, value in my_dict.items():\n print(key, '-->', value)\n\n\ndef shorten_index_list_by_types(type_index_dict, exclude_atom_type=None, exclude_property_type=None,\n include_property_type=None, case=0):\n \"\"\"\n allow excluding certain property types or only including certain types\n \"\"\"\n\n if exclude_atom_type is not None and exclude_property_type is None:\n case = 1\n if exclude_property_type is not None and exclude_atom_type is None:\n case = 2\n if exclude_property_type is not None and exclude_atom_type is not None:\n case = 3\n if include_property_type is not None:\n case = 4\n\n shortened_list = []\n for type_list, index_list in type_index_dict.items():\n if case == 1 and all(single_type not in type_list for single_type in exclude_atom_type):\n shortened_list.extend(index_list)\n elif case == 2 and all(list(value) not in exclude_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n elif case == 3 and all(single_type not in type_list for single_type in exclude_atom_type) and \\\n all(list(value) not in exclude_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n elif case == 4 and any(list(value) in include_property_type for value in list(permutations(type_list))):\n shortened_list.extend(index_list)\n\n return shortened_list\n\n\ndef 
set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list):\n \"\"\" Feed pdb topology file and xml force field file into openMM, generate a system for the MD simulation/force\n calculation.\n :param folder_path:\n :param cluster_tag_number:\n :param shortened_bond_list:\n :return pdb:\n :return system:\n \"\"\"\n pdb = PDBFile(folder_path + '/cluster_%s_labeled.pdb' % cluster_tag_number)\n atoms = list(pdb.topology.atoms())\n\n for index in shortened_bond_list:\n pdb.topology.addBond(atoms[index[0]], atoms[index[1]])\n bonds = list(pdb.topology.bonds())\n\n write_xml(atoms, bonds, folder_path + '/forcefield.xml')\n FF = ForceField(folder_path + '/forcefield.xml')\n system = FF.createSystem(pdb.topology)\n return pdb, system\n\n\ndef custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,\n angle_type_index_dict=None, angle_param_dict=None):\n \"\"\" #todo: add argument allowing this custom function to be fed in as an input (more flexible used-designed ff)\n :param bond_list: list to be included into force field\n :param angle_list:\n :param bond_type_index_dict: {(type): [index], ...}\n :param angle_type_index_dict:\n :param bond_param_dict: {(type): [param], ...} Note: parameters here uses the standard units, kJ, nm, ...\n :param angle_param_dict:\n :return system: openMM system with custom forces added onto it\n \"\"\"\n force = CustomBondForce(\"D*(1-exp(-alpha*(r-r0)))^2\") # Morse bond\n force.addPerBondParameter(\"D\")\n force.addPerBondParameter(\"alpha\")\n force.addPerBondParameter(\"r0\")\n force.setUsesPeriodicBoundaryConditions(periodic=True)\n\n for bond in bond_list:\n for my_type, my_index in bond_type_index_dict.items():\n if any(list(val) in my_index for val in list(permutations(bond))):\n try:\n force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))\n except:\n my_type = tuple(reversed(my_type))\n force.addBond(int(bond[0]), int(bond[1]), bond_param_dict.get(my_type))\n # note: consider updating the info_dict to make it order insensitive\n system.addForce(force)\n\n force = HarmonicAngleForce() # Harmonic angle\n force.setUsesPeriodicBoundaryConditions(periodic=True) # adding periodic conditions\n\n for angle in angle_list:\n for my_type, my_index in angle_type_index_dict.items():\n if any(list(val) in my_index for val in list(permutations(angle))):\n type_tag = [tuple(val) for val in list(angle_param_dict.keys()) if val in list(permutations(my_type))]\n force.addAngle(int(angle[0]), int(angle[1]), int(angle[2]), *angle_param_dict.get(type_tag[0]))\n system.addForce(force)\n\n # assert(system.usesPeriodicBoundaryConditions() == True)\n return system\n\n\ndef get_openMM_forces(pdb, system, bond_list, bond_type_index_dict, bond_param_dict, angle_list=None,\n angle_type_index_dict=None, angle_param_dict=None):\n \"\"\" forces for a single configuration\n use numb to keep track of individual configurations\n integrator used for advancing the equations of motion in MD\n doesn't matter what we pick here since we only need the forces on the initial structure, but do need to have it\n :return: forces values on atoms in units of eV/A\n \"\"\"\n system = custom_openMM_force_object(system, bond_list, bond_type_index_dict, bond_param_dict, angle_list,\n angle_type_index_dict, angle_param_dict)\n integrator = LangevinMiddleIntegrator(3 * kelvin, 1 / picosecond, 0.4 * picoseconds) # randomly picked\n simulation = Simulation(pdb.topology, system, integrator)\n simulation.context.setPositions(pdb.positions)\n 
state = simulation.context.getState(getForces=True)\n forces = np.array(state.getForces(asNumpy=True)) * 1.0364e-2 * 0.1 # convert forces from kJ/nm mol to eV/A\n\n return forces\n\n\n# NOTE: section below deals with multiple input structures for force field training\n\ndef get_EF_O_index(traj):\n \"\"\"\n get the mode of EF_O, and use that to extract the EF cluster for the force field training\n all EF atoms should have the same indices regardless of there is binds on the zeolite, as long as the zeolite\n framework is the same - (all EF atoms, aka. Cu-O-Cu insertion follows the same procedures)\n :param traj: traj of configurations containing all atoms, including both the zeolite backbone and EF atoms\n \"\"\"\n EF_O_index_list = []\n for atoms in traj:\n try:\n EFAnalyzer = ExtraFrameworkAnalyzer(atoms)\n EF_O_index_list.append(EFAnalyzer.get_extraframework_cluster()[-1])\n except:\n ...\n return mode(tuple(EF_O_index_list))\n\n\ndef prep_topologies(folder_path, sample_zeolite, traj_name=None, save_traj=False, del_unlabeled_pdb=False,\n show_all=False):\n \"\"\"\n :param folder_path:\n :param sample_zeolite:\n :param traj_name:\n :param save_traj:\n :param del_unlabeled_pdb:\n :param show_all:\n \"\"\"\n if traj_name is not None:\n traj = read(folder_path + '/%s.traj' % traj_name, ':')\n output_dir = os.path.join(folder_path, traj_name)\n else:\n traj = read(folder_path + '/%s.traj' % sample_zeolite, ':')\n output_dir = os.path.join(folder_path, sample_zeolite)\n Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n cluster_traj, EF_O_index, EF_atoms_index, cluster_EF_index = [], get_EF_O_index(traj[0:100]), [], []\n for count, atoms in enumerate(traj):\n try:\n cluster, EF_atoms_index, cluster_EF_index = get_capped_cluster(atoms, output_dir, 'cluster_' + str(count),\n save_traj, [EF_O_index])\n label_pdb(output_dir, 'cluster_%s' % str(count), del_unlabeled_pdb)\n cluster_traj.append(cluster)\n print(sample_zeolite, count)\n except:\n print(sample_zeolite, count, 'failed!')\n\n if show_all is True:\n view(cluster_traj)\n\n return EF_atoms_index, cluster_EF_index\n\n\ndef reformat_inputs(bond_param_dict, angle_param_dict):\n \"\"\" reformat input dict into lists\n :return bond_type: List[List[str]] eg. ['Cu', 'O']\n :return angle_type: List[List[str]] eg. 
['Cu', 'O', 'Cu']\n :return param_list: List[float], extend all parameters into a single list, since scipy.optimize.minimize can only\n take an 1D array as initial guess parameter\n \"\"\"\n bond_type, angle_type, param_list = [], [], []\n for types, indices in bond_param_dict.items():\n bond_type.append(list(types))\n param_list.extend([val for val in np.array(indices)])\n\n for types, indices in angle_param_dict.items():\n angle_type.append(list(types))\n param_list.extend([val for val in np.array(indices)])\n\n return bond_type, angle_type, param_list\n\n\ndef get_required_objects_for_ff(folder_path, cluster_tag_number, included_bond_type, included_angle_type,\n bond_type_index_dict, angle_type_index_dict):\n \"\"\" To reduce computational cost, objects such as pdb, system, shortened_bond_list, bond_type_index_dict are kept\n fixed for each configuration during the optimization (only run once).\n \"\"\"\n\n shortened_bond_list = shorten_index_list_by_types(bond_type_index_dict, include_property_type=included_bond_type)\n shortened_angle_list = shorten_index_list_by_types(angle_type_index_dict, include_property_type=included_angle_type)\n pdb, system = set_up_openMM_system(folder_path, cluster_tag_number, shortened_bond_list)\n\n return pdb, system, shortened_bond_list, shortened_angle_list\n\n\ndef get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, EF_index):\n \"\"\" openMM forces for multiple configuration based on the same set of parameters\n \"\"\"\n bond_param_dict, angle_param_dict, number_of_bond_param = {}, {}, 0\n for count, (types, indices) in enumerate(ini_bond_param_dict.items()):\n bond_param_dict[types] = list(param[count * len(indices):(count + 1) * len(indices)])\n number_of_bond_param += len(indices)\n\n for count, (types, indices) in enumerate(ini_angle_param_dict.items()):\n angle_param_dict[types] = list(\n param[count * len(indices) + number_of_bond_param:(count + 1) * len(indices) + number_of_bond_param])\n\n predicted_f = []\n my_dict = copy.deepcopy(info_dict)\n for config_tag, info_list in my_dict.items():\n ff_forces = get_openMM_forces(info_list[0], info_list[1], info_list[2], bond_type_index_dict, bond_param_dict,\n info_list[3], angle_type_index_dict, angle_param_dict)[EF_index]\n predicted_f.append([force_list for force_list in ff_forces])\n\n return predicted_f\n\n\ndef get_DFT_forces_single(atoms, atom_index):\n \"\"\"\n reference DFT forces on single atoms\n \"\"\"\n f_vec = atoms.calc.results['forces'][atom_index] # self.atoms.get_forces()[atom_index]\n f_mag = np.linalg.norm(f_vec)\n return f_vec\n\n\ndef get_residue(param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, EF_index):\n \"\"\"\n optimize force field parameters by minimizing this loss function (MSE), weighted by DFT electronic energies\n k (Boltzmann's constant) = 8.617e-5 eV/K\n T = 298 K\n \"\"\"\n predicted_f = get_FF_forces(param, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, EF_index)\n residue = np.reshape(np.array(np.reshape(predicted_f, [-1, 3])) - np.array(np.reshape(DFT_f, [-1, 3])), -1)\n weighted_residue = residue * weights # 39 number of atoms\n print(np.mean(weighted_residue ** 2))\n return np.mean(weighted_residue ** 2)\n\n\ndef get_fitting_parameters(initial_param, info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, 
EF_index):\n # todo: more flexible bond reformating and feeding\n bounds = ((-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf),\n (0, np.Inf), (-np.Inf, np.Inf), (-np.Inf, np.Inf), (0, np.Inf), (0, np.pi),\n (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf), (0, np.pi), (-np.Inf, np.Inf))\n res = minimize(get_residue, initial_param, method='Powell', bounds=bounds, options={'ftol': 0.01, 'maxiter': 1000},\n args=(info_dict, DFT_f, weights, ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, EF_index))\n print(res.success)\n return res\n\n\ndef make_parity_plot(ff_forces, dft_forces, atom_name):\n \"\"\" plot FF forces vs. DFT forces\n \"\"\"\n plt.figure()\n fig, ax = plt.subplots()\n plt.plot(dft_forces, ff_forces, 'o')\n plt.xlabel('DFT_force', fontsize=18)\n plt.ylabel('FF_force', fontsize=18)\n lims = [np.min([ax.get_xlim(), ax.get_ylim()]), np.max([ax.get_xlim(), ax.get_ylim()])]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n plt.title('Force fitting on %s' % atom_name, fontsize=18)\n plt.show()\n\n\ndef func():\n tic = time.perf_counter()\n zeolite = 'SOD'\n folder_path, sample_zeolite, traj_name = '/Users/jiaweiguo/Box/openMM_FF', zeolite, zeolite + '_md'\n # prep_topologies(folder_path, sample_zeolite, traj_name, del_unlabeled_pdb=True)\n \"\"\"\n ini_bond_param_dict = {('O-Cu', 'Cu'): [1.2, 4, 0.3], ('O-EF', 'Cu'): [1.2, 4, 0.2], ('Al', 'Cu'): [1.2, 4, 0.4]}\n ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.3, 10], ('O-Cu', 'Cu', 'O-EF'): [2.3, 10],\n ('Al', 'Cu', 'O-EF'): [2.3, 10]}\n \"\"\"\n ini_bond_param_dict = {('O-Cu', 'Cu'): [60.097, 2.267, 0.228], ('O-EF', 'Cu'): [4405.247, 4.163, 0.177],\n ('Al', 'Cu'): [-2.656, 4.608, 0.413]}\n ini_angle_param_dict = {('Cu', 'O-EF', 'Cu'): [2.458, 16.552], ('O-Cu', 'Cu', 'O-EF'): [3.266, 4.136],\n ('Al', 'Cu', 'O-EF'): [1.925, 1.673]}\n included_bond_type, included_angle_type, ini_param = reformat_inputs(ini_bond_param_dict, ini_angle_param_dict)\n\n # set up type_index_dict using a single set of data #fixme: randomly pick several initial clusters to built dict\n cluster = read(os.path.join(folder_path, traj_name) + '/cluster_0_labeled.pdb', '0')\n bond_index_list, shortened_bond_index_list = get_bonds(cluster, mult=2)\n bond_type_dict, whole_bond_type_list = get_property_types(cluster, bond_index_list)\n angle_index_list, shortened_angle_index_list = get_angles(cluster, mult=2)\n angle_type_dict, whole_angle_type_list = get_property_types(cluster, angle_index_list)\n bond_type_index_dict = get_type_index_pair(bond_type_dict, whole_bond_type_list, bond_index_list)\n angle_type_index_dict = get_type_index_pair(angle_type_dict, whole_angle_type_list, angle_index_list)\n\n numb_skip = 2000\n info_dict, output_path = {}, os.path.join(folder_path, traj_name)\n files = [files for files in os.listdir(os.path.join(folder_path, traj_name)) if '.pdb' in files]\n for cluster_tag_number in np.arange(0, len(files), numb_skip):\n cluster_tag_number = int(cluster_tag_number)\n pdb, system, shortened_bond_list, shortened_angle_list = \\\n get_required_objects_for_ff(output_path, cluster_tag_number, included_bond_type, included_angle_type,\n bond_type_index_dict, angle_type_index_dict)\n info_dict[cluster_tag_number] = [pdb, system, shortened_bond_list, shortened_angle_list]\n print(cluster_tag_number)\n\n with open(output_path + '/info_dict_%s.pickle' % numb_skip, 'wb') as f:\n pickle.dump(info_dict, f)\n\n 
with open(folder_path + '/EF_index_dict.pickle', 'rb') as f:\n EF_index_dict = pickle.load(f)\n\n traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)\n DFT_f = []\n for atoms in traj:\n DFT_f.append([get_DFT_forces_single(atoms, atom_index=val) for val in EF_index_dict.get(zeolite)[-3:]])\n print(np.array(DFT_f).shape)\n\n ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']\n DFT_E = []\n for atoms in traj:\n DFT_E.append(atoms.calc.results['energy'])\n\n with open(os.path.join(folder_path, traj_name) + '/info_dict_%s.pickle' % numb_skip, 'rb') as f:\n info_dict = pickle.load(f)\n\n with open(folder_path + '/cluster_EF_index_dict.pickle', 'rb') as f:\n cluster_EF_index_dict = pickle.load(f)\n\n my_dict = copy.deepcopy(info_dict) # important, need to keep openMM \"systems\" fixed\n weights = []\n for value in np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298)):\n weights.extend([value, value, value, value, value, value, value, value, value])\n res = get_fitting_parameters(ini_param, my_dict, DFT_f, np.array(weights), ini_bond_param_dict, ini_angle_param_dict,\n bond_type_index_dict, angle_type_index_dict, cluster_EF_index_dict.get(zeolite))\n\n print([np.around(float(val), decimals=3) for val in res.x])\n FF_f = get_FF_forces(res.x, info_dict, ini_bond_param_dict, ini_angle_param_dict, bond_type_index_dict,\n angle_type_index_dict, cluster_EF_index_dict.get(zeolite))\n make_parity_plot(np.array(np.reshape(FF_f, [-1, 3])), np.array(np.reshape(DFT_f, [-1, 3])), 'Cu-O-Cu')\n\n force_dict = {'FF': np.array(np.reshape(FF_f, [-1, 3])), 'DFT': np.array(np.reshape(DFT_f, [-1, 3]))}\n with open(output_path + '/forces_%s.pickle' % numb_skip, 'wb') as f:\n pickle.dump(force_dict, f)\n\n toc = time.perf_counter()\n print(f\"Program terminated in {toc - tic:0.4f} seconds\")\n\n\nif __name__ == '__main__':\n # func()\n \n \"\"\" weighting factor for the loss function\n zeolite = 'SOD'\n folder_path, traj_name, numb_skip = '/Users/jiaweiguo/Box/openMM_FF', zeolite + '_md', 2000\n traj = read(folder_path + '/%s.traj' % traj_name, '0::%s' % numb_skip)\n ref_E = read(folder_path + '/%s.traj' % traj_name, '-1').calc.results['energy']\n DFT_E = []\n for atoms in traj:\n DFT_E.append(atoms.calc.results['energy'])\n weight = np.exp(-(np.array(DFT_E) - ref_E) / len(traj[0]) / (8.617e-5 * 298))\n plt.plot(DFT_E, weight, 'o')\n plt.xlabel('DFT electronic energies (eV)', fontsize=16)\n plt.ylabel('Boltzmann weighting', fontsize=16)\n plt.show()\n \"\"\"\n"
] | [
[
"matplotlib.pyplot.figure",
"scipy.optimize.minimize",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.linalg.norm",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
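The forcefield_v2.py entry above fits Morse-bond and harmonic-angle parameters by minimising a force-residual MSE that is weighted by Boltzmann factors of the per-atom DFT energies (k_B = 8.617e-5 eV/K, T = 298 K, as stated in its `get_residue` docstring). The sketch below is an illustrative, self-contained rendering of that weighting with synthetic energies and forces; the atom count, array shapes, reference energy, and all numeric values are assumptions, not values from the repository.

```python
# Illustrative sketch (not from the repository): the Boltzmann-weighted force
# MSE along the lines of get_residue in the forcefield_v2.py entry above.
# k_B = 8.617e-5 eV/K, T = 298 K; energies are normalised per atom.
import numpy as np

kT = 8.617e-5 * 298                          # eV
n_atoms = 39                                 # assumed number of atoms per frame
dft_E = np.array([-100.0, -99.8, -99.5])     # eV, one energy per configuration
ref_E = dft_E.min()                          # assumed reference energy

# One Boltzmann weight per configuration, normalised by the atom count.
weights = np.exp(-(dft_E - ref_E) / n_atoms / kT)

# Force residuals between FF and DFT on 3 tracked atoms (x, y, z components),
# flattened to one row per configuration.
ff_f = np.random.rand(3, 3, 3)               # (config, atom, xyz), synthetic
dft_f = np.random.rand(3, 3, 3)
residue = np.reshape(ff_f - dft_f, (len(dft_E), -1))

# Each configuration's force error is scaled by its weight before the MSE.
loss = np.mean((residue * weights[:, None]) ** 2)
print(loss)
```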
Duncanswilson/keras | [
"32aa192548b6b59bf407e583fbd246ba9f5f5676"
] | [
"keras/layers/recurrent.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Recurrent layers and their base classes.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. import constraints\nfrom ..engine import Layer\nfrom ..engine import InputSpec\nfrom ..utils.generic_utils import has_arg\n\n# Legacy support.\nfrom ..legacy.layers import Recurrent\nfrom ..legacy import interfaces\n\n\nclass StackedRNNCells(Layer):\n \"\"\"Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n # Arguments\n cells: List of RNN cell instances.\n\n # Examples\n\n ```python\n cells = [\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n ]\n\n inputs = keras.Input((timesteps, input_dim))\n x = keras.layers.RNN(cells)(inputs)\n ```\n \"\"\"\n\n def __init__(self, cells, **kwargs):\n for cell in cells:\n if not hasattr(cell, 'call'):\n raise ValueError('All cells must have a `call` method. '\n 'received cells:', cells)\n if not hasattr(cell, 'state_size'):\n raise ValueError('All cells must have a '\n '`state_size` attribute. '\n 'received cells:', cells)\n self.cells = cells\n super(StackedRNNCells, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n # States are a flat list\n # in reverse order of the cell stack.\n # This allows to preserve the requirement\n # `stack.state_size[0] == output_dim`.\n # e.g. states of a 2-layer LSTM would be\n # `[h2, c2, h1, c1]`\n # (assuming one LSTM has states [h, c])\n state_size = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n state_size += list(cell.state_size)\n else:\n state_size.append(cell.state_size)\n return tuple(state_size)\n\n def call(self, inputs, states, **kwargs):\n # Recover per-cell states.\n nested_states = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n nested_states.append(states[:len(cell.state_size)])\n states = states[len(cell.state_size):]\n else:\n nested_states.append([states[0]])\n states = states[1:]\n nested_states = nested_states[::-1]\n\n # Call the cells in order and store the returned states.\n new_nested_states = []\n for cell, states in zip(self.cells, nested_states):\n inputs, states = cell.call(inputs, states, **kwargs)\n new_nested_states.append(states)\n\n # Format the new states as a flat list\n # in reverse cell order.\n states = []\n for cell_states in new_nested_states[::-1]:\n states += cell_states\n return inputs, states\n\n def build(self, input_shape):\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell.build(input_shape)\n if hasattr(cell.state_size, '__len__'):\n output_dim = cell.state_size[0]\n else:\n output_dim = cell.state_size\n input_shape = (input_shape[0], input_shape[1], output_dim)\n self.built = True\n\n def get_config(self):\n cells = []\n for cell in self.cells:\n cells.append({'class_name': cell.__class__.__name__,\n 'config': cell.get_config()})\n config = {'cells': cells}\n base_config = super(StackedRNNCells, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from . 
import deserialize as deserialize_layer\n cells = []\n for cell_config in config.pop('cells'):\n cells.append(deserialize_layer(cell_config,\n custom_objects=custom_objects))\n return cls(cells, **config)\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n trainable_weights += cell.trainable_weights\n return trainable_weights + weights\n return weights\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n # Returns\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.weights\n return K.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n # Arguments\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n num_param = len(cell.weights)\n weights = weights[:num_param]\n for sw, w in zip(cell.weights, weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)\n\n @property\n def losses(self):\n losses = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell_losses = cell.losses\n losses += cell_losses\n return losses\n\n def get_losses_for(self, inputs=None):\n losses = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n cell_losses = cell.get_losses_for(inputs)\n losses += cell_losses\n return losses\n\n\nclass RNN(Layer):\n \"\"\"Base class for recurrent layers.\n\n # Arguments\n cell: A RNN cell instance. A RNN cell is a class that has:\n - a `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - a `state_size` attribute. This can be a single integer\n (single state) in which case it is\n the size of the recurrent state\n (which should be the same as the size of the cell output).\n This can also be a list/tuple of integers\n (one size per state). In this case, the first entry\n (`state_size[0]`) should be the same as\n the size of the cell output.\n It is also possible for `cell` to be a list of RNN cell instances,\n in which cases the cells get stacked on after the other in the RNN,\n implementing an efficient stacked RNN.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n input_dim: dimensionality of the input (integer).\n This argument (or alternatively,\n the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.\n input_length: Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument)\n\n # Input shape\n 3D tensor with shape `(batch_size, timesteps, input_dim)`.\n\n # Output shape\n - if `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `(batch_size, units)`.\n - if `return_sequences`: 3D tensor with shape\n `(batch_size, timesteps, units)`.\n - else, 2D tensor with shape `(batch_size, units)`.\n\n # Masking\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an [Embedding](embeddings.md) layer with the `mask_zero` parameter\n set to `True`.\n\n # Note on using statefulness in RNNs\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - specify `stateful=True` in the layer constructor.\n - specify a fixed batch size for your model, by passing\n if sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - specify `shuffle=False` when calling fit().\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n # Note on specifying the initial state of RNNs\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n # Note on passing external constants to RNNs\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. 
an attention mechanism.\n\n # Examples\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n \"\"\"\n\n def __init__(self, cell,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if isinstance(cell, (list, tuple)):\n cell = StackedRNNCells(cell)\n if not hasattr(cell, 'call'):\n raise ValueError('`cell` should have a `call` method. '\n 'The RNN was passed:', cell)\n if not hasattr(cell, 'state_size'):\n raise ValueError('The RNN cell should have '\n 'an attribute `state_size` '\n '(tuple of integers, '\n 'one integer per RNN state).')\n super(RNN, self).__init__(**kwargs)\n self.cell = cell\n self.return_sequences = return_sequences\n self.return_state = return_state\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.unroll = unroll\n\n self.supports_masking = True\n self.input_spec = [InputSpec(ndim=3)]\n self.state_spec = None\n self._states = None\n self.constants_spec = None\n self._num_constants = None\n\n @property\n def states(self):\n if self._states is None:\n if isinstance(self.cell.state_size, int):\n num_states = 1\n else:\n num_states = len(self.cell.state_size)\n return [None for _ in range(num_states)]\n return self._states\n\n @states.setter\n def states(self, states):\n self._states = states\n\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n if hasattr(self.cell.state_size, '__len__'):\n state_size = self.cell.state_size\n else:\n state_size = [self.cell.state_size]\n output_dim = state_size[0]\n\n if self.return_sequences:\n output_shape = (input_shape[0], input_shape[1], output_dim)\n else:\n output_shape = (input_shape[0], output_dim)\n\n if self.return_state:\n state_shape = [(input_shape[0], dim) for dim in state_size]\n return [output_shape] + state_shape\n else:\n return output_shape\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None for _ in self.states]\n return [output_mask] + state_mask\n else:\n return output_mask\n\n def build(self, input_shape):\n # Note input_shape will be list of shapes of initial states and\n # constants if these are passed in __call__.\n if self._num_constants is not None:\n constants_shape = input_shape[-self._num_constants:]\n else:\n constants_shape = None\n\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n batch_size = input_shape[0] if self.stateful 
else None\n input_dim = input_shape[-1]\n self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))\n\n # allow cell (if layer) to build before we set or validate state_spec\n if isinstance(self.cell, Layer):\n step_input_shape = (input_shape[0],) + input_shape[2:]\n if constants_shape is not None:\n self.cell.build([step_input_shape] + constants_shape)\n else:\n self.cell.build(step_input_shape)\n\n # set or validate state_spec\n if hasattr(self.cell.state_size, '__len__'):\n state_size = list(self.cell.state_size)\n else:\n state_size = [self.cell.state_size]\n\n if self.state_spec is not None:\n # initial_state was passed in call, check compatibility\n if [spec.shape[-1] for spec in self.state_spec] != state_size:\n raise ValueError(\n 'An `initial_state` was passed that is not compatible with '\n '`cell.state_size`. Received `state_spec`={}; '\n 'however `cell.state_size` is '\n '{}'.format(self.state_spec, self.cell.state_size))\n else:\n self.state_spec = [InputSpec(shape=(None, dim))\n for dim in state_size]\n if self.stateful:\n self.reset_states()\n\n def get_initial_state(self, inputs):\n # build an all-zero tensor of shape (samples, output_dim)\n initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)\n initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)\n initial_state = K.expand_dims(initial_state) # (samples, 1)\n if hasattr(self.cell.state_size, '__len__'):\n return [K.tile(initial_state, [1, dim])\n for dim in self.cell.state_size]\n else:\n return [K.tile(initial_state, [1, self.cell.state_size])]\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n inputs, initial_state, constants = self._standardize_args(\n inputs, initial_state, constants)\n\n if initial_state is None and constants is None:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n # If any of `initial_state` or `constants` are specified and are Keras\n # tensors, then add them to the inputs and temporarily modify the\n # input_spec to include them.\n\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n self.state_spec = [InputSpec(shape=K.int_shape(state))\n for state in initial_state]\n additional_specs += self.state_spec\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n self.constants_spec = [InputSpec(shape=K.int_shape(constant))\n for constant in constants]\n self._num_constants = len(constants)\n additional_specs += self.constants_spec\n # at this point additional_inputs cannot be empty\n is_keras_tensor = hasattr(additional_inputs[0], '_keras_history')\n for tensor in additional_inputs:\n if hasattr(tensor, '_keras_history') != is_keras_tensor:\n raise ValueError('The initial state or constants of an RNN'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors')\n\n if is_keras_tensor:\n # Compute the full input spec, including state and constants\n full_input = [inputs] + additional_inputs\n full_input_spec = self.input_spec + additional_specs\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(RNN, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n mask=None,\n training=None,\n initial_state=None,\n constants=None):\n # 
input shape: `(samples, time (padded with zeros), input_dim)`\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if isinstance(inputs, list):\n inputs = inputs[0]\n if initial_state is not None:\n pass\n elif self.stateful:\n initial_state = self.states\n else:\n initial_state = self.get_initial_state(inputs)\n\n if isinstance(mask, list):\n mask = mask[0]\n\n if len(initial_state) != len(self.states):\n raise ValueError('Layer has ' + str(len(self.states)) +\n ' states but was passed ' +\n str(len(initial_state)) +\n ' initial states.')\n input_shape = K.int_shape(inputs)\n timesteps = input_shape[1]\n if self.unroll and timesteps in [None, 1]:\n raise ValueError('Cannot unroll a RNN if the '\n 'time dimension is undefined or equal to 1. \\n'\n '- If using a Sequential model, '\n 'specify the time dimension by passing '\n 'an `input_shape` or `batch_input_shape` '\n 'argument to your first layer. If your '\n 'first layer is an Embedding, you can '\n 'also use the `input_length` argument.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a `shape` '\n 'or `batch_shape` argument to your Input layer.')\n\n kwargs = {}\n if has_arg(self.cell.call, 'training'):\n kwargs['training'] = training\n\n if constants:\n if not has_arg(self.cell.call, 'constants'):\n raise ValueError('RNN cell does not support constants')\n\n def step(inputs, states):\n constants = states[-self._num_constants:]\n states = states[:-self._num_constants]\n return self.cell.call(inputs, states, constants=constants,\n **kwargs)\n else:\n def step(inputs, states):\n return self.cell.call(inputs, states, **kwargs)\n\n last_output, outputs, states = K.rnn(step,\n inputs,\n initial_state,\n constants=constants,\n go_backwards=self.go_backwards,\n mask=mask,\n unroll=self.unroll,\n input_length=timesteps)\n if self.stateful:\n updates = []\n for i in range(len(states)):\n updates.append((self.states[i], states[i]))\n self.add_update(updates, inputs)\n\n if self.return_sequences:\n output = outputs\n else:\n output = last_output\n\n # Properly set learning phase\n if getattr(last_output, '_uses_learning_phase', False):\n output._uses_learning_phase = True\n for state in states:\n state._uses_learning_phase = True\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def _standardize_args(self, inputs, initial_state, constants):\n \"\"\"Standardize `__call__` to a single list of tensor inputs.\n\n When running a model loaded from file, the input tensors\n `initial_state` and `constants` can be passed to `RNN.__call__` as part\n of `inputs` instead of by the dedicated keyword arguments. 
This method\n makes sure the arguments are separated and that `initial_state` and\n `constants` are lists of tensors (or None).\n\n # Arguments\n inputs: tensor or list/tuple of tensors\n initial_state: tensor or list of tensors or None\n constants: tensor or list of tensors or None\n\n # Returns\n inputs: tensor\n initial_state: list of tensors or None\n constants: list of tensors or None\n \"\"\"\n if isinstance(inputs, list):\n assert initial_state is None and constants is None\n if self._num_constants is not None:\n constants = inputs[-self._num_constants:]\n inputs = inputs[:-self._num_constants]\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n def to_list_or_none(x):\n if x is None or isinstance(x, list):\n return x\n if isinstance(x, tuple):\n return list(x)\n return [x]\n\n initial_state = to_list_or_none(initial_state)\n constants = to_list_or_none(constants)\n\n return inputs, initial_state, constants\n\n def reset_states(self, states=None):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n batch_size = self.input_spec[0].shape[0]\n if not batch_size:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the batch size by passing a '\n '`batch_shape` argument to your Input layer.')\n # initialize state if None\n if self.states[0] is None:\n if hasattr(self.cell.state_size, '__len__'):\n self.states = [K.zeros((batch_size, dim))\n for dim in self.cell.state_size]\n else:\n self.states = [K.zeros((batch_size, self.cell.state_size))]\n elif states is None:\n if hasattr(self.cell.state_size, '__len__'):\n for state, dim in zip(self.states, self.cell.state_size):\n K.set_value(state, np.zeros((batch_size, dim)))\n else:\n K.set_value(self.states[0],\n np.zeros((batch_size, self.cell.state_size)))\n else:\n if not isinstance(states, (list, tuple)):\n states = [states]\n if len(states) != len(self.states):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(self.states)) + ' states, '\n 'but it received ' + str(len(states)) +\n ' state values. Input received: ' +\n str(states))\n for index, (value, state) in enumerate(zip(states, self.states)):\n if hasattr(self.cell.state_size, '__len__'):\n dim = self.cell.state_size[index]\n else:\n dim = self.cell.state_size\n if value.shape != (batch_size, dim):\n raise ValueError('State ' + str(index) +\n ' is incompatible with layer ' +\n self.name + ': expected shape=' +\n str((batch_size, dim)) +\n ', found shape=' + str(value.shape))\n # TODO: consider batch calls to `set_value`.\n K.set_value(state, value)\n\n def get_config(self):\n config = {'return_sequences': self.return_sequences,\n 'return_state': self.return_state,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful,\n 'unroll': self.unroll}\n if self._num_constants is not None:\n config['num_constants'] = self._num_constants\n\n cell_config = self.cell.get_config()\n config['cell'] = {'class_name': self.cell.__class__.__name__,\n 'config': cell_config}\n base_config = super(RNN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from . 
import deserialize as deserialize_layer\n cell = deserialize_layer(config.pop('cell'),\n custom_objects=custom_objects)\n num_constants = config.pop('num_constants', None)\n layer = cls(cell, **config)\n layer._num_constants = num_constants\n return layer\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n if isinstance(self.cell, Layer):\n return self.cell.trainable_weights\n return []\n\n @property\n def non_trainable_weights(self):\n if isinstance(self.cell, Layer):\n if not self.trainable:\n return self.cell.weights\n return self.cell.non_trainable_weights\n return []\n\n @property\n def losses(self):\n if isinstance(self.cell, Layer):\n return self.cell.losses\n return []\n\n def get_losses_for(self, inputs=None):\n if isinstance(self.cell, Layer):\n cell_losses = self.cell.get_losses_for(inputs)\n return cell_losses + super(RNN, self).get_losses_for(inputs)\n return super(RNN, self).get_losses_for(inputs)\n\n\nclass SimpleRNNCell(Layer):\n \"\"\"Cell class for SimpleRNN.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n super(SimpleRNNCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = 
regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n prev_output = states[0]\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training)\n\n dp_mask = self._dropout_mask\n rec_dp_mask = self._recurrent_dropout_mask\n\n if dp_mask is not None:\n h = K.dot(inputs * dp_mask, self.kernel)\n else:\n h = K.dot(inputs, self.kernel)\n if self.bias is not None:\n h = K.bias_add(h, self.bias)\n\n if rec_dp_mask is not None:\n prev_output *= rec_dp_mask\n output = h + K.dot(prev_output, self.recurrent_kernel)\n if self.activation is not None:\n output = self.activation(output)\n\n # Properly set learning phase on output tensor.\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n output._uses_learning_phase = True\n return output, [output]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(SimpleRNNCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SimpleRNN(RNN):\n \"\"\"Fully-connected RNN where the output is to be fed back to input.\n\n # Arguments\n units: Positive integer, dimensionality of the 
output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if 'implementation' in kwargs:\n kwargs.pop('implementation')\n warnings.warn('The `implementation` argument '\n 'in `SimpleRNN` has been deprecated. '\n 'Please remove it from your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. 
'\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = SimpleRNNCell(units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout)\n super(SimpleRNN, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(SimpleRNN, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout}\n base_config = super(SimpleRNN, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config:\n config.pop('implementation')\n return cls(**config)\n\n\nclass GRUCell(Layer):\n \"\"\"Cell class for the GRU layer.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n 
activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. 
These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(GRUCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units * 3),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 3),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units * 3,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n self.kernel_z = self.kernel[:, :self.units]\n self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]\n self.kernel_r = self.kernel[:, self.units: self.units * 2]\n self.recurrent_kernel_r = self.recurrent_kernel[:,\n self.units:\n self.units * 2]\n self.kernel_h = self.kernel[:, self.units * 2:]\n self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]\n\n if self.use_bias:\n self.bias_z = self.bias[:self.units]\n self.bias_r = self.bias[self.units: self.units * 2]\n self.bias_h = self.bias[self.units * 2:]\n else:\n self.bias_z = None\n self.bias_r = None\n self.bias_h = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] # previous memory\n\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=3)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=3)\n\n # dropout matrices for input units\n dp_mask = 
self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n if self.implementation == 1:\n if 0. < self.dropout < 1.:\n inputs_z = inputs * dp_mask[0]\n inputs_r = inputs * dp_mask[1]\n inputs_h = inputs * dp_mask[2]\n else:\n inputs_z = inputs\n inputs_r = inputs\n inputs_h = inputs\n x_z = K.dot(inputs_z, self.kernel_z)\n x_r = K.dot(inputs_r, self.kernel_r)\n x_h = K.dot(inputs_h, self.kernel_h)\n if self.use_bias:\n x_z = K.bias_add(x_z, self.bias_z)\n x_r = K.bias_add(x_r, self.bias_r)\n x_h = K.bias_add(x_h, self.bias_h)\n\n if 0. < self.recurrent_dropout < 1.:\n h_tm1_z = h_tm1 * rec_dp_mask[0]\n h_tm1_r = h_tm1 * rec_dp_mask[1]\n h_tm1_h = h_tm1 * rec_dp_mask[2]\n else:\n h_tm1_z = h_tm1\n h_tm1_r = h_tm1\n h_tm1_h = h_tm1\n z = self.recurrent_activation(x_z + K.dot(h_tm1_z,\n self.recurrent_kernel_z))\n r = self.recurrent_activation(x_r + K.dot(h_tm1_r,\n self.recurrent_kernel_r))\n\n hh = self.activation(x_h + K.dot(r * h_tm1_h,\n self.recurrent_kernel_h))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n matrix_x = K.dot(inputs, self.kernel)\n if self.use_bias:\n matrix_x = K.bias_add(matrix_x, self.bias)\n if 0. < self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n matrix_inner = K.dot(h_tm1,\n self.recurrent_kernel[:, :2 * self.units])\n\n x_z = matrix_x[:, :self.units]\n x_r = matrix_x[:, self.units: 2 * self.units]\n recurrent_z = matrix_inner[:, :self.units]\n recurrent_r = matrix_inner[:, self.units: 2 * self.units]\n\n z = self.recurrent_activation(x_z + recurrent_z)\n r = self.recurrent_activation(x_r + recurrent_r)\n\n x_h = matrix_x[:, 2 * self.units:]\n recurrent_h = K.dot(r * h_tm1,\n self.recurrent_kernel[:, 2 * self.units:])\n hh = self.activation(x_h + recurrent_h)\n h = z * h_tm1 + (1 - z) * hh\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(GRUCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass GRU(RNN):\n \"\"\"Gated Recurrent Unit - Cho et al. 2014.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n # References\n - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)\n - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1)\n - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n warnings.warn('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. '\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = GRUCell(units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(GRU, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(GRU, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n 
@property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(GRU, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\nclass LSTMCell(Layer):\n \"\"\"Cell class for the LSTM layer.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. 
These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(LSTMCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = (self.units, self.units)\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units * 4),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n if self.unit_forget_bias:\n def bias_initializer(_, *args, **kwargs):\n return K.concatenate([\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer((self.units * 2,), *args, **kwargs),\n ])\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(shape=(self.units * 4,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n\n self.kernel_i = self.kernel[:, :self.units]\n self.kernel_f = self.kernel[:, self.units: self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2: self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3:]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[:, self.units: self.units * 2]\n self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2: self.units * 3]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]\n\n if self.use_bias:\n self.bias_i = self.bias[:self.units]\n self.bias_f = self.bias[self.units: self.units * 2]\n self.bias_c = self.bias[self.units * 2: self.units * 3]\n self.bias_o = self.bias[self.units * 3:]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n 
self.bias_o = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=4)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=4)\n\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel_i)\n x_f = K.dot(inputs_f, self.kernel_f)\n x_c = K.dot(inputs_c, self.kernel_c)\n x_o = K.dot(inputs_o, self.kernel_o)\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias_i)\n x_f = K.bias_add(x_f, self.bias_f)\n x_c = K.bias_add(x_c, self.bias_c)\n x_o = K.bias_add(x_o, self.bias_o)\n\n if 0 < self.recurrent_dropout < 1.:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(x_i + K.dot(h_tm1_i,\n self.recurrent_kernel_i))\n f = self.recurrent_activation(x_f + K.dot(h_tm1_f,\n self.recurrent_kernel_f))\n c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,\n self.recurrent_kernel_c))\n o = self.recurrent_activation(x_o + K.dot(h_tm1_o,\n self.recurrent_kernel_o))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0. 
< self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, :self.units]\n z1 = z[:, self.units: 2 * self.units]\n z2 = z[:, 2 * self.units: 3 * self.units]\n z3 = z[:, 3 * self.units:]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h, c]\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(LSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass LSTM(RNN):\n \"\"\"Long-Short Term Memory layer - Hochreiter 1997.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](../activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n # References\n - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)\n \"\"\"\n\n @interfaces.legacy_recurrent_support\n def __init__(self, units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n warnings.warn('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n if K.backend() == 'theano':\n warnings.warn(\n 'RNN dropout is no longer supported with the Theano backend '\n 'due to technical limitations. 
'\n 'You can either set `dropout` and `recurrent_dropout` to 0, '\n 'or use the TensorFlow backend.')\n dropout = 0.\n recurrent_dropout = 0.\n\n cell = LSTMCell(units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n unit_forget_bias=unit_forget_bias,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(LSTM, self).__init__(cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n return super(LSTM, self).call(inputs,\n mask=mask,\n training=training,\n initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation': activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer': initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'unit_forget_bias': self.unit_forget_bias,\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint': constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 
'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation}\n base_config = super(LSTM, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\ndef _generate_dropout_ones(inputs, dims):\n # Currently, CTNK can't instantiate `ones` with symbolic shapes.\n # Will update workaround once CTNK supports it.\n if K.backend() == 'cntk':\n ones = K.ones_like(K.reshape(inputs[:, 0], (-1, 1)))\n return K.tile(ones, (1, dims))\n else:\n return K.ones((K.shape(inputs)[0], dims))\n\n\ndef _generate_dropout_mask(ones, rate, training=None, count=1):\n def dropped_inputs():\n return K.dropout(ones, rate)\n\n if count > 1:\n return [K.in_train_phase(\n dropped_inputs,\n ones,\n training=training) for _ in range(count)]\n return K.in_train_phase(\n dropped_inputs,\n ones,\n training=training)\n"
] | [
[
"numpy.zeros"
]
] |
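The row above quotes the Keras 2.x recurrent-layer source (SimpleRNNCell/SimpleRNN, GRUCell/GRU, LSTMCell/LSTM). The core of GRUCell.call in implementation mode 1 reduces to three gate equations once bias and dropout are stripped away; the sketch below re-derives a single GRU step in plain NumPy under that simplification, reusing the same [z | r | h] column layout that GRUCell.build slices out of the fused kernels. The helper names (hard_sigmoid, np_gru_step) and the random weights are illustrative assumptions only and do not appear in the quoted file.

import numpy as np

def hard_sigmoid(x):
    # Keras' default recurrent_activation: clip(0.2 * x + 0.5, 0, 1)
    return np.clip(0.2 * x + 0.5, 0.0, 1.0)

def np_gru_step(x, h_tm1, kernel, recurrent_kernel):
    # kernel: (input_dim, 3 * units), recurrent_kernel: (units, 3 * units),
    # both packed as [z | r | h] like GRUCell.build; bias and dropout omitted.
    units = h_tm1.shape[-1]
    k_z, k_r, k_h = (kernel[:, :units],
                     kernel[:, units:2 * units],
                     kernel[:, 2 * units:])
    rk_z, rk_r, rk_h = (recurrent_kernel[:, :units],
                        recurrent_kernel[:, units:2 * units],
                        recurrent_kernel[:, 2 * units:])
    z = hard_sigmoid(x @ k_z + h_tm1 @ rk_z)      # update gate
    r = hard_sigmoid(x @ k_r + h_tm1 @ rk_r)      # reset gate
    hh = np.tanh(x @ k_h + (r * h_tm1) @ rk_h)    # candidate state
    return z * h_tm1 + (1.0 - z) * hh             # new hidden state

# Smoke test with random weights: batch of 2, input_dim 3, 4 units.
rng = np.random.RandomState(0)
x = rng.randn(2, 3)
h0 = np.zeros((2, 4))
h1 = np_gru_step(x, h0, rng.randn(3, 12), rng.randn(4, 12))
print(h1.shape)  # (2, 4)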
Msegade/pyNastran | [
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab",
"ae36548579c6bb2ee3a4fff207f7211c1986a5ab"
] | [
"pyNastran/dev/bdf_vectorized/cards/dynamic.py",
"pyNastran/bdf/cards/dmig.py"
] | [
"# pylint: disable=C0103,R0902,R0904,R0914\n\"\"\"\nAll dynamic control cards are defined in this file. This includes:\n\n * FREQ\n * FREQ1\n * FREQ2 (not implemented)\n * FREQ3\n * FREQ4\n * FREQ5 (not implemented)\n * NLPCI\n * NLPARM\n * TSTEP\n * TSTEPNL\n\nAll cards are BaseCard objects.\n\n\"\"\"\nfrom math import log, exp, ceil\nimport numpy as np\nfrom numpy import unique, hstack\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default\nfrom pyNastran.bdf.cards.base_card import BaseCard\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double, double_or_blank,\n string_or_blank, blank, fields, components_or_blank\n)\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nif TYPE_CHECKING: # pragma: no cover\n from pyNastran.bdf.bdf import BDF\n\n\nclass DELAY(BaseCard):\n type = 'DELAY'\n\n def __init__(self, sid, nodes, components, delays, comment=''):\n \"\"\"\n +-------+-----+-----------+-----+--------+------+-----+--------+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+===========+=====+========+======+=====+========+=====+\n | DELAY | SID | POINT ID1 | C1 | T1 | P2 | C2 | T2 | |\n +-------+-----+-----------+-----+--------+------+-----+--------+-----+\n \"\"\"\n if comment:\n self.comment = comment\n\n #: Identification number of DELAY entry. (Integer > 0)\n self.sid = sid\n #: Grid, extra, or scalar point identification number. (Integer > 0)\n self.nodes = nodes\n #: Component number. (Integers 1 through 6 for grid points; zero or blank for extra\n #: or scalar points)\n self.components = components\n #: Time delay (tau) for designated point Pi and component Ci. (Real)\n self.delays = delays\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DELAY card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nodes = [integer(card, 2, 'node')]\n components = [integer(card, 3, 'components')]\n delays = [double_or_blank(card, 4, 'delay')]\n assert components[0] in [0, 1, 2, 3, 4, 5, 6], components\n if card.field(5):\n nodes.append(integer(card, 5, 'node'))\n components.append(integer(card, 6, 'components'))\n delays.append(double_or_blank(card, 7, 'delay'))\n assert components[1] in [0, 1, 2, 3, 4, 5, 6], components\n return DELAY(sid, nodes, components, delays, comment=comment)\n\n def add(self, delay):\n assert self.sid == delay.sid, 'sid=%s delay.sid=%s' % (self.sid, delay.sid)\n if delay.comment:\n if hasattr('_comment'):\n self._comment += delay.comment\n else:\n self._comment = delay.comment\n self.nodes += delay.nodes\n self.components += delay.components\n self.delays += delay.delays\n\n def get_delay_at_freq(self, freq):\n return self.nodes, self.components, self.delays\n\n #def cross_reference(self, model: BDF) -> None:\n #\"\"\"\n #Cross links the card so referenced cards can be extracted directly\n\n #Parameters\n #----------\n #model : BDF()\n #the BDF object\n #\"\"\"\n #msg = ', which is required by DELAY sid=%s' % self.sid\n #self.nodes_ref = model.Node(self.node_ids, msg=msg)\n\n #@property\n #def node_id1(self):\n #if isinstance(self.nodes[0], integer_types):\n #return self.nodes[0]\n #return self.nodes_ref[0].nid\n\n #@property\n #def node_id2(self):\n #if isinstance(self.nodes[1], integer_types):\n #return self.nodes[1]\n #return 
self.nodes_ref[1].nid\n\n @property\n def node_ids(self):\n node_ids = [self.node_id1]\n if len(self.components) == 2:\n node_ids.append(self.node_id2)\n return node_ids\n\n def raw_fields(self):\n list_fields = ['DELAY', self.sid]\n for nid, comp, delay in zip(self.node_ids, self.components, self.delays):\n if isinstance(nid, integer_types):\n nidi = nid\n else:\n nidi = nid.nid\n list_fields += [nidi, comp, delay]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n msg = self.comment\n node_ids = self.node_ids\n if size == 8:\n for nid, comp, delay in zip(node_ids, self.components, self.delays):\n msg += print_card_8(['DELAY', self.sid, nid, comp, delay])\n else:\n for nid, comp, delay in zip(node_ids, self.components, self.delays):\n msg += print_card_16(['DELAY', self.sid, nid, comp, delay])\n return msg\n\n\nclass DPHASE(BaseCard):\n type = 'DPHASE'\n\n def __init__(self, sid, nodes, components, phase_leads, comment=''):\n \"\"\"\n +--------+-----+-----------+-----+------+------+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+===========+=====+======+======+=====+=====+=====+\n | DPHASE | SID | POINT ID1 | C1 | TH1 | P2 | C2 | TH2 | |\n +--------+-----+-----------+-----+------+------+-----+-----+-----+\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n self.nodes = nodes\n self.components = components\n self.phase_leads = phase_leads\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DPHASE card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nodes = [integer(card, 2, 'node')]\n components = [integer(card, 3, 'components')]\n phase_leads = [double_or_blank(card, 4, 'phase_lead')]\n assert components[0] in [0, 1, 2, 3, 4, 5, 6], components\n if card.field(5):\n nodes.append(integer(card, 5, 'node'))\n components.append(integer(card, 6, 'components'))\n phase_leads.append(double_or_blank(card, 7, 'phase_lead'))\n assert components[1] in [0, 1, 2, 3, 4, 5, 6], components\n return DPHASE(sid, nodes, components, phase_leads, comment=comment)\n\n def add(self, dphase):\n assert self.sid == dphase.sid, 'sid=%s dphase.sid=%s' % (self.sid, dphase.sid)\n if dphase.comment:\n if hasattr('_comment'):\n self._comment += dphase.comment\n else:\n self._comment = dphase.comment\n self.nodes += dphase.nodes\n self.components += dphase.components\n self.phase_leads += dphase.phase_leads\n\n #def cross_reference(self, model: BDF) -> None:\n #\"\"\"\n #Cross links the card so referenced cards can be extracted directly\n\n #Parameters\n #----------\n #model : BDF()\n #the BDF object\n #\"\"\"\n #msg = ', which is required by DPHASE sid=%s' % self.sid\n #self.nodes_ref = model.Nodes(self.node_ids, msg=msg)\n\n #@property\n #def node_id1(self):\n #if isinstance(self.nodes[0], integer_types):\n #return self.nodes[0]\n #return self.nodes_ref[0].nid\n\n #@property\n #def node_id2(self):\n #if isinstance(self.nodes[1], integer_types):\n #return self.nodes[1]\n #return self.nodes_ref[1].nid\n\n @property\n def node_ids(self):\n node_ids = [self.node_id1]\n if len(self.components) == 2:\n node_ids.append(self.node_id2)\n return node_ids\n\n def raw_fields(self):\n list_fields = ['DPHASE', self.sid]\n for nid, comp, delay in zip(self.nodes, self.components, self.phase_leads):\n if isinstance(nid, integer_types):\n nidi = nid\n else:\n nidi = nid.nid\n list_fields += 
[nidi, comp, delay]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n msg = self.comment\n node_ids = self.node_ids\n if size == 8:\n for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):\n msg += print_card_8(['DPHASE', self.sid, nid, comp, delay])\n else:\n for nid, comp, delay in zip(node_ids, self.components, self.phase_leads):\n msg += print_card_16(['DPHASE', self.sid, nid, comp, delay])\n return msg\n\n\nclass FREQ(BaseCard):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems.\n\n +------+-----+-----+-----+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=====+=====+=====+======+=====+=====+=====+=====+\n | FREQ | SID | F1 | F2 | etc. | | | | |\n +------+-----+-----+-----+------+-----+-----+-----+-----+\n \"\"\"\n type = 'FREQ'\n\n def __init__(self, sid, freqs, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.freqs = np.unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n freqs = fields(double, card, 'freq', i=2, j=len(card))\n return FREQ(sid, freqs, comment=comment)\n\n def get_freqs(self):\n return self.freqs\n\n def add_frequencies(self, freqs):\n \"\"\"\n Combines the frequencies from 1 FREQx object with another.\n All FREQi entries with the same frequency set identification numbers\n will be used. Duplicate frequencies will be ignored.\n\n Parameters\n ----------\n freqs : ???\n the frequencies for a FREQx object\n \"\"\"\n #print(\"self.freqs = \",self.freqs)\n #print(\"freqs = \",freqs)\n self.freqs = unique(hstack([self.freqs, freqs]))\n\n def add_frequency_object(self, freq):\n \"\"\"\n :param freq: a FREQx object\n\n .. seealso:: :func:`addFrequencies`\n \"\"\"\n self.add_frequencies(freq.freqs)\n\n def raw_fields(self):\n list_fields = ['FREQ', self.sid] + list(self.freqs)\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ1(FREQ):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems by specification of a starting frequency, frequency\n increment, and the number of increments desired.\n\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+=====+\n | FREQ1 | SID | F1 | DF | NDF | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n\n .. 
note:: this card rewrites as a FREQ card\n \"\"\"\n type = 'FREQ1'\n\n def __init__(self, sid, f1, df, ndf, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.df = df\n self.ndf = ndf\n\n freqs = []\n for i in range(ndf):\n freqs.append(f1 + i * df)\n self.freqs = unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double_or_blank(card, 2, 'f1', 0.0)\n df = double(card, 3, 'df')\n ndf = integer_or_blank(card, 4, 'ndf', 1)\n assert len(card) <= 5, 'len(FREQ card) = %i\\ncard=%s' % (len(card), card)\n return FREQ1(sid, f1, df, ndf, comment=comment)\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ2(FREQ):\n \"\"\"\n Defines a set of frequencies to be used in the solution of frequency\n response problems by specification of a starting frequency, final\n frequency, and the number of logarithmic increments desired.\n\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+=====+=====+=====+=====+=====+\n | FREQ2 | SID | F1 | F2 | NDF | | | | |\n +-------+-----+-----+-----+-----+-----+-----+-----+-----+\n\n .. note:: this card rewrites as a FREQ card\n \"\"\"\n type = 'FREQ2'\n\n def __init__(self, sid, f1, f2, ndf=1, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.ndf = ndf\n\n d = 1. / ndf * log(f2 / f1)\n freqs = []\n for i in range(ndf):\n freqs.append(f1 * exp(i * d)) # 0 based index\n self.freqs = np.unique(freqs)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ2 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double(card, 2, 'f1') # default=0.0 ?\n f2 = double(card, 3, 'f2')\n ndf = integer_or_blank(card, 4, 'nf', 1)\n assert len(card) <= 5, 'len(FREQ2 card) = %i\\ncard=%s' % (len(card), card)\n return FREQ2(sid, f1, f2, ndf, comment=comment)\n #return FREQ(sid, freqs, comment=comment)\n\n\nclass FREQ3(FREQ):\n \"\"\"\n +-------+-----+------+-------+--------+-----+---------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 |\n +=======+=====+======+=======+========+=====+=========+\n | FREQ3 | SID | F1 | F2 | TYPE | NEF | CLUSTER |\n +-------+-----+------+-------+--------+-----+---------+\n | FREQ3 | 6 | 20.0 | 200.0 | LINEAR | 10 | 2.0 |\n +-------+-----+------+-------+--------+-----+---------+\n \"\"\"\n type = 'FREQ3'\n\n def __init__(self, f1, f2=None, Type='LINEAR', nef=10, cluster=1.0, comment=''):\n if comment:\n self.comment = comment\n if f2 is None:\n f2 = f1\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.Type = Type\n self.nef = nef\n self.cluster = cluster\n\n @classmethod\n def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n f1 = double(card, 1, 'f1')\n f2 = integer_or_blank(card, 1, 'f2', f1)\n Type = string_or_blank(card, 1, 'Type', 'LINEAR')\n nef = integer_or_blank(card, 1, 'nef', 10)\n cluster = double_or_blank(card, 1, 'cluster', 1.0)\n\n return FREQ3(sid, f1, f2, Type, nef, cluster, comment='')\n\n def 
raw_fields(self):\n return ['FREQ3', self.sid, self.f1, self.f2, self.Type, self.nef, self.cluster]\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass FREQ4(FREQ):\n \"\"\"\n Defines a set of frequencies used in the solution of modal frequency\n response problems by specifying the amount of 'spread' around each natural\n frequency and the number of equally spaced excitation frequencies within\n the spread.\n\n +-------+-----+-----+-----+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+=====+=====+=====+======+=====+=====+=====+=====+\n | FREQ4 | SID | F1 | F2 | FSPD | NFM | | | |\n +-------+-----+-----+-----+------+-----+-----+-----+-----+\n\n .. note:: this card rewrites as a FREQ card\n .. todo:: not done...\n \"\"\"\n type = 'FREQ4'\n\n def __init__(self, sid, f1, f2, fspread, nfm, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.f1 = f1\n self.f2 = f2\n self.fspread = fspread\n self.nfm = nfm\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FREQ4 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n f1 = double_or_blank(card, 2, 'f1', 0.0)\n f2 = double_or_blank(card, 3, 'f2', 1.e20)\n fspread = double_or_blank(card, 4, 'fspd', 0.1)\n nfm = integer_or_blank(card, 5, 'nfm', 3)\n assert len(card) <= 6, 'len(FREQ card) = %i\\ncard=%s' % (len(card), card)\n return FREQ4(sid, f1, f2, fspread, nfm, comment=comment)\n\n def raw_fields(self):\n list_fields = ['FREQ4', self.sid, self.f1, self.f2, self.fspread,\n self.nfm]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\n#class FREQ5(FREQ):\n #type = 'FREQ5'\n\n #def __init__(self, card=None, data=None, comment=''):\n #if comment:\n # self.comment = comment\n #raise NotImplementedError()\n\n #def write_card(self, size: int=8, is_double: bool=False) -> str:\n #card = self.repr_fields()\n #if size == 8:\n #return self.comment + print_card_8(card)\n #return self.comment + print_card_16(card)\n\n\nclass NLPARM(BaseCard):\n \"\"\"\n Defines a set of parameters for nonlinear static analysis iteration\n strategy.\n\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+========+======+======+=========+=======+=========+=========+========+\n | NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n | | MAXBIS | | | | MAXR | | RTOLB | CONV |\n +--------+--------+------+------+---------+-------+---------+---------+--------+\n \"\"\"\n type = 'NLPARM'\n\n def __init__(self, nlparm_id, ninc=10, dt=0.0, kmethod='AUTO', kstep=5,\n max_iter=25, conv='PW', int_out='NO',\n eps_u=0.01, eps_p=0.01, eps_w=0.01, max_div=3, max_qn=None, max_ls=4,\n fstress=0.2, ls_tol=0.5, max_bisect=5, max_r=20., rtol_b=20., comment=''):\n if 
comment:\n self.comment = comment\n self.nlparm_id = nlparm_id\n self.ninc = ninc\n self.dt = dt\n self.kmethod = kmethod\n self.kstep = kstep\n self.max_iter = max_iter\n self.conv = conv\n self.int_out = int_out\n\n # line 2\n self.eps_p = eps_p\n self.eps_u = eps_u\n self.eps_w = eps_w\n self.max_div = max_div\n self.max_qn = max_qn\n self.max_ls = max_ls\n self.fstress = fstress\n self.ls_tol = ls_tol\n\n # line 3\n self.max_bisect = max_bisect\n self.max_r = max_r\n self.rtol_b = rtol_b\n\n if self.max_qn is None:\n if kmethod == 'PFNT':\n self.max_qn = 0\n else:\n self.max_qn = max_iter\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NLPARM card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n nlparm_id = integer(card, 1, 'nlparm_id')\n ninc = integer_or_blank(card, 2, 'ninc', 10)\n dt = double_or_blank(card, 3, 'dt', 0.0)\n kmethod = string_or_blank(card, 4, 'kmethod', 'AUTO')\n kstep = integer_or_blank(card, 5, 'kstep', 5)\n max_iter = integer_or_blank(card, 6, 'max_iter', 25)\n conv = string_or_blank(card, 7, 'conv', 'PW')\n int_out = string_or_blank(card, 8, 'intOut', 'NO')\n\n # line 2\n eps_u = double_or_blank(card, 9, 'eps_u', 0.01)\n eps_p = double_or_blank(card, 10, 'eps_p', 0.01)\n eps_w = double_or_blank(card, 11, 'eps_w', 0.01)\n max_div = integer_or_blank(card, 12, 'max_div', 3)\n\n if kmethod == 'PFNT':\n max_qn = integer_or_blank(card, 13, 'max_qn', 0)\n else:\n max_qn = integer_or_blank(card, 13, 'max_qn', max_iter)\n\n max_ls = integer_or_blank(card, 14, 'max_ls', 4)\n fstress = double_or_blank(card, 15, 'fstress', 0.2)\n ls_tol = double_or_blank(card, 16, 'ls_tol', 0.5)\n\n # line 3\n max_bisect = integer_or_blank(card, 17, 'max_bisect', 5)\n max_r = double_or_blank(card, 21, 'max_r', 20.)\n rtol_b = double_or_blank(card, 23, 'rtol_b', 20.)\n assert len(card) <= 24, 'len(NLPARM card) = %i\\ncard=%s' % (len(card), card)\n return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,\n int_out, eps_u, eps_p, eps_w, max_div,\n max_qn, max_ls, fstress,\n ls_tol, max_bisect, max_r,\n rtol_b, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a NLPARM card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n (nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv, int_out, eps_u, eps_p,\n eps_w, max_div, max_qn, max_ls, fstress, ls_tol, max_bisect, max_r,\n rtol_b) = data\n\n if kmethod == 1:\n kmethod = 'AUTO'\n elif kmethod == 2:\n kmethod = 'ITER'\n elif kmethod == 4:\n kmethod = 'SEMI'\n elif kmethod == 3:\n kmethod = 'ADAPT'\n else:\n msg = 'nlparm_id=%s kmethod=%r data=%s' % (nlparm_id, kmethod, data)\n raise NotImplementedError(msg)\n\n if conv == 1:\n conv = 'W'\n elif conv == 2:\n conv = 'P'\n elif conv == 3:\n conv = 'PW'\n elif conv == 4:\n conv = 'U'\n elif conv == 5:\n conv = 'UW'\n elif conv == 6:\n conv = 'UP'\n elif conv == 7:\n conv = 'UPW'\n else:\n msg = 'nlparm_id=%s conv=%r data=%s' % (nlparm_id, conv, data)\n raise NotImplementedError(msg)\n\n if int_out == 0:\n int_out = 'NO'\n elif int_out == 1:\n int_out = 'YES'\n elif int_out == 2:\n int_out = 'ALL'\n else:\n msg = 'nlparm_id=%s int_out=%r data=%s' % (nlparm_id, int_out, data)\n raise NotImplementedError(msg)\n return NLPARM(nlparm_id, ninc, dt, kmethod, kstep, max_iter, conv,\n int_out, eps_u, eps_p, eps_w, 
max_div,\n max_qn, max_ls, fstress,\n ls_tol, max_bisect, max_r,\n rtol_b, comment=comment)\n\n def raw_fields(self):\n list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kmethod,\n self.kstep, self.max_iter, self.conv, self.int_out, self.eps_u,\n self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,\n self.fstress, self.ls_tol, self.max_bisect, None, None, None,\n self.max_r, None, self.rtol_b]\n return list_fields\n\n def repr_fields(self):\n ninc = set_blank_if_default(self.ninc, 10)\n dt = set_blank_if_default(self.dt, 0.0)\n kmethod = set_blank_if_default(self.kmethod, 'AUTO')\n kstep = set_blank_if_default(self.kstep, 5)\n max_iter = set_blank_if_default(self.max_iter, 25)\n conv = set_blank_if_default(self.conv, 'PW')\n int_out = set_blank_if_default(self.int_out, 'NO')\n eps_u = set_blank_if_default(self.eps_u, 0.01)\n eps_p = set_blank_if_default(self.eps_p, 0.01)\n eps_w = set_blank_if_default(self.eps_w, 0.01)\n max_div = set_blank_if_default(self.max_div, 3)\n max_qn = set_blank_if_default(self.max_qn, self.max_iter)\n max_ls = set_blank_if_default(self.max_ls, 4)\n fstress = set_blank_if_default(self.fstress, 0.2)\n ls_tol = set_blank_if_default(self.ls_tol, 0.5)\n max_bisect = set_blank_if_default(self.max_bisect, 5)\n max_r = set_blank_if_default(self.max_r, 20.)\n rtol_b = set_blank_if_default(self.rtol_b, 20.)\n\n list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kmethod, kstep, max_iter,\n conv, int_out, eps_u, eps_p, eps_w, max_div, max_qn, max_ls,\n fstress, ls_tol, max_bisect, None, None, None, max_r, None,\n rtol_b]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card) # having trouble with double precision...\n return self.comment + print_card_16(card)\n\n\nclass NLPCI(BaseCard):\n type = 'NLPCI'\n\n def __init__(self, nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,\n scale=0., desiter=12, mxinc=20, comment=''):\n if comment:\n self.comment = comment\n self.nlpci_id = nlpci_id\n self.Type = Type\n self.minalr = minalr\n self.maxalr = maxalr\n self.scale = scale\n self.desiter = desiter\n self.mxinc = mxinc\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NLPCI card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n nlpci_id = integer(card, 1, 'nlpci_id')\n Type = string_or_blank(card, 2, 'Type', 'CRIS')\n minalr = double_or_blank(card, 3, 'minalr', 0.25)\n maxalr = double_or_blank(card, 4, 'maxalr', 4.0)\n scale = double_or_blank(card, 5, 'scale', 0.0)\n blank(card, 6, 'blank')\n desiter = integer_or_blank(card, 7, 'desiter', 12)\n mxinc = integer_or_blank(card, 8, 'mxinc', 20)\n return NLPCI(nlpci_id, Type=Type, minalr=minalr, maxalr=maxalr,\n scale=scale, desiter=desiter, mxinc=mxinc, comment=comment)\n\n def raw_fields(self):\n list_fields = ['NLPCI', self.nlpci_id, self.Type, self.minalr,\n self.maxalr, self.scale, None, self.desiter, self.mxinc]\n return list_fields\n\n def repr_fields(self):\n #minalr = set_blank_if_default(self.minalr, 0.25)\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TF(BaseCard):\n \"\"\"\n Defines a dynamic transfer function of the form:\n (B0 + B1 p + B2 *p2)*ud sum(A0_i + 
A1_i*p + A2_i*p2)*ui = 0\n\n +----+-----+-----+------+------+------+--------+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +====+=====+=====+======+======+======+========+====+====+\n | TF | SID | GD | CD | B0 | B1 | B2 | | |\n +----+-----+-----+------+------+------+--------+----+----+\n | | G_1 | C_1 | A0_1 | A1_1 | A2_1 | etc. | | |\n +----+-----+-----+------+------+------+--------+----+----+\n\n \"\"\"\n type = 'TF'\n def __init__(self, sid, nid0, c, b0, b1, b2, nids, components, a, comment=''):\n if comment:\n self.comment = comment\n self.sid = sid\n self.nid0 = nid0\n self.c = c\n self.b0 = b0\n self.b1 = b1\n self.b2 = b2\n self.nids = nids\n self.components = components\n self.a = a\n\n def validate(self):\n pass\n #assert len(self.grids1) > 0, 'ngrids1=%s\\n%s' % (len(self.grids1), str(self))\n\n #def cross_reference(self, model: BDF) -> None:\n #pass\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TF card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n nid0 = integer(card, 2, 'nid0')\n # component 0 means an SPOINT/EPOINT\n c = components_or_blank(card, 3, 'components_0', 0)\n b0 = double_or_blank(card, 4, 'b0', 0.)\n b1 = double_or_blank(card, 5, 'b1', 0.)\n b2 = double_or_blank(card, 6, 'b2', 0.)\n\n nfields = len(card) - 9\n nrows = nfields // 8\n if nfields % 8 > 0:\n nrows += 1\n\n nids = []\n components = []\n a = []\n for irow in range(nrows):\n j = irow * 8 + 9\n #ifield = irow + 1\n nid = integer(card, j, 'grid_%i' % (irow + 1))\n component = components_or_blank(card, j + 1, 'components_%i' % (irow + 1), 0)\n a0 = double_or_blank(card, j + 2, 'a0_%i' % (irow + 1), 0.)\n a1 = double_or_blank(card, j + 3, 'a1_%i' % (irow + 1), 0.)\n a2 = double_or_blank(card, j + 4, 'a2_%i' % (irow + 1), 0.)\n nids.append(nid)\n components.append(component)\n a.append([a0, a1, a2])\n return TF(sid, nid0, c, b0, b1, b2, nids, components, a,\n comment=comment)\n\n def raw_fields(self):\n list_fields = ['TF', self.sid, self.nid0, self.c, self.b0, self.b1, self.b2, None, None]\n for grid, c, (a0, a1, a2) in zip(self.nids, self.components, self.a):\n list_fields += [grid, c, a0, a1, a2, None, None, None]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n # double precision?\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TSTEP(BaseCard):\n \"\"\"\n Transient Time Step\n Defines time step intervals at which a solution will be generated and\n output in transient analysis.\n\n +-------+------+------+------+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+======+======+======+=====+=====+=====+=====+\n | TSTEP | SID | N1 | DT1 | NO1 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | N2 | DT2 | NO2 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | etc. 
| | | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n\n +-------+------+------+------+------+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+======+======+======+=====+=====+=====+=====+\n | TSTEP | 101 | 9000 | .001 | 9000 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n | | | 1000 | .001 | 1 | | | | |\n +-------+------+------+------+------+-----+-----+-----+-----+\n \"\"\"\n type = 'TSTEP'\n\n def __init__(self, sid, N, DT, NO, comment=''):\n \"\"\"\n Creates a TSTEP card\n\n Parameters\n ----------\n sid : int\n the time step id\n N : List[int/None]\n ???\n DT : List[float/None]\n ???\n NO : List[int/None]\n ???\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n #: Number of time steps of value DTi. (Integer > 1)\n self.N = N\n #: Time increment (float)\n self.DT = DT\n #: Skip factor for output. Every NOi-th step will be saved for output (default=1)\n self.NO = NO\n\n def validate(self):\n assert len(self.N) == len(self.DT), 'N=%s DT=%s' % (self.N, self.DT)\n assert len(self.N) == len(self.NO), 'N=%s NO=%s' % (self.N, self.NO)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TSTEP card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n N = []\n DT = []\n NO = []\n\n nrows = int(ceil((len(card) - 1.) / 8.))\n for i in range(nrows):\n n = 8 * i + 1\n ni = integer_or_blank(card, n + 1, 'N' + str(i), 1)\n dt = double_or_blank(card, n + 2, 'dt' + str(i), 0.)\n no = integer_or_blank(card, n + 3, 'NO' + str(i), 1)\n N.append(ni)\n DT.append(dt)\n NO.append(no)\n return TSTEP(sid, N, DT, NO, comment=comment)\n\n def raw_fields(self):\n list_fields = ['TSTEP', self.sid]\n for (N, dt, no) in zip(self.N, self.DT, self.NO):\n list_fields += [N, dt, no, None, None, None, None, None]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass TSTEPNL(BaseCard):\n \"\"\"\n Defines parametric controls and data for nonlinear transient structural or\n heat transfer analysis. 
TSTEPNL is intended for SOLs 129, 159, and 600.\n Parameters for Nonlinear Transient Analysis.\n\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=========+========+========+=======+========+========+=======+=========+======+\n | TSTEPNL | ID | NDT | DT | NO | METHOD | KSTEP | MAXITER | CONV |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n | | MAXBIS | ADJUST | MSTEP | RB | MAXR | UTOL | RTOLB | |\n +---------+--------+--------+-------+--------+--------+-------+---------+------+\n\n method = None for NX, but apparently TSTEP as well, which is not in the QRG\n \"\"\"\n type = 'TSTEPNL'\n allowed_methods = ['AUTO', 'ITER', 'ADAPT', 'SEMI', 'FNT', 'PFNT', # MSC\n 'TSTEP'] # NX\n\n def __init__(self, sid, ndt, dt, no, method='ADAPT', kstep=None,\n max_iter=10, conv='PW', eps_u=1.e-2, eps_p=1.e-3,\n eps_w=1.e-6, max_div=2, max_qn=10, max_ls=2,\n fstress=0.2, max_bisect=5, adjust=5, mstep=None,\n rb=0.6, max_r=32., utol=0.1, rtol_b=20.,\n min_iter=None, comment=''):\n \"\"\"\n Creates a TSTEPNL card\n\n Parameters\n ----------\n sid : int\n the time step id\n ndt : ???\n ???\n dt : ???\n ???\n no : ???\n ???\n eps_u : float; default=1.e-2\n ???\n eps_p : float; default=1.e-3\n ???\n eps_w : float; default=1.e-6\n ???\n max_div : int; default=2\n ???\n max_qn : int; default=10\n ???\n max_ls : int; default=2\n ???\n fstress : float; default=0.2\n ???\n max_bisect : int; default=5\n ???\n adjust : int; default=5\n ???\n mstep : int; default=None\n ???\n rb : float; default=0.6\n ???\n max_r = float; default=32.\n ???\n utol = float; default=0.1\n ???\n rtol_b = float; default=20.\n ???\n min_iter : int; default=None\n not listed in all QRGs\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n\n # line 1\n self.sid = sid\n self.ndt = ndt\n self.dt = dt\n self.no = no\n self.method = method\n self.kstep = kstep\n self.max_iter = max_iter\n self.conv = conv\n\n self.eps_u = eps_u\n self.eps_p = eps_p\n self.eps_w = eps_w\n self.max_div = max_div\n self.max_qn = max_qn\n self.max_ls = max_ls\n self.fstress = fstress\n\n # line 3\n self.max_bisect = max_bisect\n self.adjust = adjust\n self.mstep = mstep\n self.rb = rb\n self.max_r = max_r\n self.utol = utol\n self.rtol_b = rtol_b\n self.min_iter = min_iter\n assert self.ndt >= 3\n assert self.dt > 0.\n\n def validate(self):\n if self.method not in self.allowed_methods:\n msg = 'method=%r allowed_methods=[%s]' % (\n self.method, ', '.join(self.allowed_methods))\n raise ValueError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a TSTEPNL card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n ndt = integer(card, 2, 'ndt')\n dt = double(card, 3, 'dt')\n no = integer_or_blank(card, 4, 'no', 1)\n\n #: .. note:: not listed in all QRGs\n method = string_or_blank(card, 5, 'method', 'ADAPT')\n if method == 'ADAPT':\n kstep = integer_or_blank(card, 6, 'kStep', 2)\n elif method == 'ITER':\n kstep = integer_or_blank(card, 6, 'kStep', 10)\n elif method in ['AUTO', 'TSTEP', 'SEMI']:\n kstep = None\n #kstep = blank(card, 6, 'kStep') #: .. todo:: not blank\n else:\n msg = 'invalid TSTEPNL Method. 
method=%r; allowed_methods=[%s]' % (\n method, ', '.join(cls.allowed_methods))\n raise RuntimeError(msg)\n max_iter = integer_or_blank(card, 7, 'maxIter', 10)\n conv = string_or_blank(card, 8, 'conv', 'PW')\n\n # line 2\n eps_u = double_or_blank(card, 9, 'epsU', 1.E-2)\n eps_p = double_or_blank(card, 10, 'epsP', 1.E-3)\n eps_w = double_or_blank(card, 11, 'epsW', 1.E-6)\n max_div = integer_or_blank(card, 12, 'maxDiv', 2)\n max_qn = integer_or_blank(card, 13, 'maxQn', 10)\n max_ls = integer_or_blank(card, 14, 'MaxLs', 2)\n fstress = double_or_blank(card, 15, 'fStress', 0.2)\n\n # line 3\n max_bisect = integer_or_blank(card, 17, 'maxBisect', 5)\n adjust = integer_or_blank(card, 18, 'adjust', 5)\n mstep = integer_or_blank(card, 19, 'mStep')\n rb = double_or_blank(card, 20, 'rb', 0.6)\n max_r = double_or_blank(card, 21, 'maxR', 32.)\n utol = double_or_blank(card, 22, 'uTol', 0.1)\n rtol_b = double_or_blank(card, 23, 'rTolB', 20.)\n\n # not listed in all QRGs\n min_iter = integer_or_blank(card, 24, 'minIter')\n assert len(card) <= 25, 'len(TSTEPNL card) = %i\\ncard=%s' % (len(card), card)\n return TSTEPNL(\n sid, ndt, dt, no, method, kstep, max_iter, conv,\n eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,\n max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,\n comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a TSTEPNL card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n (sid, ndt, dt, no, method, kstep, max_iter, conv, eps_u, eps_p, eps_w,\n max_div, max_qn, max_ls, fstress, max_bisect,\n adjust, mstep, rb, max_r, utol, rtol_b) = data\n\n if method == 1:\n method = 'AUTO'\n elif method == 3:\n method = 'ADAPT'\n else:\n raise NotImplementedError('tstepnl=%s method=%r data=%s' % (sid, method, data))\n\n if conv == 3:\n conv = 'PW'\n elif conv == 4:\n conv = 'U'\n #elif conv == 3:\n #conv = 'ADAPT'\n else:\n raise NotImplementedError('tstepnl=%s conv=%r data=%s' % (sid, conv, data))\n\n min_iter = None # not listed in DMAP 2005\n return TSTEPNL(\n sid, ndt, dt, no, method, kstep, max_iter, conv,\n eps_u, eps_p, eps_w, max_div, max_qn, max_ls, fstress,\n max_bisect, adjust, mstep, rb, max_r, utol, rtol_b, min_iter,\n comment=comment)\n #self.sid = sid\n #self.ndt = ndt\n #self.dt = dt\n #self.no = no\n #self.method = method\n #self.kStep = kStep\n #self.maxIter = maxIter\n #self.conv = conv\n\n ## line 2\n #self.epsU = epsU\n #self.epsP = epsP\n #self.epsW = epsW\n #self.maxDiv = maxDiv\n #self.maxQn = maxQn\n #self.MaxLs = maxLs\n #self.fStress = fStress\n\n ## line 3\n #self.maxBisect = maxBisect\n #self.adjust = adjust\n #self.mStep = mStep\n #self.rb = rb\n #self.maxR = maxR\n #self.uTol = uTol\n #self.rTolB = rTolB\n\n def raw_fields(self):\n list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, self.no,\n self.method, self.kstep, self.max_iter, self.conv, self.eps_u,\n self.eps_p, self.eps_w, self.max_div, self.max_qn, self.max_ls,\n self.fstress, None, self.max_bisect, self.adjust, self.mstep,\n self.rb, self.max_r, self.utol, self.rtol_b, self.min_iter]\n return list_fields\n\n def repr_fields(self):\n #no = set_blank_if_default(self.no,1)\n no = self.no\n method = set_blank_if_default(self.method, 'ADAPT')\n\n kstep = self.kstep\n #if self.method == 'ADAPT':\n #kStep = set_blank_if_default(self.kStep, 2)\n #elif self.method == 'ITER':\n #kStep = set_blank_if_default(self.kStep, 10)\n #else:\n #msg = 'invalid 
TSTEPNL Method. method=|%s|' %(self.method)\n #raise RuntimeError(msg)\n\n #maxIter = set_blank_if_default(self.maxIter, 10)\n conv = set_blank_if_default(self.conv, 'PW')\n\n eps_u = set_blank_if_default(self.eps_u, 1e-2)\n eps_p = set_blank_if_default(self.eps_p, 1e-3)\n eps_w = set_blank_if_default(self.eps_w, 1e-6)\n max_div = set_blank_if_default(self.max_div, 2)\n max_qn = set_blank_if_default(self.max_qn, 10)\n max_ls = set_blank_if_default(self.max_ls, 2)\n fstress = set_blank_if_default(self.fstress, 0.2)\n\n max_bisect = set_blank_if_default(self.max_bisect, 5)\n adjust = set_blank_if_default(self.adjust, 5)\n rb = set_blank_if_default(self.rb, 0.6)\n max_r = set_blank_if_default(self.max_r, 32.)\n utol = set_blank_if_default(self.utol, 0.1)\n rtol_b = set_blank_if_default(self.rtol_b, 20.)\n\n list_fields = ['TSTEPNL', self.sid, self.ndt, self.dt, no, method,\n kstep, self.max_iter, conv, eps_u, eps_p, eps_w, max_div, max_qn,\n max_ls, fstress, None, max_bisect, adjust, self.mstep, rb,\n max_r, utol, rtol_b, self.min_iter]\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n",
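The dynamic-control cards in the entry above (FREQ1, FREQ2, FREQ3, FREQ4, NLPARM, NLPCI, TF, TSTEP, TSTEPNL) each expand a handful of BDF fields into concrete solver settings. As a minimal, self-contained sketch, the snippet below mirrors only the two frequency expansions visible in the constructors above: FREQ1 builds a linear set (f1 + i*df) and FREQ2 a logarithmic set between f1 and f2. It assumes only numpy, is written as an illustration of those spacing rules, and is not part of the dumped pyNastran source.

from math import exp, log

import numpy as np


def freq1_frequencies(f1: float, df: float, ndf: int) -> np.ndarray:
    # linear spacing, as in FREQ1.__init__ above (the source stores np.unique of the list)
    return np.unique([f1 + i * df for i in range(ndf)])


def freq2_frequencies(f1: float, f2: float, ndf: int = 1) -> np.ndarray:
    # logarithmic spacing, as in FREQ2.__init__ above:
    #   d = 1/ndf * log(f2/f1);  freq_i = f1 * exp(i*d)
    d = 1. / ndf * log(f2 / f1)
    return np.unique([f1 * exp(i * d) for i in range(ndf)])


if __name__ == '__main__':
    print(freq1_frequencies(10.0, 5.0, 4))    # [10. 15. 20. 25.]
    print(freq2_frequencies(20.0, 200.0, 4))  # 4 log-spaced points starting at 20.0 Hz

Usage note: with the range(ndf) loop shown in the source, FREQ2 generates ndf points starting at f1; the final point approaches but does not include f2, which is why the example above stops short of 200 Hz.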
"# pylint: disable=R0902,R0904,R0914\nfrom math import sin, cos, radians, atan2, sqrt, degrees\nfrom itertools import count\nfrom typing import Tuple # , TYPE_CHECKING\n\nimport numpy as np\nfrom numpy import array, zeros\nfrom scipy.sparse import coo_matrix # type: ignore\n\nfrom pyNastran.utils.numpy_utils import integer_types\nfrom pyNastran.bdf.cards.base_card import BaseCard\nfrom pyNastran.bdf.field_writer_8 import print_card_8\nfrom pyNastran.bdf.field_writer_16 import print_card_16\nfrom pyNastran.bdf.field_writer_double import print_card_double\n\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double, string, string_or_blank,\n parse_components, interpret_value, integer_double_string_or_blank)\n\n\nclass DTI(BaseCard):\n \"\"\"\n +-----+-------+-----+------+-------+--------+------+-------------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=====+=======+=====+======+=======+========+======+=============+\n | DTI | UNITS | \"1\" | MASS | FORCE | LENGTH | TIME | STRESS |\n +-----+-------+-----+------+-------+--------+------+-------------+\n\n MSC\n\n +-----+-------+-----+------+-------+--------+------+-------------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=====+=======+=====+======+=======+========+======+=============+\n | DTI | UNITS | \"1\" | MASS | FORCE | LENGTH | TIME | TEMPERATURE |\n +-----+-------+-----+------+-------+--------+------+-------------+\n\n NX\n \"\"\"\n type = 'DTI'\n #_properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type', 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n fields = []\n return DTI(name, fields, comment='')\n\n def _finalize_hdf5(self, encoding):\n \"\"\"hdf5 helper function\"\"\"\n keys, values = self.fields\n\n # nan != nan\n values = [value if value == value else None for value in values]\n values_str = [value.decode(encoding) if isinstance(value, bytes) else value\n for value in values]\n #values = [valuei.decode(encoding) if isinstance(valuei, bytes) else (\n # None if np.isnan(valuei) else valuei)\n # for valuei in values]\n self.fields = {key : value for key, value in zip(keys, values_str)}\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n \"\"\"exports the elements in a vectorized way\"\"\"\n from pyNastran.bdf.bdf_interface.hdf5_exporter import _export_list\n for name, dti in sorted(model.dti.items()):\n if name == 'UNITS':\n i = 0\n for key, value in sorted(dti.fields.items()):\n #print(key, value)\n h5_group = h5_file.create_group(str(key))\n if value is None:\n h5_group.create_dataset(str(i), data=np.nan)\n else:\n h5_group.create_dataset(str(i), data=value)\n i += 1\n #fields = {\n #'mass' : mass,\n #'force' : force,\n #'length' : length,\n #'time' : time,\n #'temp_stress' : temp_stress\n #}\n else:\n for irecord, fields in sorted(dti.fields.items()):\n #h5_group = h5_file.create_group(str(irecord))\n attr = 'irecord=%s' % irecord\n namei = str(irecord)\n values = fields\n _export_list(h5_file, attr, namei, values, encoding)\n #print(h5_group)\n #print(irecord, fields)\n\n def __init__(self, name, fields, comment=''):\n \"\"\"\n Creates a DTI card\n\n Parameters\n ----------\n name : str\n UNITS\n fields : List[varies]\n the fields\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.name = name\n self.fields = fields\n assert len(fields) > 0, fields\n\n @classmethod\n def add_card(cls, card, comment):\n \"\"\"\n Adds a DTI card from ``BDF.add_card(...)``\n\n 
Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n if name == 'UNITS':\n integer(card, 2, '1')\n mass = string(card, 3, 'mass')\n force = string(card, 4, 'force')\n length = string(card, 5, 'length')\n time = string(card, 6, 'time')\n temp_stress = string_or_blank(card, 7, 'stress/temperature')\n fields = {\n 'mass' : mass,\n 'force' : force,\n 'length' : length,\n 'time' : time,\n 'temp_stress' : temp_stress\n }\n else:\n fields = []\n #field2 = card[2]\n\n list_fields = []\n irecord = integer(card, 2, 'record')\n if irecord == 0:\n for i in range(3, len(card)):\n val = integer_double_string_or_blank(\n card, i, 'T%i' % (i-1), default=32767)\n list_fields.append(val)\n else:\n for i in range(3, len(card)):\n val = integer_double_string_or_blank(\n card, i, 'T%i' % (i-1), default=None)\n list_fields.append(val)\n fields = {irecord: list_fields,}\n return DTI(name, fields, comment=comment)\n\n def raw_fields(self):\n if self.name == 'UNITS':\n mass = self.fields['mass']\n force = self.fields['force']\n length = self.fields['length']\n time = self.fields['time']\n temp_stress = self.fields['temp_stress']\n list_fields = ['DTI', self.name, '1', mass, force, length, time, temp_stress]\n else:\n list_fields = []\n for irecord, fields in sorted(self.fields.items()):\n nfields = len(fields)\n list_fields += ['DTI', self.name] + fields\n nleftover = nfields % 8\n if nleftover:\n list_fields += [None] * nleftover\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.name == 'UNITS':\n card = self.repr_fields()\n return self.comment + print_card_8(card)\n\n msg = self.comment\n for irecord, fields in sorted(self.fields.items()):\n list_fields = ['DTI', self.name, irecord, ] + fields\n msg += print_card_8(list_fields)\n return msg\n\n\nclass NastranMatrix(BaseCard):\n \"\"\"\n Base class for the DMIG, DMIJ, DMIJI, DMIK matrices\n \"\"\"\n def _finalize_hdf5(self, encoding):\n \"\"\"hdf5 helper function\"\"\"\n self.finalize()\n\n def __init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a NastranMatrix\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the jnode, jDOFs\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n if comment:\n self.comment = comment\n if Complex is None:\n Complex = []\n if tout is None:\n tout = 0\n\n polar = _set_polar(polar)\n\n if matrix_form not in [1, 2, 4, 5, 6, 8, 9]:\n msg = (\n 'matrix_form=%r must be [1, 2, 4, 5, 6, 8, 9]\\n'\n ' 1: Square\\n'\n ' 2: Rectangular\\n'\n #' 4: Lower Triangular\\n'\n #' 5: Upper 
Triangular\\n'\n ' 6: Symmetric\\n'\n #' 8: Identity (m=nRows, n=m)\\n'\n ' 9: Rectangular\\n' % matrix_form)\n raise ValueError(msg)\n self.name = name\n\n #: 4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = matrix_form\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n # 3=Complex, Single; 4=Complex, Double\n self.tin = tin\n\n #: 0-Set by cell precision\n self.tout = tout\n\n #: Input format of Ai, Bi. (Integer=blank or 0 indicates real, imaginary format;\n #: Integer > 0 indicates amplitude, phase format.)\n self.polar = polar\n\n self.ncols = ncols\n self.GCj = GCj\n self.GCi = GCi\n\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n assert self.tin in [3, 4], 'tin=%r and must 3 or 4 to be complex' % self.tin\n assert self.tout in [0, 3, 4], 'tin=%r and must 0, 3 or 4 to be complex' % self.tout\n assert isinstance(matrix_form, integer_types), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n assert not isinstance(matrix_form, bool), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n if finalize:\n self.finalize()\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NastranMatrix (DMIG, DMIJ, DMIK, DMIJI) card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n matrix_form = integer(card, 3, 'ifo')\n tin = integer(card, 4, 'tin')\n tout = integer_or_blank(card, 5, 'tout', 0)\n polar = integer_or_blank(card, 6, 'polar', 0)\n if matrix_form == 1: # square\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form == 6: # symmetric\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form in [2, 9]: # rectangular\n ncols = integer(card, 8, 'matrix_form=%s; ncol' % (matrix_form))\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n\n msg = (\n '%s name=%r matrix_form=%r is not supported. 
Valid forms:\\n'\n ' 4=Lower Triangular\\n'\n ' 5=Upper Triangular\\n'\n ' 6=Symmetric\\n'\n ' 8=Identity (m=nRows, n=m)\\n' % (cls.type, name, matrix_form)\n )\n raise NotImplementedError(msg)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return cls(name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment, finalize=False)\n\n @property\n def matrix_type(self):\n \"\"\"gets the matrix type\"\"\"\n if not isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n elif self.matrix_form == 6:\n matrix_type = 'symmetric'\n elif self.matrix_form in [2, 9]:\n matrix_type = 'rectangular'\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError('%s matrix_form=%r is not supported' % (\n self.type, self.matrix_form))\n return matrix_type\n\n def finalize(self):\n \"\"\"converts the lists into numpy arrays\"\"\"\n self.GCi = np.asarray(self.GCi)\n self.GCj = np.asarray(self.GCj)\n self.Real = np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @property\n def shape(self):\n \"\"\"gets the matrix shape\"\"\"\n if self.matrix_form in [1, 6]: # square, symmetric\n if self.ncols is not None:\n shape = (self.ncols, self.ncols)\n else:\n nrows, ncols = get_row_col_map(\n self, self.GCi, self.GCj, self.matrix_form)[:2]\n shape = (nrows, ncols)\n elif self.matrix_form in [2, 9]:\n raise NotImplementedError('need to pull the nrows after reading in everything')\n #shape = (self.ncols, self.ncols)\n else:\n raise NotImplementedError('matrix_form=%s' % self.matrix_form)\n return shape\n\n def _add_column(self, card, comment=''):\n \"\"\"adds an additional column entry to the matrix\"\"\"\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n\n name = string(card, 1, 'name')\n if name == 'UACCEL':\n self._add_column_uaccel()\n return\n\n Gj = integer(card, 2, 'Gj')\n # Cj = integer(card, 3, 'Cj')\n Cj = integer_or_blank(card, 3, 'Cj', 0)\n #Cj = parse_components(card, 3, 'Cj')\n assert 0 <= Cj <= 6, 'C%i must be between [0, 6]; Cj=%s' % (0, Cj)\n\n nfields = len(card)\n #print(\"nfields = %i\" % nfields)\n #print(\"card[5:] =\", card[5:])\n #print(\"(nfields - 5) %% 4 = %i\" % ((nfields - 5) % 4))\n\n nloops = (nfields - 5) // 4\n if (nfields - 5) % 4 in [2, 3]: # real/complex\n nloops += 1\n #assert nfields <= 8,'nfields=%s' % nfields\n #print(\"nloops = %i\" % nloops)\n assert nloops > 0, 'nloops=%s' % nloops\n\n for i in range(nloops):\n self.GCj.append((Gj, Cj))\n\n if self.is_complex:\n if self.is_polar:\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n self.GCi.append((Gi, Ci))\n magi = double(card, n + 2, 'ai')\n phasei = double(card, n + 3, 'bi')\n reali = magi * cos(radians(phasei))\n complexi = magi * sin(radians(phasei))\n self.Real.append(reali)\n 
self.Complex.append(complexi)\n else:\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n self.GCi.append((Gi, Ci))\n reali = double(card, n + 2, 'real')\n complexi = double(card, n + 3, 'complex')\n self.Real.append(reali)\n self.Complex.append(complexi)\n else:\n # real\n for i in range(nloops):\n n = 5 + 4 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n reali = double(card, n + 2, 'real')\n self.GCi.append((Gi, Ci))\n self.Real.append(reali)\n #print(\"GC=%s,%s real=%s\" % (Gi, Ci, reali))\n\n msg = '(len(GCj)=%s len(GCi)=%s' % (len(self.GCj), len(self.GCi))\n assert len(self.GCj) == len(self.GCi), msg\n #if self.is_complex:\n #self.Complex(double(card, v, 'complex')\n\n def get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool; default=False\n should the matrix be returned as a sparse matrix.\n Slower for dense matrices.\n apply_symmetry : bool; default=True\n If the matrix is symmetric (ifo=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n\n Returns\n -------\n M : numpy.ndarray or scipy.coomatrix\n the matrix\n rows : dict[int] = [int, int]\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols: dict[int] = [int, int]\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. warning:: is_sparse=True WILL fail\n\n \"\"\"\n return get_matrix(self, is_sparse=is_sparse, apply_symmetry=apply_symmetry)\n\n @property\n def is_real(self):\n \"\"\"real vs. complex attribute\"\"\"\n return not self.is_complex\n\n @property\n def is_complex(self):\n \"\"\"real vs. complex attribute\"\"\"\n if self.tin in [1, 2]: # real\n return False\n elif self.tin in [3, 4]: # complex\n return True\n msg = ('Matrix %r must have a value of TIN = [1, 2, 3, 4].\\n'\n 'TIN defines the type (real, complex) '\n 'of the matrix. TIN=%r.\\n'\n ' TIN=1,2 -> real\\n'\n ' TIN=3,4 -> complex' % (self.name, self.tin))\n raise ValueError(msg)\n\n @property\n def is_polar(self):\n \"\"\"\n Used by:\n - DMIG\n - DMIJ\n - DMIJI\n - DMIK\n\n Not used by:\n - DMI\n - DMIAX\n - DMIG, UACCEL\n - DMIGOUT\n - DMIGROT\n\n \"\"\"\n if self.polar == 0: # real, imag\n return False\n elif self.polar == 1: # mag, phase\n return True\n elif self.polar is None:\n return False\n msg = ('Matrix %r must have a value of POLAR = [0, 1].\\n'\n 'POLAR defines the type (real/imag or mag/phase) complex) '\n 'of the matrix. POLAR=%r.' 
% (self.name, self.polar))\n raise ValueError(msg)\n\n @property\n def tin_dtype(self):\n \"\"\"gets the input dtype\"\"\"\n return _get_dtype(self.is_complex, self.tin)\n\n @property\n def tout_dtype(self):\n \"\"\"gets the output dtype\"\"\"\n return _get_dtype(self.is_complex, self.tout)\n\n def __repr__(self):\n return self.write_card(size=8, is_double=False)\n\n def fill_in_default_components(self, model):\n for i, (Gi, Ci) in enumerate(self.GCi):\n if Ci is None:\n node = model.nodes[Gi]\n if node.type == 'GRID':\n msg = ('Ci on DMIG card must be 1, 2, 3, 4, 5, or 6; '\n 'Node=%i (GRID); Ci=%s' % (Gi, Ci))\n raise RuntimeError(msg)\n elif node.type in ['SPOINT', 'EPOINT']:\n Ci = 0\n else:\n raise NotImplementedError(node)\n self.GCi[i] = [Gi, Ci]\n\n for i, (Gj, Cj) in enumerate(self.GCj):\n if Cj is None:\n node = model.nodes[Gj]\n if node.type == 'GRID':\n msg = ('Cj on DMIG card must be 1, 2, 3, 4, 5, or 6; '\n 'Node=%i (GRID); Cj=%s' % (Gj, Cj))\n raise RuntimeError(msg)\n elif node.type in ['SPOINT', 'EPOINT']:\n Cj = 0\n else:\n raise NotImplementedError(node)\n self.GCj[i] = [Gj, Cj]\n return\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n\n assert isinstance(self.GCi, (list, np.ndarray)), 'type(GCi)=%s' % type(self.GCi)\n assert isinstance(self.GCj, (list, np.ndarray)), 'type(GCj)=%s' % type(self.GCj)\n assert isinstance(self.Real, (list, np.ndarray)), 'type(Real)=%s' % type(self.Real)\n #assert isinstance(self.GCi[0], (list, np.ndarray)), 'type(GCi[0])=%s' % type(self.GCi[0])\n #assert isinstance(self.GCj[0], (list, np.ndarray)), 'type(GCj[0])=%s' % type(self.GCj[0])\n\n msg = '\\n$' + '-' * 80\n msg += '\\n$ %s Matrix %s\\n' % (self.type, self.name)\n list_fields = [self.type, self.name, 0, self.matrix_form, self.tin,\n self.tout, self.polar, None, self.ncols]\n if size == 8:\n msg += print_card_8(list_fields)\n else:\n msg += print_card_16(list_fields)\n\n if self.is_complex:\n if self.is_polar:\n for (GCi, GCj, reali, complexi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n magi = sqrt(reali**2 + complexi**2)\n if reali == 0.0:\n phasei = 0.0\n else:\n phasei = degrees(atan2(complexi, reali))\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], magi, phasei]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n else:\n for (GCi, GCj, reali, complexi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], reali, complexi]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n else:\n for (GCi, GCj, reali) in zip(self.GCi, self.GCj, self.Real):\n list_fields = [self.type, self.name, GCj[0], GCj[1],\n None, GCi[0], GCi[1], reali, None]\n if size == 8:\n msg += print_card_8(list_fields)\n elif is_double:\n msg += print_card_double(list_fields)\n else:\n msg += print_card_16(list_fields)\n\n #msg += '\\n\\nGCi[0]=%s\\n' % self.GCi[0]\n #msg += 'GCj[0]=%s\\n' % self.GCj[0]\n #msg += 'Real[0]=%s\\n' % self.Real[0]\n #assert isinstance(self.GCi[0], (list, np.ndarray)), msg\n #assert isinstance(self.GCj[0], (list, np.ndarray)), msg\n #assert isinstance(self.Real[0], (list, 
np.ndarray)), msg\n\n return msg\n\n\nclass DMIG_UACCEL(BaseCard):\n \"\"\"\n Direct Matrix Input of Enforced Static Acceleration\n Defines rigid body accelerations in the basic coordinate system.\n\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | |\n +======+========+=====+=====+=====+=====+=====+=======+=======+\n | DMIG | UACCEL | \"0\" | \"9\" | TIN | | | | NCOL |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | L | | | G1 | C1 | X1 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | | G2 | C2 | X2 | | G3 | C3 | X3 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 0 | 9 | 1 | | | | 4 |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 2 | | | 2 | 3 | 386.4 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 3 | | | 2 | 4 | 3.0 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n | DMIG | UACCEL | 4 | | | 2 | 6 | 1.0 | |\n +------+--------+-----+-----+-----+-----+-----+-------+-------+\n \"\"\"\n type = 'DMIG'\n name = 'UACCEL'\n def __init__(self, tin, ncol, load_sequences, comment=''):\n if comment:\n self.comment = comment\n self.tin = tin\n self.ncol = ncol\n self.load_sequences = load_sequences\n #print(str(self))\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmig, encoding)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DMIG,UACCEL card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n tin = integer(card, 4, 'tin')\n ncol = integer_or_blank(card, 8, 'ncol')\n return DMIG_UACCEL(tin, ncol, load_sequences={}, comment=comment)\n\n def _add_column(self, card, comment=''):\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n load_seq = integer(card, 2, 'load_seq')\n\n i = 0\n ifield = 5\n self.load_sequences[load_seq] = []\n assert len(card) >= 8, 'len=%s card=%s' % (len(card), card)\n while ifield < len(card):\n g1 = integer(card, ifield, 'nid%d' % i)\n c1 = parse_components(card, ifield+1, 'c%d' % i)\n x1 = double(card, ifield+2, 'x%d' % i)\n #assert len(card) <= 8, 'len=%s card=%s' % (len(card), card)\n gcx = [g1, c1, x1]\n self.load_sequences[load_seq].append(gcx)\n ifield += 4\n i += 1\n\n\n @staticmethod\n def finalize():\n \"\"\"a passer method\"\"\"\n pass\n\n def raw_fields(self):\n list_fields = [\n 'DMIG', 'UACCEL', 0, 9, self.tin, None, None, None, self.ncol\n ]\n for lseq, ncx in sorted(self.load_sequences.items()):\n list_fields += [lseq, None, None]\n for ncxi in ncx:\n list_fields += ncxi\n #for (nid, comp, xi) in ncx:\n #print('list_fields= %s' % list_fields)\n self.write_card()\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n msg = self.write_card_8()\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n msg = self.write_card_16()\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n return msg\n\n def write_card_8(self):\n \"\"\"writes the card in small field format\"\"\"\n return self._write_card(print_card_8)\n\n def write_card_16(self):\n \"\"\"writes the card in small large format\"\"\"\n 
return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card\"\"\"\n msg = '\\n$' + '-' * 80\n msg += '\\n$ DMIG Matrix UACCEL\\n'\n list_fields = [\n 'DMIG', 'UACCEL', 0, 9, self.tin, None, None, None, self.ncol,\n ]\n msg += func(list_fields)\n\n for lseq, ncx in sorted(self.load_sequences.items()):\n list_fields = ['DMIG', 'UACCEL']\n list_fields += [lseq, None, None]\n for ncxi in ncx:\n list_fields += ncxi + [None]\n list_fields.pop()\n msg += func(list_fields)\n #print(msg)\n #if self.is_complex:\n #msg += self._get_complex_fields(func)\n #else:\n #msg += self._get_real_fields(func)\n return msg\n\n def __repr__(self):\n return self.write_card(size=8)\n\nclass DMIG(NastranMatrix):\n \"\"\"\n Defines direct input matrices related to grid, extra, and/or scalar points.\n The matrix is defined by a single header entry and one or more column\n entries. A column entry is required for each column with nonzero elements.\n\n +------+------+----+-----+-----+------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+======+====+=====+=====+======+=======+====+======+\n | DMIG | NAME | 0 | IFO | TIN | TOUT | POLAR | | NCOL |\n +------+------+----+-----+-----+------+-------+----+------+\n | DMIG | NAME | GJ | CJ | | G1 | C1 | A1 | B1 |\n +------+------+----+-----+-----+------+-------+----+------+\n | | G2 | C2 | A2 | B2 | | | | |\n +------+------+----+-----+-----+------+-------+----+------+\n \"\"\"\n type = 'DMIG'\n _properties = ['is_real', 'is_complex', 'is_polar', 'matrix_type', 'shape',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIG(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmig, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIG card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the [jnode, jDOFs]\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIAX(BaseCard):\n \"\"\"\n Direct Matrix Input for Axisymmetric Analysis\n\n Defines axisymmetric (fluid or structure) related direct input matrix\n terms. The matrix is defined by a single header entry and one or\n more column entries. Only one header entry is required. 
A column\n entry is required for each column with nonzero elements.\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | NAME | 0 | IFO | TIN | TOUT | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | NAME | GJ | CJ | NJ | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | G1 | C1 | N1 | A1 | B1 | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | G2 | C2 | etc. | | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n +-------+------+----+--------+------+--------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+====+========+======+========+=======+====+======+\n | DMIAX | B2PP | 0 | 1 | 3 | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | DMIAX | B2PP | 32 | | | | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n | | 1027 | 3 | 4.25+6 | | 2.27+3 | | | |\n +-------+------+----+--------+------+--------+-------+----+------+\n\n \"\"\"\n type = 'DMIAX'\n\n def __init__(self, name, matrix_form, tin, tout, ncols,\n GCNj, GCNi, Real, Complex=None, comment=''):\n \"\"\"\n Creates a DMIAX card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 1=Square\n 2=General Rectangular\n 6=Symmetric\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 3=Complex, Single Precision\n tout : int\n matrix output precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n GCNj : List[(node, dof, harmonic_number)]???\n the jnode, jDOFs\n GCNi : List[(node, dof, harmonic_number)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n ncols = None\n\n if comment:\n self.comment = comment\n\n if Complex is None:\n Complex = []\n\n if tout is None:\n tout = 0\n\n self.name = name\n\n #: ifo/4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = matrix_form\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n # 3=Complex, Single; 4=Complex, Double\n self.tin = tin\n\n #: 0-Set by cell precision\n self.tout = tout\n\n self.ncols = ncols\n self.GCNj = GCNj\n self.GCNi = GCNi\n\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n if matrix_form not in [1]: #4, 5, 6, 8\n msg = (\n f'{self.type} name={name!r} matrix_form={matrix_form!r} '\n 'must be [1, 2, 6]\\n'\n ' 1: Square\\n'\n ' 2: General Rectangular\\n'\n ' 4: Lower Triangular\\n'\n ' 5: Upper Triangular\\n'\n ' 6: Symmetric\\n'\n ' 8: Identity (m=nRows, n=m)\\n')\n raise ValueError(msg)\n\n assert isinstance(matrix_form, integer_types), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n assert not isinstance(matrix_form, bool), 'matrix_form=%r type=%s' % (matrix_form, type(matrix_form))\n\n def finalize(self):\n \"\"\"converts the lists into numpy arrays\"\"\"\n return\n #self.GCi = np.asarray(self.GCi)\n #self.GCj = np.asarray(self.GCj)\n self.Real = 
np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmiax_to_hdf5(h5_file, model, model.dmiax, encoding)\n\n @property\n def is_real(self):\n \"\"\"is the matrix real?\"\"\"\n if self.tin in [1, 2]:\n return True\n return False\n\n @property\n def is_complex(self):\n \"\"\"is the matrix complex\"\"\"\n return not self.is_real\n\n @property\n def is_polar(self):\n \"\"\"is the matrix polar (vs real/imag)?\"\"\"\n return False\n\n @property\n def tin_dtype(self):\n \"\"\"gets the input dtype\"\"\"\n return _get_dtype(self.is_complex, self.tin)\n\n @property\n def tout_dtype(self):\n \"\"\"gets the output dtype\"\"\"\n return _get_dtype(self.is_complex, self.tout)\n\n @property\n def matrix_type(self):\n \"\"\"gets the matrix type\"\"\"\n if not isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n #elif self.matrix_form == 6:\n #matrix_type = 'symmetric'\n #elif self.matrix_form in [2, 9]:\n #matrix_type = 'rectangular'\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError(f'{self.type} matrix_form={self.matrix_form} '\n 'is not supported')\n return matrix_type\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a NastranMatrix (DMIAX) card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n matrix_form = integer(card, 3, 'ifo')\n tin = integer(card, 4, 'tin')\n tout = integer_or_blank(card, 5, 'tout', 0)\n if matrix_form == 1: # square\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form == 6: # symmetric\n ncols = integer_or_blank(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n elif matrix_form in [2, 9]: # rectangular\n ncols = integer(card, 8, 'matrix_form=%s; ncol' % matrix_form)\n else:\n # technically right, but nulling this will fix bad decks\n #self.ncols = blank(card, 8, 'matrix_form=%s; ncol' % self.matrix_form)\n raise NotImplementedError('matrix_form=%s is not supported' % matrix_form)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return DMIAX(name, matrix_form, tin, tout, ncols,\n GCj, GCi, Real, Complex, comment=comment)\n\n def _add_column(self, card, comment=''):\n if comment:\n if hasattr(self, '_comment'):\n self.comment += comment\n else:\n self.comment = comment\n\n unused_name = string(card, 1, 'name')\n\n Gj = integer(card, 2, 'Gj')\n # Cj = integer(card, 3, 'Cj')\n Cj = integer_or_blank(card, 3, 'Cj', 0)\n #Cj = parse_components(card, 3, 'Cj')\n Nj = integer_or_blank(card, 4, 'Nj')\n\n assert 0 <= Cj <= 6, 'C%i must be between [0, 6]; Cj=%s' % (0, Cj)\n\n nfields = len(card)\n #print(\"nfields = %i\" % nfields)\n #print(\"card[5:] =\", card[5:])\n #print(\"(nfields - 5) %% 4 = %i\" % ((nfields - 5) % 4))\n\n nloops = (nfields - 8) // 8\n if nfields - 8 % 8:\n nloops += 1\n #assert nfields <= 8,'nfields=%s' % nfields\n #print(\"nloops = %i\" % 
nloops)\n assert nloops > 0, 'nloops=%s' % nloops\n\n self.GCNj.append((Gj, Cj, Nj))\n GCNi = []\n self.GCNi.append(GCNi)\n if self.is_complex:\n for i in range(nloops):\n #print(dir(card))\n n = 9 + 8 * i\n Gi = integer(card, n, f'Gi{i}')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, f'Ci{i}', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n Ni = integer_or_blank(card, n + 2, f'Ni{i}')\n\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n GCNi.append((Gi, Ci, Ni))\n reali = double(card, n + 3, 'real')\n complexi = double(card, n + 4, 'complex')\n self.Real.append(reali)\n self.Complex.append(complexi)\n else:\n # real\n for i in range(nloops):\n n = 9 + 9 * i\n Gi = integer(card, n, 'Gi')\n # Ci = integer(card, n + 1, 'Ci')\n Ci = integer_or_blank(card, n + 1, 'Ci', 0)\n #Ci = parse_components(card, n + 1, 'Ci')\n Ni = integer(card, n + 2, 'Ni')\n\n assert 0 <= Ci <= 6, 'C%i must be between [0, 6]; Ci=%s' % (i + 1, Ci)\n reali = double(card, n + 3, 'real')\n GCNi.append((Gi, Ci, Ni))\n self.Real.append(reali)\n #print(\"GC=%s,%s real=%s\" % (Gi, Ci, reali))\n\n msg = '(len(GCNj)=%s len(GCNi)=%s' % (len(self.GCNj), len(self.GCNi))\n assert len(self.GCNj) == len(self.GCNi), msg\n #if self.is_complex:\n #self.Complex(double(card, v, 'complex')\n\n def raw_fields(self):\n list_fields = [\n 'DMIAX', self.name, 0, self.matrix_form, self.tin, None, None, None, self.ncols,\n ]\n k = 0\n if self.is_real:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields += ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n list_fields += [gi, ci, ni, reali, None, None, None, None]\n k += 1\n else:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields += ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n imagi = self.Complex[k]\n list_fields += [gi, ci, ni, reali, imagi, None, None, None, None]\n k += 1\n\n self.write_card()\n return list_fields\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if self.tin in [1, 3]:\n is_double = False\n msg = self.write_card_8()\n elif self.tin in [2, 4]:\n is_double = True\n size = 16\n msg = self.write_card_16()\n else:\n raise RuntimeError('tin=%r must be 1, 2, 3, or 4' % self.tin)\n return msg\n\n def write_card_8(self):\n \"\"\"writes the card in small field format\"\"\"\n return self._write_card(print_card_8)\n\n def write_card_16(self):\n \"\"\"writes the card in small large format\"\"\"\n return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card\"\"\"\n msg = '\\n$' + '-' * 80\n msg += f'\\n$ DMIAX Matrix {self.name}\\n'\n list_fields = [\n 'DMIAX', self.name, 0, self.matrix_form, self.tin, None, None, None, self.ncols,\n ]\n msg += func(list_fields)\n k = 0\n assert len(self.GCNj) > 0, self.get_stats()\n assert len(self.GCNi) > 0, self.get_stats()\n if self.is_real:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields = ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n list_fields += [gi, ci, ni, reali, None, None, None, None]\n k += 1\n msg += func(list_fields)\n else:\n for i, GCNj in enumerate(self.GCNj):\n gj, cj, nj = GCNj\n list_fields = ['DMIAX', self.name, gj, cj, nj, None, None, None, None]\n for 
unused_j, GCNi in enumerate(self.GCNi[i]):\n gi, ci, ni = GCNi\n reali = self.Real[k]\n imagi = self.Complex[k]\n list_fields += [gi, ci, ni, reali, imagi, None, None, None]\n k += 1\n msg += func(list_fields)\n return msg\n\n def __repr__(self):\n return self.write_card(size=8)\n\nclass DMIJ(NastranMatrix):\n \"\"\"\n Direct Matrix Input at js-Set of the Aerodynamic Mesh\n Defines direct input matrices related to collation degrees-of-freedom\n (js-set) of aerodynamic mesh points for CAERO1, CAERO3, CAERO4 and CAERO5\n and for the slender body elements of CAERO2. These include W2GJ, FA2J and\n input pressures and downwashes associated with AEPRESS and AEDW entries.\n The matrix is described by a single header entry and one or more column\n entries. A column entry is required for each column with nonzero elements.\n For entering data for the interference elements of a CAERO2, use DMIJI\n or DMI.\n\n \"\"\"\n type = 'DMIJ'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n ifo = 1\n tin = 1\n tout = 1\n polar = 0\n ncols = 1\n GCj = []\n GCi = []\n Real = []\n return DMIJ(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmij, encoding)\n\n def __init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='',\n finalize=True):\n \"\"\"\n Creates a DMIJ card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n matrix_form : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]???\n the jnode, jDOFs\n GCi : List[(node, dof)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, matrix_form, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIJI(NastranMatrix):\n \"\"\"\n Direct Matrix Input at js-Set of the Interference Body\n Defines direct input matrices related to collation degrees-of-freedom\n (js-set) of aerodynamic mesh points for the interference elements of CAERO2.\n These include W2GJ, FA2J and input pressures and downwashes associated with\n AEPRESS and AEDW entries. The matrix is described by a single header entry\n and one or more column entries. A column entry is required for each column\n with nonzero elements. 
For entering data for the slender elements of a\n CAERO2, or a CAERO1, 3, 4 or 5 use DMIJ or DMI.\n\n \"\"\"\n type = 'DMIJI'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIJI(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmiji, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIJI card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]???\n the jnode, jDOFs\n GCi : List[(node, dof)]???\n the inode, iDOFs\n Real : List[float]???\n The real values\n Complex : List[float]???; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMIK(NastranMatrix):\n \"\"\"\n Direct Matrix Input at ks-Set of the Aerodynamic Mesh\n Defines direct input matrices related to physical (displacement)\n degrees-of-freedom (ks-set) of aerodynamic grid points. These include WKK,\n WTFACT and input forces associated with AEFORCE entries. The matrix is\n described by a single header entry and one or more column entries. 
A column\n entry is required for each column with nonzero elements.\n\n +------+-------+----+-----+-----+------+-------+----+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=======+====+=====+=====+======+=======+====+======+\n | DMIK | NAME | 0 | IFO | TIN | TOUT | POLAR | | NCOL |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | NAME | GJ | CJ | | G1 | C1 | A1 | B1 |\n +------+-------+----+-----+-----+------+-------+----+------+\n | | G2 | C2 | A2 | B2 | | | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | ALPH1 | 0 | 9 | 2 | 0 | 1 | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | DMIK | ALPH1 | 1 | 1 | 1 | 1 | 1.0 | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n | | 2 | 1 | 1.0 | | | | | |\n +------+-------+----+-----+-----+------+-------+----+------+\n \"\"\"\n type = 'DMIK'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n #@classmethod\n #def _init_from_empty(cls):\n #name = 'name'\n #ifo = 1\n #tin = 1\n #tout = 1\n #polar = 0\n #ncols = 1\n #GCj = []\n #GCi = []\n #Real = []\n #return DMIK(name, ifo, tin, tout, polar, ncols, GCj, GCi, Real,\n #Complex=None, comment='', finalize=True)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmik, encoding)\n\n def __init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n \"\"\"\n Creates a DMIK card\n\n Parameters\n ----------\n name : str\n the name of the matrix\n ifo : int\n matrix shape\n 4=Lower Triangular\n 5=Upper Triangular\n 6=Symmetric\n 8=Identity (m=nRows, n=m)\n tin : int\n matrix input precision\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n tout : int\n matrix output precision\n 0=same as tin\n 1=Real, Single Precision\n 2=Real, Double Precision\n 3=Complex, Single Precision\n 4=Complex, Double Precision\n polar : int; default=0\n Input format of Ai, Bi\n Integer=blank or 0 indicates real, imaginary format\n Integer > 0 indicates amplitude, phase format\n ncols : int\n ???\n GCj : List[(node, dof)]\n the jnode, jDOFs\n GCi : List[(node, dof)]\n the inode, iDOFs\n Real : List[float]\n The real values\n Complex : List[float]; default=None\n The complex values (if the matrix is complex)\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n GCj, GCi, Real, Complex, comment=comment,\n finalize=finalize)\n\n\nclass DMI(NastranMatrix):\n \"\"\"\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=======+======+======+=========+==========+===========+===========+======+\n | DMI | NAME | 0 | FORM | TIN | TOUT | | M | N |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | DMI | NAME | J | I1 | A(I1,J) | A(I1,J) | A(I1+1,J) | A(I1+2,J) | etc. |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n | | I2 | etc. 
| | | | | | |\n +------+-------+------+------+---------+----------+-----------+-----------+------+\n \"\"\"\n type = 'DMI'\n _properties = ['shape', 'ifo', 'is_real', 'is_complex', 'is_polar', 'matrix_type',\n 'tin_dtype', 'tout_dtype']\n\n @classmethod\n def _init_from_empty(cls):\n name = 'name'\n matrix_form = 8\n tin = 1\n tout = 1\n nrows = 5\n ncols = 5\n GCj = []\n GCi = []\n Real = []\n return DMI(name, matrix_form, tin, tout, nrows, ncols, GCj, GCi, Real,\n Complex=None, comment='', finalize=False)\n\n @classmethod\n def export_to_hdf5(cls, h5_file, model, encoding):\n _export_dmig_to_hdf5(h5_file, model, model.dmi, encoding)\n\n def __init__(self, name, matrix_form, tin, tout, nrows, ncols,\n GCj, GCi, Real, Complex=None, comment='', finalize=True):\n #NastranMatrix.__init__(self, name, ifo, tin, tout, polar, ncols,\n #GCj, GCi, Real, Complex, comment='')\n if comment:\n self.comment = comment\n\n if Complex is None:\n Complex = []\n\n if tout is None:\n tout = 0\n\n if matrix_form not in [1, 2, 3, 4, 5, 6, 8]:\n msg = (\n '%s name=%r matrix_form=%r must be [1, 2, 3, 4, 5, 6, 8]\\n'\n ' 1: Square\\n'\n ' 2: Rectangular\\n'\n ' 3: Diagonal matrix (M=number of rows, N=1)\\n'\n ' 4: Lower Triangular\\n'\n ' 5: Upper Triangular\\n'\n ' 6: Symmetric\\n'\n ' 8: Identity (m=nRows, n=m)\\n'\n #' 9: Rectangular\\n'\n % (self.type, name, matrix_form))\n raise ValueError(msg)\n\n self.name = name\n self.matrix_form = matrix_form\n self.tin = tin\n self.tout = tout\n self.nrows = nrows\n self.ncols = ncols\n self.GCi = GCi\n self.GCj = GCj\n self.Real = Real\n if len(Complex) or self.is_complex:\n self.Complex = Complex\n if finalize:\n self.finalize()\n\n #@property\n #def form(self):\n #\"\"\"gets the matrix_form\"\"\"\n #self.deprecated('form', 'matrix_form', '1.1')\n #return self.matrix_form\n\n #@form.setter\n #def form(self, matrix_form):\n #\"\"\"sets the matrix_form\"\"\"\n #self.deprecated('form', 'matrix_form', '1.1')\n #self.matrix_form = matrix_form\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a DMI card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n\n \"\"\"\n name = string(card, 1, 'name')\n #zero\n\n #: Form of the matrix: 1=Square (not symmetric); 2=Rectangular;\n #: 3=Diagonal (m=nRows,n=1); 4=Lower Triangular; 5=Upper Triangular;\n #: 6=Symmetric; 8=Identity (m=nRows, n=m)\n matrix_form = integer(card, 3, 'form')\n\n #: 1-Real, Single Precision; 2=Real,Double Precision;\n #: 3=Complex, Single; 4=Complex, Double\n tin = integer(card, 4, 'tin')\n\n #: 0-Set by cell precision\n tout = integer_or_blank(card, 5, 'tout', 0)\n\n nrows = integer(card, 7, 'nrows')\n ncols = integer(card, 8, 'ncols')\n\n assert len(card) == 9, 'len(DMI card) = %i\\ncard=%s' % (len(card), card)\n\n GCj = []\n GCi = []\n Real = []\n Complex = []\n return DMI(name, matrix_form, tin, tout, nrows, ncols,\n GCj, GCi, Real, Complex, comment=comment, finalize=False)\n\n def finalize(self):\n self.GCi = np.asarray(self.GCi)\n self.GCj = np.asarray(self.GCj)\n self.Real = np.asarray(self.Real)\n if self.is_complex:\n self.Complex = np.asarray(self.Complex)\n\n @property\n def matrix_type(self):\n \"\"\"\n gets the matrix type\n\n 1 Square matrix (not symmetric)\n 2 General rectangular matrix\n 3 Diagonal matrix (M=number of rows, N = 1)\n #4 Lower triangular factor\n #5 Upper triangular factor\n 6 Symmetric matrix\n 8 Identity matrix (M=number of rows, N = M)\n \"\"\"\n if not 
isinstance(self.matrix_form, integer_types):\n msg = 'ifo must be an integer; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n if isinstance(self.matrix_form, bool):\n msg = 'matrix_form must not be a boolean; matrix_form=%r type=%s name=%s' % (\n self.matrix_form, type(self.matrix_form), self.name)\n raise TypeError(msg)\n\n if self.matrix_form == 1:\n matrix_type = 'square'\n elif self.matrix_form == 2: # 9 ???\n matrix_type = 'rectangular'\n elif self.matrix_form == 3:\n matrix_type = 'diagonal'\n elif self.matrix_form == 6:\n matrix_type = 'symmetric'\n elif self.matrix_form == 9:\n matrix_type = 'identity'\n else:\n raise NotImplementedError('%s matrix_form=%r is not supported' % (\n self.type, self.matrix_form))\n return matrix_type\n\n @property\n def is_polar(self):\n if self.tin in [1, 2]:\n is_polar = False\n elif self.tin in [3, 4]:\n is_polar = False # TODO: could be wrong...\n else:\n raise NotImplementedError('nrows=%s ncols=%s' % (self.nrows, self.ncols))\n return is_polar\n\n @property\n def shape(self):\n return (self.nrows, self.ncols)\n\n @property\n def ifo(self):\n \"\"\"\n ifo\n #: 4-Lower Triangular; 5=Upper Triangular; 6=Symmetric; 8=Identity (m=nRows, n=m)\n\n #: Form of the matrix: 1=Square (not symmetric); 2=Rectangular;\n #: 3=Diagonal (m=nRows,n=1); 4=Lower Triangular; 5=Upper Triangular;\n #: 6=Symmetric; 8=Identity (m=nRows, n=m)\n self.matrix_form = integer(card, 3, 'matrix_form')\n\n \"\"\"\n return self.matrix_form\n #if self.nrows == self.ncols:\n ## symmetric\n #ifo = 6\n ##elif self.nrows > 1 and self.ncols > 1:\n ##ifo = 2\n #else:\n #raise NotImplementedError('matrix_form=%r nrows=%s ncols=%s' % (\n #self.matrix_form, self.nrows, self.ncols))\n #return ifo\n\n def _add_column(self, card, comment=''):\n \"\"\"\n .. 
todo:: support comment\n \"\"\"\n if self.is_complex:\n self._read_complex(card)\n else:\n self._read_real(card)\n\n def _read_real(self, card):\n \"\"\"reads a real DMI column\"\"\"\n # column number\n j = integer(card, 2, 'icol')\n\n # counter\n i = 0\n fields = [interpret_value(field, card) for field in card[3:]]\n\n # Real, starts at A(i1,j), goes to A(i2,j) in a column\n while i < len(fields):\n i1 = fields[i]\n if isinstance(i1, integer_types):\n i += 1\n is_done_reading_floats = False\n while not is_done_reading_floats and i < len(fields):\n real_value = fields[i]\n if isinstance(real_value, integer_types):\n is_done_reading_floats = True\n elif isinstance(real_value, float):\n #print('adding j=%s i1=%s val=%s' % (j, i1, real_value))\n self.GCj.append(j)\n self.GCi.append(i1)\n self.Real.append(real_value)\n i += 1\n i1 += 1\n else:\n real_value = self.Real[-1]\n end_i = fields[i + 1]\n for ii in range(i1, end_i + 1):\n #print('adding j=%s i1=%s val=%s' % (j, ii, real_value))\n self.GCj.append(j)\n self.GCi.append(ii)\n self.Real.append(real_value)\n i += 1\n is_done_reading_floats = True\n\n def _read_complex(self, card):\n \"\"\"reads a complex DMI column\"\"\"\n #msg = 'complex matrices not supported in the DMI reader...'\n #raise NotImplementedError(msg)\n # column number\n j = integer(card, 2, 'icol')\n # counter\n i = 0\n fields = [interpret_value(field, card) for field in card[3:]]\n # Complex, starts at A(i1,j)+imag*A(i1,j), goes to A(i2,j) in a column\n if 0: # pragma: no cover\n is_real = True\n gci = None\n for field in fields:\n if isinstance(field, integer_types):\n gci = field\n elif isinstance(field, float):\n if is_real:\n real = field\n else:\n self.GCj.append(j)\n self.GCi.append(gci)\n self.Real.append(real)\n self.Complex.append(field)\n is_real = not is_real\n\n while i < len(fields):\n i1 = fields[i]\n assert isinstance(i1, int), card\n i += 1\n is_done_reading_floats = False\n while not is_done_reading_floats and i < len(fields):\n value = fields[i]\n #print(\"i=%s len(fields)=%s value=%s\" % (\n #i, len(fields), value))\n if isinstance(value, integer_types):\n is_done_reading_floats = True\n elif isinstance(value, float):\n complex_value = fields[i + 1]\n assert isinstance(complex_value, float), card\n self.GCj.append(j)\n self.GCi.append(i1)\n self.Real.append(value)\n self.Complex.append(complex_value)\n i += 2\n else:\n raise NotImplementedError()\n\n @property\n def is_real(self):\n \"\"\"real vs. complex attribute\"\"\"\n return not self.is_complex\n\n @property\n def is_complex(self):\n \"\"\"real vs. complex attribute\"\"\"\n if self.tin in [3, 4]:\n return True\n return False\n\n def raw_fields(self):\n \"\"\"\n .. warning:: All the writers are bad because Nastran insists on\n making columns a single DMI card. 
This makes\n writing a card much harder, so there are a lot of\n NotImplementedErrors floating about.\n\n This is an invalid method, but is not disabled\n because it's currently needed for checking results\n\n \"\"\"\n list_fields = ['DMI', self.name, 0, self.matrix_form, self.tin,\n self.tout, None, self.nrows, self.ncols]\n\n if self.is_complex:\n for (gci, gcj, reali, imagi) in zip(self.GCi, self.GCj, self.Real, self.Complex):\n list_fields += ['DMI', self.name, gcj, gci, reali, imagi]\n else:\n for (gci, gcj, reali) in zip(self.GCi, self.GCj, self.Real):\n list_fields += ['DMI', self.name, gcj, gci, reali]\n return list_fields\n\n def write_card_8(self):\n \"\"\"writes the card in single precision\"\"\"\n return self._write_card(print_card_8)\n\n def _get_real_fields(self, func):\n msg = ''\n uGCj = np.unique(self.GCj)\n for gcj in uGCj:\n i = np.where(gcj == self.GCj)[0]\n gcis = self.GCi[i]\n reals = self.Real[i]\n isort = np.argsort(gcis)\n list_fields = ['DMI', self.name, gcj]\n\n # will always write the first one\n gci_last = -1\n for gci, real in zip(gcis[isort], reals[isort]):\n if gci == gci_last + 1:\n pass\n else:\n list_fields.append(gci)\n list_fields.append(real)\n gci_last = gci\n msg += func(list_fields)\n return msg\n\n def _get_complex_fields(self, func):\n msg = ''\n uGCj = np.unique(self.GCj)\n for gcj in uGCj:\n i = np.where(gcj == self.GCj)[0]\n gcis = self.GCi[i]\n reals = self.Real[i]\n complexs = self.Complex[i]\n isort = np.argsort(gcis)\n list_fields = ['DMI', self.name, gcj]\n\n # will always write the first one\n gci_last = -10\n #print('gcis=%s \\nreals=%s \\ncomplexs=%s' % (\n #gcis[isort], reals[isort], complexs[isort]))\n if max(gcis) == min(gcis):\n list_fields += [gcis[0]]\n for reali, complexi in zip(reals, complexs):\n list_fields.extend([reali, complexi])\n msg += func(list_fields)\n else:\n #print(f'list_fields0 = {list_fields}')\n for i, gci, reali, complexi in zip(count(), gcis[isort], reals[isort], complexs[isort]):\n #print('B', gci, reali, complexi, gci_last)\n if gci != gci_last + 1 and i != 0:\n pass\n else:\n list_fields.append(gci)\n list_fields.append(reali)\n list_fields.append(complexi)\n gci_last = gci\n #print(f'list_fields = {list_fields}')\n msg += func(list_fields)\n return msg\n\n def get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool; default=False\n should the matrix be returned as a sparse matrix.\n Slower for dense matrices.\n apply_symmetry : bool; default=True\n If the matrix is symmetric (ifo=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n\n Returns\n -------\n M : numpy.ndarray or scipy.coomatrix\n the matrix\n rows : dict[int] = [int, int]\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols: dict[int] = [int, int]\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. 
warning:: is_sparse=True WILL fail\n\n \"\"\"\n return get_dmi_matrix(self, is_sparse=is_sparse, apply_symmetry=apply_symmetry)\n\n def write_card_16(self):\n \"\"\"writes the card in single precision\"\"\"\n return self._write_card(print_card_16)\n\n def write_card_double(self):\n \"\"\"writes the card in double precision\"\"\"\n return self._write_card(print_card_16)\n\n def _write_card(self, func):\n \"\"\"writes the card in single/double precision\"\"\"\n msg = '\\n$' + '-' * 80\n msg += '\\n$ %s Matrix %s\\n' % ('DMI', self.name)\n list_fields = ['DMI', self.name, 0, self.matrix_form, self.tin,\n self.tout, None, self.nrows, self.ncols]\n msg += print_card_8(list_fields)\n\n if self.is_complex:\n msg += self._get_complex_fields(func)\n else:\n msg += self._get_real_fields(func)\n return msg\n\n def write_card(self, size: int=8, is_double: bool=False) -> str:\n if size == 8:\n return self.write_card_8()\n if is_double:\n return self.write_card_double()\n return self.write_card_16()\n\n def __repr__(self):\n \"\"\"\n .. todo:: support shortened output format. There's a very low 1000\n DMI cap, I assume this is entries and not matrices.\n\n \"\"\"\n return self.write_card(size=8, is_double=False)\n\n\ndef get_row_col_map(matrix, GCi, GCj, ifo):\n ndim = len(GCi.shape)\n #print('ndim=%s' % ndim)\n #print('GCj=%s' % GCj)\n #print('GCi=%s' % GCi)\n if ndim == 1:\n rows, cols, rows_reversed, cols_reversed = _get_row_col_map_1d(matrix, GCi, GCj, ifo)\n else:\n rows, cols, rows_reversed, cols_reversed = _get_row_col_map_2d(matrix, GCi, GCj, ifo)\n\n nrows = len(rows)\n ncols = len(cols)\n assert nrows > 0, 'nrows=%s' % nrows\n assert ncols > 0, 'ncols=%s' % ncols\n return nrows, ncols, ndim, rows, cols, rows_reversed, cols_reversed\n\ndef _get_row_col_map_1d(matrix, GCi, GCj, ifo):\n \"\"\"helper for ``get_row_col_map``\"\"\"\n rows = {}\n rows_reversed = {}\n\n cols = {}\n cols_reversed = {}\n i = 0\n #nrows = np.unique(GCi)\n #ncols = np.unique(GCj)\n for gci in GCi:\n if gci not in rows:\n rows[gci] = i\n rows_reversed[i] = gci\n i += 1\n\n if ifo == 6:\n # symmetric\n #print(GCj)\n for gcj in GCj:\n if gcj not in rows:\n #print('row.gcj = %s' % str(gcj))\n rows[gcj] = i\n rows_reversed[i] = gcj\n i += 1\n cols = rows\n cols_reversed = rows_reversed\n else:\n j = 0\n for gcj in GCj:\n if gcj not in cols:\n cols[gcj] = j\n cols_reversed[j] = gcj\n j += 1\n return rows, cols, rows_reversed, cols_reversed\n\ndef _get_row_col_map_2d(matrix, GCi, GCj, ifo):\n \"\"\"helper for ``get_row_col_map``\"\"\"\n rows = {}\n rows_reversed = {}\n\n cols = {}\n cols_reversed = {}\n #print('i0=%s j0=%s' % (i, j))\n #nrows = len(GCi)\n #ncols = len(GCj)\n #rows_array = np.zeros((nrows, 2), dtype='int32')\n #cols_array = np.zeros((ncols, 2), dtype='int32')\n #for i, (nid, comp) in enumerate(GCi):\n ##print('i=%s nid=%s comp=%s nrows=%s rows_array.shape=%s' % (\n ##i, nid, comp, nrows, str(rows_array.shape)))\n #rows_array[i, :] = [nid, comp]\n #print('rows_array = \\n%s' % rows_array)\n\n #for j, (nid, comp) in enumerate(GCj):\n #cols_array[j, :] = [nid, comp]\n #print('cols_array = \\n%s' % cols_array)\n\n #print(GCi)\n #print(GCj)\n i = 0\n for (nid, comp) in GCi:\n gci = (nid, comp)\n if gci not in rows:\n #print('row.gci = %s' % str(gci))\n rows[gci] = i\n rows_reversed[i] = gci\n i += 1\n if ifo == 6:\n # symmetric\n for (nid, comp) in GCj:\n gcj = (nid, comp)\n if gcj not in rows:\n #print('row.gcj = %s' % str(gcj))\n rows[gcj] = i\n rows_reversed[i] = gcj\n i += 1\n cols = rows\n cols_reversed = 
rows_reversed\n else:\n j = 0\n for (nid, comp) in GCj:\n gcj = (nid, comp)\n if gcj not in cols:\n #print('col.gcj = %s' % str(gcj))\n cols[gcj] = j\n cols_reversed[j] = gcj\n j += 1\n return rows, cols, rows_reversed, cols_reversed\n\ndef _fill_sparse_matrix(matrix, nrows, ncols):\n \"\"\"helper method for get_matrix\"\"\"\n GCj = array(matrix.GCj, dtype='int32') - 1\n GCi = array(matrix.GCi, dtype='int32') - 1\n reals = array(matrix.Real, dtype='float32')\n\n # TODO: matrix size: is this correct?\n nrows = max(GCi) + 1\n ncols = max(GCj) + 1\n\n dtype = _get_dtype(matrix.is_complex, matrix.tin)\n # TODO: no check for symmetry\n # TODO: no check for dtype\n if matrix.is_complex:\n complexs = array(matrix.Complex, dtype='float32')\n data = array([reals, complexs]).astype(complex)\n else:\n data = reals\n\n if matrix.matrix_form in [1, 6]:\n nrows = max(nrows, ncols)\n ncols = nrows\n\n #A = coo_matrix( (entries,(rows,cols)),shape=(nrows,ncols),dtype=dtype) # test\n sparse_matrix = coo_matrix((data, (matrix.GCi, matrix.GCj)),\n shape=(nrows, ncols), dtype=dtype)\n #sparse_matrix = coo_matrix( (data,(matrix.GCi,matrix.GCj)),shape=(i,j)) # old\n #sparse_matrix = coo_matrix( (data,(matrix.GCi,matrix.GCj)),shape=(nrows,ncols))\n #print(sparse_matrix.toarray())\n #print(sparse_matrix)\n return sparse_matrix\n\n\ndef _fill_dense_rectangular_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry):\n \"\"\"helper method for get_matrix\"\"\"\n is_sparse = False\n if self.is_complex:\n dense_mat = zeros((nrows, ncols), dtype='complex128')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = complex(reali, complexi)\n dense_mat[j, i] = complex(reali, complexi)\n else:\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = complex(reali, complexi)\n else:\n dense_mat = zeros((nrows, ncols), dtype='float64')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = reali\n dense_mat[j, i] = reali\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += 'i=%s row=%s\\n' % (i, row)\n raise RuntimeError(msg)\n else:\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[(gci[0], gci[1])]\n j = cols[(gcj[0], gcj[1])]\n dense_mat[i, j] = reali\n except KeyError:\n msg = ('name=%s ndim=%s gci=%s gcj=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n\\n' % (\n self.name, ndim, str(gci), str(gcj), self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n\n gci2 = (gci[0], gci[1])\n gcj2 = (gcj[0], gcj[1])\n if gci2 in rows:\n msg += 'gci/row_key=%s found\\n' % str(gci2)\n else:\n msg += 'gci/row_key=%s not found\\n' % str(gci2)\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n\n if gcj2 in cols:\n msg += '\\ngcj/col_key=%s found\\n' % str(gcj2)\n else:\n msg += '\\ngcj/col_key=%s not found\\n' % str(gcj2)\n msg += 'Cols:\\n'\n for j, col in enumerate(cols):\n msg 
+= ' j=%s row=%s\\n' % (j, col)\n\n msg += '\\n'\n print(msg)\n raise KeyError(msg)\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n\n msg += '\\nCols:\\n'\n for j, row in enumerate(cols):\n msg += ' j=%s row=%s\\n' % (j, col)\n raise RuntimeError(msg)\n return dense_mat\n\n\ndef _fill_dense_column_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry):\n \"\"\"helper method for get_matrix\"\"\"\n is_sparse = False\n if self.is_complex:\n dense_mat = zeros((nrows, ncols), dtype='complex128')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = complex(reali, complexi)\n dense_mat[j, i] = complex(reali, complexi)\n elif self.matrix_form == 2: # rectangular\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = complex(reali, complexi)\n else:\n for (gcj, gci, reali, complexi) in zip(self.GCj, self.GCi,\n self.Real, self.Complex):\n i = rows[gci]\n j = cols[gcj]\n else:\n #print('nrows=%s ncols=%s' % (nrows, ncols))\n dense_mat = zeros((nrows, ncols), dtype='float64')\n if self.matrix_form == 6 and apply_symmetry: # symmetric\n assert nrows == ncols, 'nrows=%s ncols=%s' % (nrows, ncols)\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = reali\n dense_mat[j, i] = reali\n else:\n try:\n for (gcj, gci, reali) in zip(self.GCj, self.GCi, self.Real):\n i = rows[gci]\n j = cols[gcj]\n dense_mat[i, j] = reali\n except IndexError:\n msg = ('name=%s ndim=%s i=%s j=%s matrix_type=%s '\n 'is_polar=%s is_sparse=%s ncols=%s M.shape=%s\\n' % (\n self.name, ndim, i, j, self.matrix_type,\n self.is_polar, is_sparse, self.ncols, dense_mat.shape))\n msg += 'Rows:\\n'\n for i, row in enumerate(rows):\n msg += ' i=%s row=%s\\n' % (i, row)\n raise RuntimeError(msg)\n return dense_mat\n\ndef get_dmi_matrix(matrix: DMI, is_sparse: bool=False,\n apply_symmetry: bool=True) -> Tuple[np.array, None, None]:\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool\n should the matrix be returned as a sparse matrix (default=True).\n Slower for dense matrices.\n apply_symmetry: bool\n If the matrix is symmetric (matrix_form=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n TODO: unused...\n\n Returns\n -------\n M : ndarray\n the matrix\n rows : None\n unused\n cols : None\n unused\n\n .. 
warning:: is_sparse=True WILL fail\n\n \"\"\"\n ifo = matrix.ifo\n GCj = array(matrix.GCj, dtype='int32') - 1\n GCi = array(matrix.GCi, dtype='int32') - 1\n\n dtype = matrix.tin_dtype\n\n if matrix.is_complex:\n data = matrix.Real + matrix.Complex * 1j\n else:\n data = matrix.Real\n\n if ifo == 2:\n # rectangular\n nrows = matrix.nrows\n ncols = matrix.ncols\n\n M = coo_matrix((data, (GCi, GCj)),\n shape=(nrows, ncols), dtype=dtype)\n if not is_sparse:\n M = M.toarray()\n\n else:\n nrows = matrix.nrows\n ncols = matrix.ncols\n if ifo == 6:\n nrows = max(nrows, ncols)\n ncols = nrows\n M = coo_matrix((data, (GCi, GCj)),\n shape=(nrows, ncols), dtype=dtype)\n if not is_sparse:\n M = M.toarray()\n #else:\n #ifo : int\n # matrix shape\n # 4=Lower Triangular\n # 5=Upper Triangular\n # 6=Symmetric\n # 8=Identity (m=nRows, n=m)\n #raise RuntimeError(matrix.get_stats())\n return M, None, None\n\ndef get_matrix(self, is_sparse=False, apply_symmetry=True):\n \"\"\"\n Builds the Matrix\n\n Parameters\n ----------\n is_sparse : bool\n should the matrix be returned as a sparse matrix (default=True).\n Slower for dense matrices.\n apply_symmetry: bool\n If the matrix is symmetric (matrix_form=6), returns a symmetric matrix.\n Supported as there are symmetric matrix routines.\n TODO: unused...\n\n Returns\n -------\n M : ndarray\n the matrix\n rows : Dict[(nid, nid)] = float\n dictionary of keys=rowID, values=(Grid,Component) for the matrix\n cols : Dict[](int, int)] = float\n dictionary of keys=columnID, values=(Grid,Component) for the matrix\n\n .. warning:: is_sparse=True WILL fail\n\n \"\"\"\n nrows, ncols, ndim, rows, cols, rows_reversed, cols_reversed = get_row_col_map(\n self, self.GCi, self.GCj, self.matrix_form)\n #print('rows = ', rows)\n #print('cols = ', cols)\n #print('i=%s j=%s' % (i, j))\n #nrows = len(rows2)\n #ncols = len(cols2)\n\n #A = ss.lil_matrix((3,3), dtype='d') # double precision\n #rows = []\n #cols = []\n #data = []\n #for i in range(3):\n #for j in range(3):\n #k = float((i+1)*(j+1))\n #rows.append(i)\n #cols.append(j)\n #data.append(k)\n #A[i,j] = k\n\n #is_sparse = False\n if is_sparse:\n M = _fill_sparse_matrix(self, nrows, ncols)\n return M\n else:\n if ndim == 1:\n M = _fill_dense_column_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry)\n else:\n M = _fill_dense_rectangular_matrix(self, nrows, ncols, ndim, rows, cols, apply_symmetry)\n\n #print(M)\n return (M, rows_reversed, cols_reversed)\n\n\ndef _export_dmig_to_hdf5(h5_file, model, dict_obj, encoding):\n \"\"\"export dmigs, dmij, dmiji, dmik, dmi\"\"\"\n for name, dmig in dict_obj.items():\n dmig_group = h5_file.create_group(name)\n dmig_group.create_dataset('tin', data=dmig.tin)\n\n if hasattr(dmig, 'tout'):\n dmig_group.create_dataset('tout', data=dmig.tout)\n\n if dmig.type == 'DMIG' and name == 'UACCEL':\n if dmig.ncol is not None:\n dmig_group.create_dataset('ncol', data=dmig.ncol)\n #load_seq_group = dmig_group.create_group('load_sequences')\n\n nids = []\n dofs = []\n values = []\n for lseq, ncx in sorted(dmig.load_sequences.items()):\n lseq_group = dmig_group.create_group(str(lseq))\n #list_fields += [lseq, None, None]\n for (nid, dof, value) in ncx:\n nids.append(nid)\n dofs.append(int(dof))\n values.append(value)\n\n #print('nids =', nids)\n #print('dofs =', dofs)\n #print('values =', values)\n lseq_group.create_dataset('nids', data=nids)\n lseq_group.create_dataset('dofs', data=dofs)\n lseq_group.create_dataset('values', data=values)\n else:\n if hasattr(dmig, 'nrows') and dmig.nrows is not 
None:\n dmig_group.create_dataset('nrows', data=dmig.nrows)\n if dmig.ncols is not None:\n dmig_group.create_dataset('ncols', data=dmig.ncols)\n if hasattr(dmig, 'polar'):\n dmig_group.create_dataset('polar', data=dmig.polar)\n\n dmig_group.create_dataset('matrix_form', data=dmig.matrix_form)\n dmig_group.create_dataset('tin_dtype', data=dmig.tin_dtype)\n dmig_group.create_dataset('tout_dtype', data=dmig.tout_dtype)\n\n dmig_group.create_dataset('matrix_type', data=dmig.matrix_type)\n dmig_group.create_dataset('is_complex', data=dmig.is_complex)\n dmig_group.create_dataset('is_real', data=dmig.is_real)\n dmig_group.create_dataset('is_polar', data=dmig.is_polar)\n\n dmig_group.create_dataset('GCi', data=dmig.GCi)\n dmig_group.create_dataset('GCj', data=dmig.GCj)\n dmig_group.create_dataset('Real', data=dmig.Real)\n if hasattr(dmig, 'Complex') and dmig.Complex is not None:\n dmig_group.create_dataset('Complex', data=dmig.Complex)\n\n\ndef _export_dmiax_to_hdf5(h5_file, model, dict_obj, encoding):\n \"\"\"export dmiax\"\"\"\n for name, dmiax in dict_obj.items():\n #print(f'exporting {dmiax.type} name={name!r}')\n dmiax_group = h5_file.create_group(name)\n dmiax_group.create_dataset('tin', data=dmiax.tin)\n\n if hasattr(dmiax, 'tout'):\n dmiax_group.create_dataset('tout', data=dmiax.tout)\n\n if hasattr(dmiax, 'nrows') and dmiax.nrows is not None:\n dmiax_group.create_dataset('nrows', data=dmiax.nrows)\n if dmiax.ncols is not None:\n dmiax_group.create_dataset('ncols', data=dmiax.ncols)\n if hasattr(dmiax, 'polar'):\n dmiax_group.create_dataset('polar', data=dmiax.polar)\n\n dmiax_group.create_dataset('matrix_form', data=dmiax.matrix_form)\n dmiax_group.create_dataset('tin_dtype', data=dmiax.tin_dtype)\n dmiax_group.create_dataset('tout_dtype', data=dmiax.tout_dtype)\n\n dmiax_group.create_dataset('matrix_type', data=dmiax.matrix_type)\n dmiax_group.create_dataset('is_complex', data=dmiax.is_complex)\n dmiax_group.create_dataset('is_real', data=dmiax.is_real)\n dmiax_group.create_dataset('is_polar', data=dmiax.is_polar)\n\n gcnj = []\n j_none_flags = []\n\n gcni = []\n i_none_flags = []\n for j, GCNj in enumerate(dmiax.GCNj):\n gj, cj, nj = GCNj\n is_none_flag_j = False\n if nj is None:\n nj = 0\n is_none_flag_j = True\n j_none_flags.append(is_none_flag_j)\n gcnj.append((gj, cj, nj))\n for unused_i, GCNi in enumerate(dmiax.GCNi[j]):\n gi, ci, ni = GCNi\n is_none_flag_i = False\n if ni is None:\n ni = 0\n is_none_flag_i = True\n i_none_flags.append(is_none_flag_i)\n gcni.append((gi, ci, ni, j))\n\n dmiax_group.create_dataset('GCNi_j', data=gcni)\n dmiax_group.create_dataset('GCNj', data=gcnj)\n dmiax_group.create_dataset('i_none_flags', data=i_none_flags)\n dmiax_group.create_dataset('j_none_flags', data=j_none_flags)\n\n dmiax_group.create_dataset('Real', data=dmiax.Real)\n if hasattr(dmiax, 'Complex') and dmiax.Complex is not None:\n dmiax_group.create_dataset('Complex', data=dmiax.Complex)\n\ndef _set_polar(polar):\n if polar in [None, 0, False]:\n polar = 0\n elif polar in [1, True]:\n polar = 1\n else:\n raise ValueError('polar=%r and must be 0 or 1' % polar)\n return polar\n\ndef _get_dtype(is_complex, type_flag):\n if type_flag == 1:\n dtype = 'float32'\n elif type_flag == 2:\n dtype = 'float64'\n elif type_flag == 3:\n dtype = 'complex64'\n elif type_flag == 4:\n dtype = 'complex128'\n elif type_flag == 0:\n if is_complex:\n dtype = 'complex128'\n else:\n dtype = 'float64'\n else:\n raise RuntimeError(\"invalid option for matrix format\")\n return dtype\n\n"
] | [
[
"numpy.hstack",
"numpy.unique"
],
[
"numpy.zeros",
"numpy.argsort",
"numpy.asarray",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.where",
"numpy.unique"
]
] |
hengwei-chan/molecular_attention_transformer | [
"29193d4155df528e3a6a0c1e0da39111d0b8db93"
] | [
"soltrannet/__init__.py"
] | [
"from .predict import predict \nimport argparse\nimport sys, multiprocessing\nimport torch\n\ndef _parse_args():\n parser=argparse.ArgumentParser(description=\"Run SolTranNet aqueous solubility predictor\")\n parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is 1 SMILE per line.')\n parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')\n parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')\n parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')\n parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')\n\n args=parser.parse_args()\n\n return args\n\ndef _run(args):\n\n smiles=[x.rstrip() for x in args.input]\n if args.cpu_predict:\n predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))\n else:\n predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)\n for pred, smi, warn in predictions:\n args.output.write(f'{smi},{pred:.3f},{warn}\\n')\n\n"
] | [
[
"torch.device"
]
] |
adibellathur/garage | [
"482a26a07d46091f878c41b582f1478588e397ff"
] | [
"src/garage/torch/algos/_utils.py"
] | [
"\"\"\"Utility functions used by PyTorch algorithms.\"\"\"\nimport torch\nimport torch.nn.functional as F\n\n\nclass _Default: # pylint: disable=too-few-public-methods\n \"\"\"A wrapper class to represent default arguments.\n\n Args:\n val (object): Argument value.\n\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n\ndef make_optimizer(optimizer_type, module, **kwargs):\n \"\"\"Create an optimizer for PyTorch algos.\n\n Args:\n optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.\n This can be an optimizer type such as 'torch.optim.Adam' or a\n tuple of type and dictionary, where dictionary contains arguments\n to initialize the optimizer e.g. (torch.optim.Adam, {'lr' = 1e-3})\n module (torch.nn.Module): The module whose parameters needs to be\n optimized.\n kwargs (dict): Other keyword arguments to initialize optimizer. This\n is not used when `optimizer_type` is tuple.\n\n Returns:\n torch.optim.Optimizer: Constructed optimizer.\n\n Raises:\n ValueError: Raises value error when `optimizer_type` is tuple, and\n non-default argument is passed in `kwargs`.\n\n \"\"\"\n if isinstance(optimizer_type, tuple):\n opt_type, opt_args = optimizer_type\n for name, arg in kwargs.items():\n if not isinstance(arg, _Default):\n raise ValueError('Should not specify {} and explicit \\\n optimizer args at the same time'.format(name))\n return opt_type(module.parameters(), **opt_args)\n\n opt_args = {}\n for name, arg in kwargs.items():\n if isinstance(arg, _Default):\n opt_args[name] = arg.val\n else:\n opt_args[name] = arg\n return optimizer_type(module.parameters(), **opt_args)\n\n\ndef compute_advantages(discount, gae_lambda, max_path_length, baselines,\n rewards):\n \"\"\"Calculate advantages.\n\n Advantages are a discounted cumulative sum.\n\n Calculate advantages using a baseline (value function) according to\n Generalized Advantage Estimation (GAE)\n\n The discounted cumulative sum can be computed using conv2d with filter.\n filter:\n [1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]\n where the length is same with max_path_length.\n\n baselines and rewards are also has same shape.\n baselines:\n [ [b_11, b_12, b_13, ... b_1n],\n [b_21, b_22, b_23, ... b_2n],\n ...\n [b_m1, b_m2, b_m3, ... b_mn] ]\n rewards:\n [ [r_11, r_12, r_13, ... r_1n],\n [r_21, r_22, r_23, ... r_2n],\n ...\n [r_m1, r_m2, r_m3, ... r_mn] ]\n\n Args:\n discount (float): RL discount factor (i.e. gamma).\n gae_lambda (float): Lambda, as used for Generalized Advantage\n Estimation (GAE).\n max_path_length (int): Maximum length of a single rollout.\n baselines (torch.Tensor): A 2D vector of value function estimates with\n shape (N, T), where N is the batch dimension (number of episodes)\n and T is the maximum path length experienced by the agent. If an\n episode terminates in fewer than T time steps, the remaining\n elements in that episode should be set to 0.\n rewards (torch.Tensor): A 2D vector of per-step rewards with shape\n (N, T), where N is the batch dimension (number of episodes) and T\n is the maximum path length experienced by the agent. If an episode\n terminates in fewer than T time steps, the remaining elements in\n that episode should be set to 0.\n\n Returns:\n torch.Tensor: A 2D vector of calculated advantage values with shape\n (N, T), where N is the batch dimension (number of episodes) and T\n is the maximum path length experienced by the agent. 
If an episode\n terminates in fewer than T time steps, the remaining values in that\n episode should be set to 0.\n\n \"\"\"\n adv_filter = torch.full((1, 1, 1, max_path_length - 1),\n discount * gae_lambda)\n adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)\n\n deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)\n deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)\n\n advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()\n return advantages\n\n\ndef pad_to_last(nums, total_length, axis=-1, val=0):\n \"\"\"Pad val to last in nums in given axis.\n\n length of the result in given axis should be total_length.\n\n Raises:\n IndexError: If the input axis value is out of range of the nums array\n\n Args:\n nums (numpy.ndarray): The array to pad.\n total_length (int): The final width of the Array.\n axis (int): Axis along which a sum is performed.\n val (int): The value to set the padded value.\n\n Returns:\n torch.Tensor: Padded array\n\n \"\"\"\n tensor = torch.Tensor(nums)\n axis = (axis + len(tensor.shape)) if axis < 0 else axis\n\n if len(tensor.shape) <= axis:\n raise IndexError('axis {} is out of range {}'.format(\n axis, tensor.shape))\n\n padding_config = [0, 0] * len(tensor.shape)\n padding_idx = abs(axis - len(tensor.shape)) * 2 - 1\n padding_config[padding_idx] = max(total_length - tensor.shape[axis], val)\n return F.pad(tensor, padding_config)\n\n\ndef filter_valids(tensor, valids):\n \"\"\"Filter out tensor using valids (last index of valid tensors).\n\n valids contains last indices of each rows.\n\n Args:\n tensor (torch.Tensor): The tensor to filter\n valids (list[int]): Array of length of the valid values\n\n Returns:\n torch.Tensor: Filtered Tensor\n\n \"\"\"\n return [tensor[i][:valids[i]] for i in range(len(valids))]\n"
] | [
[
"torch.nn.functional.conv2d",
"torch.nn.functional.pad",
"torch.full",
"torch.Tensor"
]
] |
amuamushu/wavedata | [
"1745c646ff3a76b38a81c439a0edd900c986c9f7"
] | [
"wavedata/tools/core/voxel_grid_2d.py"
] | [
"import numpy as np\n\nfrom wavedata.wavedata.tools.core import geometry_utils\n\n\nclass VoxelGrid2D(object):\n \"\"\"\n Voxel grids represent occupancy info. The voxelize_2d method projects a point cloud\n onto a plane, while saving height and point density information for each voxel.\n \"\"\"\n\n # Class Constants\n VOXEL_EMPTY = -1\n VOXEL_FILLED = 0\n\n def __init__(self):\n\n # Quantization size of the voxel grid\n self.voxel_size = 0.0\n\n # Voxels at the most negative/positive xyz\n self.min_voxel_coord = np.array([])\n self.max_voxel_coord = np.array([])\n\n # Size of the voxel grid along each axis\n self.num_divisions = np.array([0, 0, 0])\n\n # Points in sorted order, to match the order of the voxels\n self.points = []\n\n # Indices of filled voxels\n self.voxel_indices = []\n\n # Max point height in projected voxel\n self.heights = []\n\n # Number of points corresponding to projected voxel\n self.num_pts_in_voxel = []\n\n # Full occupancy grid, VOXEL_EMPTY or VOXEL_FILLED\n self.leaf_layout_2d = []\n\n def voxelize_2d(self, pts, voxel_size, extents=None,\n ground_plane=None, create_leaf_layout=True):\n \"\"\"Voxelizes the point cloud into a 2D voxel grid by\n projecting it down into a flat plane, and stores the maximum\n point height, and number of points corresponding to the voxel\n\n :param pts: Point cloud as N x [x, y, z]\n :param voxel_size: Quantization size for the grid\n :param extents: Optional, specifies the full extents of the point cloud.\n Used for creating same sized voxel grids.\n :param ground_plane: Plane coefficients (a, b, c, d), xz plane used if\n not specified\n :param create_leaf_layout: Set this to False to create an empty\n leaf_layout, which will save computation\n time.\n \"\"\"\n # Check if points are 3D, otherwise early exit\n if pts.shape[1] != 3:\n raise ValueError(\"Points have the wrong shape: {}\".format(\n pts.shape))\n\n self.voxel_size = voxel_size\n\n # Discretize voxel coordinates to given quantization size\n discrete_pts = np.floor(pts / voxel_size).astype(np.int32)\n\n # Use Lex Sort, sort by x, then z, then y (\n x_col = discrete_pts[:, 0]\n y_col = discrete_pts[:, 1]\n z_col = discrete_pts[:, 2]\n sorted_order = np.lexsort((y_col, z_col, x_col))\n\n # Save original points in sorted order\n self.points = pts[sorted_order]\n\n # Save discrete points in sorted order\n discrete_pts = discrete_pts[sorted_order]\n\n # Project all points to a 2D plane\n discrete_pts_2d = discrete_pts.copy()\n discrete_pts_2d[:, 1] = 0\n\n # Format the array to c-contiguous array for unique function\n contiguous_array = np.ascontiguousarray(discrete_pts_2d).view(\n np.dtype((np.void, discrete_pts_2d.dtype.itemsize *\n discrete_pts_2d.shape[1])))\n\n # The new coordinates are the discretized array with its unique indexes\n _, unique_indices = np.unique(contiguous_array, return_index=True)\n\n # Sort unique indices to preserve order\n unique_indices.sort()\n\n voxel_coords = discrete_pts_2d[unique_indices]\n\n # Number of points per voxel, last voxel calculated separately\n num_points_in_voxel = np.diff(unique_indices)\n num_points_in_voxel = np.append(num_points_in_voxel,\n discrete_pts_2d.shape[0] -\n unique_indices[-1])\n\n if ground_plane is None:\n # Use first point in voxel as highest point\n height_in_voxel = self.points[unique_indices, 1]\n else:\n # Ground plane provided\n height_in_voxel = geometry_utils.dist_to_plane(\n ground_plane, self.points[unique_indices])\n\n # Set the height and number of points for each voxel\n self.heights = height_in_voxel\n 
self.num_pts_in_voxel = num_points_in_voxel\n\n # Find the minimum and maximum voxel coordinates\n if extents is not None:\n # Check provided extents\n extents_transpose = np.array(extents).transpose()\n if extents_transpose.shape != (2, 3):\n raise ValueError(\"Extents are the wrong shape {}\".format(\n extents.shape))\n\n # Set voxel grid extents\n self.min_voxel_coord = np.floor(extents_transpose[0] / voxel_size)\n self.max_voxel_coord = \\\n np.ceil((extents_transpose[1] / voxel_size) - 1)\n\n self.min_voxel_coord[1] = 0\n self.max_voxel_coord[1] = 0\n\n # Check that points are bounded by new extents\n if not (self.min_voxel_coord <= np.amin(voxel_coords,\n axis=0)).all():\n raise ValueError(\"Extents are smaller than min_voxel_coord\")\n if not (self.max_voxel_coord >= np.amax(voxel_coords,\n axis=0)).all():\n raise ValueError(\"Extents are smaller than max_voxel_coord\")\n\n else:\n # Automatically calculate extents\n self.min_voxel_coord = np.amin(voxel_coords, axis=0)\n self.max_voxel_coord = np.amax(voxel_coords, axis=0)\n\n # Get the voxel grid dimensions\n self.num_divisions = ((self.max_voxel_coord - self.min_voxel_coord)\n + 1).astype(np.int32)\n\n # Bring the min voxel to the origin\n self.voxel_indices = (voxel_coords - self.min_voxel_coord).astype(int)\n\n if create_leaf_layout:\n # Create Voxel Object with -1 as empty/occluded, 0 as occupied\n self.leaf_layout_2d = self.VOXEL_EMPTY * \\\n np.ones(self.num_divisions.astype(int))\n\n # Fill out the leaf layout\n self.leaf_layout_2d[self.voxel_indices[:, 0], 0,\n self.voxel_indices[:, 2]] = \\\n self.VOXEL_FILLED\n\n def map_to_index(self, map_index):\n \"\"\"Converts map coordinate values to 1-based discretized grid index\n coordinate. Note: Any values outside the extent of the grid will be\n forced to be the maximum grid coordinate.\n\n :param map_index: N x 2 points\n\n :return: N x length(dim) (grid coordinate)\n [] if min_voxel_coord or voxel_size or grid_index or dim is not set\n \"\"\"\n if self.voxel_size == 0 \\\n or len(self.min_voxel_coord) == 0 \\\n or len(map_index) == 0:\n return []\n\n num_divisions_2d = self.num_divisions[[0, 2]]\n min_voxel_coord_2d = self.min_voxel_coord[[0, 2]]\n\n # Truncate index (same as np.floor for positive values) and clip\n # to valid voxel index range\n indices = np.int32(map_index / self.voxel_size) - min_voxel_coord_2d\n indices[:, 0] = np.clip(indices[:, 0], 0, num_divisions_2d[0])\n indices[:, 1] = np.clip(indices[:, 1], 0, num_divisions_2d[1])\n\n return indices\n"
] | [
[
"numpy.append",
"numpy.ceil",
"numpy.diff",
"numpy.dtype",
"numpy.floor",
"numpy.lexsort",
"numpy.amax",
"numpy.amin",
"numpy.clip",
"numpy.int32",
"numpy.array",
"numpy.ascontiguousarray",
"numpy.unique"
]
] |
zmcx16/ReclassifyAnimeCG | [
"f5f95b229447564502564d9ffc7edf6215fec83d"
] | [
"src/data/dataset.py"
] | [
"import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom PIL import Image\nImage.MAX_IMAGE_PIXELS = None\n\nfrom data import get_train_transform, get_test_transform\n\n\nclass CustomDataset(Dataset):\n img_aug = True\n imgs = []\n transform = None\n\n def __init__(self, label_file, image_set, input_size):\n with open(label_file, 'r', encoding=\"utf-8\") as f:\n self.imgs = list(map(lambda line: line.strip().split('|'), f))\n\n if image_set == 'train':\n self.transform = get_train_transform(size=input_size)\n else:\n self.transform = get_test_transform(size=input_size)\n self.input_size = input_size\n\n def __getitem__(self, index):\n # print(self.imgs)\n # print(index)\n # print(len(self.imgs[index]))\n img_path, label = self.imgs[index]\n # print(img_path)\n img = Image.open(img_path).convert('RGB')\n if self.img_aug:\n img = self.transform(img)\n else:\n img = np.array(img)\n img = torch.from_numpy(img)\n\n return img, torch.from_numpy(np.array(int(label)))\n \n def __len__(self):\n return len(self.imgs)\n\n\ndef get_datasets_and_dataloader(label_path, image_set, batch_size, input_size):\n _dataset = CustomDataset(label_path, image_set=image_set, input_size=input_size)\n _dataloader = DataLoader(_dataset, batch_size=batch_size, shuffle=True, num_workers=2)\n return _dataset, _dataloader\n"
] | [
[
"numpy.array",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] |
stormymcstorm/condensa | [
"c7321e0a362f73eca9349769b341a7dd688ee1b9"
] | [
"test/schemes/test_qz.py"
] | [
"# Copyright 2019 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nimport condensa\nfrom condensa import schemes\n\ndef test_float16(device):\n scheme = schemes.Quantize(condensa.float16)\n fc = torch.nn.Linear(100, 10).float().to(device)\n\n scheme.pi(fc)\n assert fc.weight.dtype == torch.float16\n scheme.delta(fc)\n assert fc.weight.dtype == torch.float32\n\nif __name__ == '__main__':\n test_float16('cpu')\n if torch.cuda.is_available():\n test_float16('cpu')\n"
] | [
[
"torch.cuda.is_available",
"torch.nn.Linear"
]
] |
mathischeap/mifem | [
"3242e253fb01ca205a76568eaac7bbdb99e3f059"
] | [
"objects/CSCG/_3d/forms/standard/base/export/field.py"
] | [
"\"\"\"We want to export the field to some data files.\n\"\"\"\n\nfrom root.config.main import *\nfrom screws.freeze.main import FrozenOnly\nfrom screws.miscellaneous.timer import check_filename, check_no_splcharacter\nfrom scipy.io import savemat\n\n\n\nclass _3dCSC_SF_Export_Field(FrozenOnly):\n \"\"\"\"\"\"\n\n def __init__(self, sf):\n \"\"\"\"\"\"\n assert '3dCSCG_standard_form' in sf.standard_properties.tags\n self._sf_ = sf\n self._freeze_self_()\n\n\n def to_file(self, filename, numOfSamples=1e6, regions=None):\n \"\"\"\"\"\"\n filename, extension = check_filename(filename)\n if extension is None: extension = 'txt'\n\n supported_formats = ('txt', 'mat')\n assert extension in supported_formats, \\\n f\"format={extension} is not among the supported formats {supported_formats}.\"\n\n if isinstance(numOfSamples, (int, float)):\n assert numOfSamples > 0, f\"numOfSamples={numOfSamples} is wrong.\"\n numOfSamples = [numOfSamples, numOfSamples, numOfSamples]\n else:\n assert isinstance(numOfSamples, (tuple, list)) and len(numOfSamples) == 3, \\\n f\"numOfSamples={numOfSamples} wrong.\"\n for nos in numOfSamples:\n assert isinstance(nos, (int, float)) and nos > 0, f\"numOfSamples={numOfSamples} wrong.\"\n\n mesh = self._sf_.mesh\n\n if regions is None:\n regions = mesh.domain.regions.names\n elif isinstance(regions, str):\n regions = [regions,]\n else:\n pass\n assert isinstance(regions, (list, tuple)), f\"regions={regions} is wrong.\"\n assert len(set(regions)) == len(regions), f\"regions={regions} has repeated regions.\"\n for i, r in enumerate(regions):\n assert r in mesh.domain.regions, f\"regions[{i}]={r} is wrong.\"\n\n rst = list()\n for i in range(3):\n density = int((numOfSamples[i] / mesh.elements.GLOBAL_num) ** (1/3)) + 1\n interval = 2 / density\n rst.append(np.linspace(-1 + interval/2, 1-interval/2, density))\n\n xyz, v = self._sf_.reconstruct(*rst, regions=regions)\n\n # Now, we gather xyz & v from all cores into Master Core, store in XYZ & V --- BELOW ---\n if rAnk == mAster_rank:\n X = [None for _ in range(mesh.elements.GLOBAL_num)]\n Y = [None for _ in range(mesh.elements.GLOBAL_num)]\n Z = [None for _ in range(mesh.elements.GLOBAL_num)]\n Vx = [None for _ in range(mesh.elements.GLOBAL_num)]\n if self._sf_.k in (1, 2):\n Vy = [None for _ in range(mesh.elements.GLOBAL_num)]\n Vz = [None for _ in range(mesh.elements.GLOBAL_num)]\n for j in mesh.elements.indices:\n X[j] = xyz[j][0]\n Y[j] = xyz[j][1]\n Z[j] = xyz[j][2]\n Vx[j] = v[j][0]\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n Vy[j] = v[j][1]\n # noinspection PyUnboundLocalVariable\n Vz[j] = v[j][2]\n for i in sLave_ranks:\n xyz, v = cOmm.recv(source=i, tag=0)\n for j in xyz:\n X[j] = xyz[j][0]\n Y[j] = xyz[j][1]\n Z[j] = xyz[j][2]\n Vx[j] = v[j][0]\n if self._sf_.k in (1, 2):\n Vy[j] = v[j][1]\n Vz[j] = v[j][2]\n del xyz, v\n else:\n cOmm.send([xyz, v], dest=mAster_rank, tag=0)\n del xyz, v\n\n # Now, we reshape the XYZ and V for export in the master core. 
-------- BELOW ----------\n if rAnk == mAster_rank:\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n X, Y, Z, Vx, Vy, Vz = mesh.do.regionwsie_stack(X, Y, Z, Vx, Vy, Vz)\n else:\n # noinspection PyUnboundLocalVariable\n X, Y, Z, V = mesh.do.regionwsie_stack(X, Y, Z, Vx)\n\n for rn in regions:\n assert rn in X and rn in Y and rn in Z, \"Data not full!\"\n\n x, y, z = X[rn], Y[rn], Z[rn]\n if self._sf_.k in (1, 2):\n vx, vy, vz = Vx[rn], Vy[rn], Vz[rn]\n else:\n # noinspection PyUnboundLocalVariable\n vx = V[rn]\n\n # we take care of the file names ------------------ BELOW -----------------------\n RN = rn[2:] # if regions name is R:center, we select\n assert check_no_splcharacter(RN), f\"region name={RN} wrong.\"\n\n FILE_NAME = filename + '__InRegion_' + RN\n if self._sf_.k in (1, 2):\n FILE_NAME += '__x_y_z_vx_vy_vz'\n else:\n FILE_NAME += '__x_y_z_v'\n FILE_NAME = FILE_NAME + '.' + extension\n\n\n # It's time to do the save or writing ------------------- BELOW -----------------\n\n if extension == 'txt':\n # for .txt, we have to flat the data =====================\n x = x.ravel(order='F')[:,np.newaxis]\n y = y.ravel(order='F')[:,np.newaxis]\n z = z.ravel(order='F')[:,np.newaxis]\n if self._sf_.k in (1, 2):\n vx = vx.ravel(order='F')[:,np.newaxis]\n # noinspection PyUnboundLocalVariable\n vy = vy.ravel(order='F')[:,np.newaxis]\n # noinspection PyUnboundLocalVariable\n vz = vz.ravel(order='F')[:,np.newaxis]\n else:\n vx = vx.ravel(order='F')[:,np.newaxis]\n if self._sf_.k in (1, 2):\n # noinspection PyUnboundLocalVariable\n TO_BE_WRITTEN = np.hstack((x, y, z, vx, vy, vz))\n else:\n TO_BE_WRITTEN = np.hstack((x, y, z, vx))\n # noinspection PyTypeChecker\n np.savetxt(FILE_NAME, TO_BE_WRITTEN)\n\n elif extension == 'mat':\n # for .mat, we save 3-d arrays. ==========================\n m_dic = dict()\n m_dic['x'] = x\n m_dic['y'] = y\n m_dic['z'] = z\n if self._sf_.k in (1, 2):\n m_dic['vx'] = vx\n m_dic['vy'] = vy\n m_dic['vz'] = vz\n else:\n m_dic['v'] = vx\n\n savemat(FILE_NAME, m_dic)\n\n else:\n raise Exception(f\"Format=.{extension} is not supported.\")"
] | [
[
"scipy.io.savemat"
]
] |
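
The export routine in the row above gathers region-wise X/Y/Z/V arrays on the master rank and then writes them either with `numpy.savetxt` or with `scipy.io.savemat` (the only indexed API for this row). A minimal sketch of that final write step, using made-up sample arrays and an invented file name in place of the reconstructed field data:

```python
import numpy as np
from scipy.io import savemat

# Illustrative 3-D sample grid and a scalar field on it (stand-ins for the
# region-wise X/Y/Z/V arrays gathered on the master rank in the row above).
x, y, z = np.meshgrid(np.linspace(0.0, 1.0, 4),
                      np.linspace(0.0, 1.0, 4),
                      np.linspace(0.0, 1.0, 4), indexing='ij')
v = np.sin(x) * np.cos(y) * z

# The 'mat' branch keeps the 3-D arrays as they are.
savemat('field__InRegion_center__x_y_z_v.mat', {'x': x, 'y': y, 'z': z, 'v': v})

# The 'txt' branch flattens column-wise (order='F') and stacks into one table.
table = np.hstack([a.ravel(order='F')[:, np.newaxis] for a in (x, y, z, v)])
np.savetxt('field__InRegion_center__x_y_z_v.txt', table)
```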
LongKt7/Face_Recognize_Pytorch | [
"baa02e633d379abe1001c8b8acb942617177329c",
"baa02e633d379abe1001c8b8acb942617177329c"
] | [
"config.py",
"Face_Alignt/predict_m.py"
] | [
"from easydict import EasyDict as edict\n# from pathlib import Path\nimport torch\nimport os\nfrom torchvision import transforms as trans\nfrom utils.constants import *\nlist_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',\n'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',\n'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']\ndef get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):\n conf = edict()\n conf.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n conf.input_size = [112, 112]\n conf.face_limit = 5 \n conf.min_face_size = 30\n conf.mode = mode\n conf.net_size = net_size\n if mode =='app':\n assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in cogfig.py'\n conf.use_tensor = True\n conf.work_path = WORK_PATH\n conf.model_path = '%s/models'%WORK_PATH\n conf.log_path = '%s/log'%WORK_PATH\n conf.save_path = '%s/save'%WORK_PATH\n conf.facebank_path = '%s/Face_bank'%WORK_PATH\n conf.threshold = threshold\n if use_mtcnn:\n conf.use_mtcnn = True\n else:\n conf.use_mtcnn = False\n #when inference, at maximum detect 10 faces in one image, my laptop is slow\n conf.test_transform = trans.Compose([\n trans.ToTensor(),\n trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n if net_size == 'large':\n conf.use_mobilfacenet = False\n if net_mode == 'ir_se':\n conf.net_mode = 'ir_se' # or 'ir'\n conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH\n conf.url = list_model[1]\n else:\n conf.net_mode = 'ir' # or 'ir'\n conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH\n conf.url = list_model[2]\n if net_size =='mobi':\n conf.use_mobilfacenet = True\n conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH\n conf.url = list_model[0]\n conf.video_source = 0\n\n if mode =='training_eval':\n conf.lr = 1e-3\n conf.milestones = [18,30,42]\n conf.momentum = 0.9\n conf.pin_memory = True\n# conf.num_workers = 4 # when batchsize is 200\n conf.num_workers = 3\n conf.train_root = \"/mnt/01D4A1D481139570/Dataset/Face/casia\"\n conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt' \n conf.batch_size = 4\n conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'\n conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'\n conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'\n conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'\n conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'\n conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'\n return conf",
"from network import PNet,ONet\r\nimport torch,cv2,itertools\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport time\r\nfrom matlab_cp2tform import get_similarity_transform_for_cv2\r\n\r\nimport math\r\ndef alignment(src_img,src_pts, crop_size = (112, 112)):\r\n ref_pts = np.array([ [30.2946, 51.6963],\r\n [65.5318, 51.5014],\r\n [48.0252, 71.7366],\r\n [33.5493, 92.3655],\r\n [62.7299, 92.2041] ])\r\n \r\n if crop_size[1]==112:\r\n ref_pts[:,0] += 8.0\r\n src_pts = np.array(src_pts).reshape(5,2)\r\n \r\n s = np.array(src_pts).astype(np.float32)\r\n r = np.array(ref_pts).astype(np.float32)\r\n\r\n tfm = get_similarity_transform_for_cv2(s, r)\r\n face_img = cv2.warpAffine(src_img, tfm, crop_size)\r\n return face_img\r\ndef resize_square(img, height=128, color=(0, 0, 0)): # resize a rectangular image to a padded square\r\n shape = img.shape[:2] # shape = [height, width]\r\n ratio = float(height) / max(shape) # ratio = old / new\r\n new_shape = [round(shape[0] * ratio), round(shape[1] * ratio)]\r\n dw = height - new_shape[1] # width padding\r\n dh = height - new_shape[0] # height padding\r\n top, bottom = dh // 2, dh - (dh // 2)\r\n left, right = dw // 2, dw - (dw // 2)\r\n img = cv2.resize(img, (new_shape[1], new_shape[0]), interpolation=cv2.INTER_AREA) # resized, no border\r\n return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color), ratio, dw // 2, dh // 2\r\n\r\ndef dotproduct(v1, v2):\r\n return sum((a*b) for a, b in zip(v1, v2))\r\n\r\ndef length(v):\r\n return math.sqrt(dotproduct(v, v))\r\n\r\ndef angle(v1, v2):\r\n return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))\r\ndef get_anchors(scale=64):\r\n '''\r\n compute anchors\r\n return:\r\n u_boxes:tensor([anchor_num,4]) (cx,cy,w,h): real anchors\r\n boxes:tensor([anchor_num,4]) (x1,y1,x2,y2): crop box for ONet,each with size 80\r\n '''\r\n sizes = [float(s) / scale for s in [32]]\r\n \r\n aspect_ratios = [(1.,)]\r\n feature_map_sizes = [int(scale/16)]\r\n \r\n num_layers = len(feature_map_sizes)\r\n u_boxes,boxes = [],[]\r\n for i in range(num_layers):\r\n fmsize = feature_map_sizes[i]\r\n for h,w in itertools.product(range(fmsize),repeat=2):\r\n cx = float(w)/feature_map_sizes[i]\r\n cy = float(h)/feature_map_sizes[i]\r\n \r\n s = sizes[i]\r\n for j,ar in enumerate(aspect_ratios[i]):\r\n u_boxes.append((cx,cy,float(s)*ar,float(s)*ar))\r\n boxes.append((w*16-32,h*16-32,w*16+32,h*16+32)) \r\n return torch.Tensor(u_boxes),torch.Tensor(boxes).long()\r\n\r\ndef nms(bboxes,scores,threshold=0.35):\r\n '''\r\n bboxes(tensor) [N,4]\r\n scores(tensor) [N,]\r\n '''\r\n x1 = bboxes[:,0]\r\n y1 = bboxes[:,1]\r\n x2 = bboxes[:,2]\r\n y2 = bboxes[:,3]\r\n areas = (x2-x1) * (y2-y1)\r\n\r\n _,order = scores.sort(0,descending=True)\r\n keep = []\r\n while order.numel() > 0:\r\n if order.numel() == 1:\r\n i = order.item()\r\n else:\r\n i = order[0].item()\r\n keep.append(i) \r\n\r\n if order.numel() == 1:\r\n break \r\n\r\n xx1 = x1[order[1:]].clamp(min=x1[i]) \r\n yy1 = y1[order[1:]].clamp(min=y1[i])\r\n xx2 = x2[order[1:]].clamp(max=x2[i])\r\n yy2 = y2[order[1:]].clamp(max=y2[i])\r\n\r\n w = (xx2-xx1).clamp(min=0)\r\n h = (yy2-yy1).clamp(min=0)\r\n inter = w*h\r\n\r\n ovr = inter / (areas[i] + areas[order[1:]] - inter) \r\n ids = (ovr<=threshold).nonzero().squeeze()\r\n if ids.numel() == 0:\r\n break\r\n order = order[ids+1] \r\n return torch.LongTensor(keep)\r\n \r\ndef decode_box(loc, size=64):\r\n variances = [0.1,0.2]\r\n anchor,crop = 
get_anchors(scale=size)\r\n cxcy = loc[:,:2] * variances[0] * anchor[:,2:] + anchor[:,:2]\r\n wh = torch.exp(loc[:,2:] * variances[1]) * anchor[:,2:]\r\n boxes = torch.cat([cxcy-wh/2,cxcy+wh/2],1)\r\n \r\n return boxes,anchor,crop\r\n \r\ndef decode_ldmk(ldmk,anchor):\r\n variances = [0.1,0.2]\r\n index_x = torch.Tensor([0,2,4,6,8]).long()\r\n index_y = torch.Tensor([1,3,5,7,9]).long()\r\n ldmk[:,index_x] = ldmk[:,index_x] * variances[0] * anchor[:,2].view(-1,1) + anchor[:,0].view(-1,1)\r\n ldmk[:,index_y] = ldmk[:,index_y] * variances[0] * anchor[:,3].view(-1,1) + anchor[:,1].view(-1,1)\r\n return ldmk\r\n \r\nimport os\r\n# list_per = []\r\ndef detect(file, pic = None):\r\n def change(boxes,ldmks, h, w, pad1):\r\n index_x = torch.LongTensor([0,2,4,6,8])\r\n index_y = torch.LongTensor([1,3,5,7,9])\r\n if h <= w:\r\n boxes[:,1] = boxes[:,1]*w-pad1\r\n boxes[:,3] = boxes[:,3]*w-pad1\r\n boxes[:,0] = boxes[:,0]*w\r\n boxes[:,2] = boxes[:,2]*w \r\n ldmks[:,index_x] = ldmks[:,index_x] * w\r\n ldmks[:,index_y] = ldmks[:,index_y] * w - torch.Tensor([pad1])\r\n else:\r\n boxes[:,1] = boxes[:,1]*h\r\n boxes[:,3] = boxes[:,3]*h\r\n boxes[:,0] = boxes[:,0]*h-pad1\r\n boxes[:,2] = boxes[:,2]*h-pad1\r\n ldmks[:,index_x] = ldmks[:,index_x] * h - torch.Tensor([pad1])\r\n ldmks[:,index_y] = ldmks[:,index_y] * h \r\n return boxes,ldmks\r\n if not isinstance(file, np.ndarray):\r\n im = cv2.imread(file)\r\n else:\r\n im = file\r\n if im is None:\r\n print(\"can not open image:\", file)\r\n return\r\n\r\n # pad img to square\r\n h, w,_ = im.shape\r\n\r\n dim_diff = np.abs(h - w)\r\n pad1, pad2 = dim_diff //2, dim_diff - dim_diff // 2\r\n pad = ((pad1,pad2),(0,0),(0,0)) if h<=w else ((0,0),(pad1, pad2),(0,0))\r\n img = np.pad(im, pad,'constant', constant_values=128)\r\n \r\n #get img_pyramid\r\n img_scale, img_size = 0,int((img.shape[0]-1)/32)\r\n while img_size > 0:\r\n img_scale += 1\r\n img_size /= 2\r\n if img_scale == 6:\r\n break\r\n img_size = 64\r\n img_pyramid = []\r\n t_boxes,t_probs, t_anchors, t_crops, t_which = None, None, None, None, None\r\n \r\n for scale in range(4):\r\n # print('scale:{0} img_size:{1}'.format(scale, img_size))\r\n input_img = cv2.resize(img,(img_size, img_size))\r\n img_pyramid.append(input_img.transpose(2,0,1))\r\n im_tensor = torch.from_numpy(input_img.transpose(2,0,1)).float()\r\n if use_gpu:\r\n im_tensor = im_tensor.cuda()\r\n #get conf and loc(box)\r\n if use_gpu:\r\n torch.cuda.synchronize()\r\n loc,conf = pnet(torch.unsqueeze(im_tensor,0))\r\n if use_gpu:\r\n torch.cuda.synchronize()\r\n \r\n # print('forward time:{}s'.format(e_t-s_t)) \r\n loc, conf = loc.detach().cpu(),conf.detach().cpu()\r\n loc, conf = loc.data.squeeze(0),F.softmax(conf.squeeze(0))\r\n boxes, anchor, crop = decode_box(loc,size=img_size)\r\n which_img = torch.tensor([scale]).long().expand((crop.shape[0],))\r\n \r\n #add box into stack\r\n if scale == 0:\r\n t_boxes, t_confs, t_anchors, t_crops, t_which = boxes, conf, anchor, crop, which_img\r\n else:\r\n t_boxes = torch.cat((t_boxes, boxes),0)\r\n t_confs = torch.cat((t_confs, conf),0)\r\n t_anchors = torch.cat((t_anchors, anchor),0)\r\n t_crops = torch.cat((t_crops, crop),0)\r\n t_which = torch.cat((t_which, which_img),0)\r\n img_size *= 2\r\n\r\n #get right boxes and nms\r\n t_confs[:,0] = 0.6\r\n max_conf, labels = t_confs.max(1)\r\n if labels.long().sum().item() is 0:\r\n return None\r\n ids = labels.nonzero().squeeze(1)\r\n t_boxes, t_confs, t_anchors, t_crops, t_which = t_boxes[ids], t_confs[ids], t_anchors[ids], t_crops[ids], 
t_which[ids]\r\n max_conf = max_conf[ids]\r\n \r\n keep = nms(t_boxes, max_conf)\r\n t_boxes, max_conf, t_anchors, t_crops, t_which = t_boxes[keep], max_conf[keep], t_anchors[keep], t_crops[keep], t_which[keep]\r\n\r\n t_boxes = t_boxes.detach().numpy()\r\n max_conf = max_conf.detach().numpy()\r\n \r\n #get crop and ldmks\r\n crop_imgs = []\r\n for i in range(t_boxes.shape[0]):\r\n img = img_pyramid[t_which[i]]\r\n crop = t_crops[i].numpy()\r\n _,h_,w_ = img.shape\r\n o_x1,o_y1,o_x2,o_y2 = max(crop[0],0),max(crop[1],0),min(crop[2],w_),min(crop[3],h_)\r\n c_x1 = 0 if crop[0] >=0 else -crop[0]\r\n c_y1 = 0 if crop[1] >=0 else -crop[1]\r\n c_x2 = 64 if crop[2] <= w_ else 64 - (crop[2] - w_)\r\n c_y2 = 64 if crop[3] <= h_ else 64 - (crop[3] - h_)\r\n crop_img = np.ones((3,64,64))*128\r\n np.copyto(crop_img[:,c_y1:c_y2,c_x1:c_x2],img[:,o_y1:o_y2,o_x1:o_x2])\r\n crop_imgs.append(crop_img)\r\n crop_imgs = torch.from_numpy(np.array(crop_imgs)).float()\r\n if use_gpu:\r\n crop_imgs = crop_imgs.cuda()\r\n t_ldmks = onet(crop_imgs).detach().cpu()[:,10,:].squeeze(1)\r\n t_ldmks = decode_ldmk(t_ldmks, t_anchors)\r\n t_boxes, t_ldmks = change(t_boxes,t_ldmks, h, w, pad1)\r\n t_faces = []\r\n for i in range(len(t_boxes)):\r\n box, prob, ldmk = t_boxes[i], max_conf[i], t_ldmks[i]\r\n if prob <= 0.7:\r\n continue\r\n ldmk_fn = ldmk.reshape(5,2)\r\n x1 = min(int(box[0])-5, 0)\r\n x2 = min(int(box[2]) -5, 0)\r\n y1 = max(int(box[1])+5, im.shape[1])\r\n y2 = max(int(box[3])+5, im.shape[2])\r\n face = alignment(im, ldmk_fn)\r\n cv2.rectangle(im, (x1,y1),(x2,y2), (255,0,0), 1)\r\n cv2.imwrite('a.png',im) \r\n t_faces.append(face)\r\n return t_boxes, t_faces\r\nimport glob, tqdm\r\nclass Face_Alignt():\r\n def __init__(self, use_gpu = True):\r\n pnet,onet = PNet(),ONet() \r\n pnet.load_state_dict(torch.load('weight/msos_pnet_rotate.pt',map_location=lambda storage, loc:storage), strict=False) \r\n onet.load_state_dict(torch.load('weight/msos_onet_rotate.pt',map_location=lambda storage, loc:storage), strict=False)\r\n onet.float()\r\n pnet.eval()\r\n onet.eval()\r\n if use_gpu:\r\n torch.cuda.set_device(0)\r\n pnet.cuda()\r\n onet.cuda()\r\n else:\r\n torch.set_num_threads(1)\r\n def align_multi(img, limit=None, min_face_size=30.0):\r\n return detect(img)"
] | [
[
"torch.cuda.is_available"
],
[
"torch.unsqueeze",
"numpy.ones",
"torch.load",
"numpy.abs",
"torch.cuda.synchronize",
"torch.exp",
"numpy.copyto",
"torch.set_num_threads",
"torch.tensor",
"numpy.array",
"numpy.pad",
"torch.LongTensor",
"torch.cat",
"torch.Tensor",
"torch.cuda.set_device"
]
] |
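
The `alignment()` helper in `Face_Alignt/predict_m.py` warps a detected face onto a fixed 5-point template before recognition. A self-contained sketch of the same idea, with `cv2.estimateAffinePartial2D` standing in for the repo's `matlab_cp2tform`-based similarity estimate; the landmark coordinates in the usage comment are hypothetical:

```python
import cv2
import numpy as np

# Reference 5-point template for a 112x112 crop: the same constants used by
# alignment() above, with the +8 px x-shift applied for the 112-wide case.
REF_PTS = np.array([[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
                    [33.5493, 92.3655], [62.7299, 92.2041]], dtype=np.float32)
REF_PTS[:, 0] += 8.0

def align_face(img_bgr, landmarks_5x2, crop_size=(112, 112)):
    """Warp a face to the canonical crop from 5 detected landmarks.

    cv2.estimateAffinePartial2D is used here as a stand-in for the repo's
    matlab_cp2tform helper; it also fits a 4-DoF similarity transform
    (rotation, uniform scale, translation).
    """
    src = np.asarray(landmarks_5x2, dtype=np.float32).reshape(5, 2)
    tfm, _ = cv2.estimateAffinePartial2D(src, REF_PTS, method=cv2.LMEDS)
    return cv2.warpAffine(img_bgr, tfm, crop_size)

# Hypothetical usage (invented landmark values):
# face_112 = align_face(cv2.imread('person.jpg'),
#                       [[101, 120], [160, 118], [131, 155], [108, 185], [158, 183]])
```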
Splendon/examples | [
"ed4a8a01857b6ddca49559141acf5d0986eb01e1"
] | [
"utils/tests/test_util.py"
] | [
"# Copyright 2019 Graphcore Ltd.\nfrom statistics import mean\nimport numpy as np\nimport os\nimport re\nimport subprocess\nimport sys\nimport time\n\n\n\"\"\"Library of utility functions common between frameworks\"\"\"\n\n\ndef parse_results_for_speed(output, iter_tolerance, speed_tolerance):\n \"\"\"Look for <iter number> sec/itr. <speed number> {other stuff}\"\"\"\n found_a_result = False\n\n for line in output.split(\"\\n\"):\n matches = re.match(r\"([\\d.]+) +sec/itr. +([\\d.]+)\", line)\n if matches:\n found_a_result = True\n iterations, speed = matches.groups()\n iterations = float(iterations)\n speed = float(speed)\n _verify_model_numbers(\n iter_tolerance, iterations, speed_tolerance, speed, line\n )\n\n if not found_a_result:\n raise AssertionError(\"No results detected in this run\")\n\n\ndef parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):\n \"\"\"Look for Accuracy=<accuracy>%\"\"\"\n\n accuracies = []\n for line in output.split(\"\\n\"):\n if re.match(r\" + Accuracy=+([\\d.]+)%\", line):\n accuracy = float(re.match(r\" + Accuracy=+([\\d.]+)%\", line).groups()[0])\n accuracies.append(accuracy)\n elif re.search(r\"Validation accuracy\", line):\n accuracy_str = re.search(r\"accuracy:\\s(.*)\", line).group(1)\n accuracy = float(accuracy_str[:accuracy_str.rfind(\"%\")])\n accuracies.append(accuracy)\n\n if len(accuracies) == 0:\n raise AssertionError(\"No results detected in this run\")\n elif len(accuracies) != len(expected_accuracies):\n raise AssertionError(\"Expected accuracies and parsed accuracies have\"\n \" different lengths\")\n\n _verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)\n\n\ndef _verify_model_numbers(iter_tolerance, iterations,\n speed_tolerance, speed, line):\n iter_error = \"\"\n speed_error = \"\"\n\n # Verify iteration speed\n if iterations > iter_tolerance[1]:\n iter_error = (\"The time per iteration has regressed above\"\n \" the tolerance maximum: \" +\n str(iter_tolerance[1]))\n elif iterations < iter_tolerance[0]:\n iter_error = (\"Time taken to compete an iteration was \"\n \"suspiciously fast. Please verify the model\"\n \" is operating correctly and tune tolerances\"\n \" accordingly.\")\n\n # Verify item processing speed\n if speed < speed_tolerance[0]:\n speed_error = (\"The number of items processed per second\"\n \" has regressed below the tolerance: \" +\n str(speed_tolerance[0]))\n elif speed > speed_tolerance[1]:\n speed_error = (\"The number of items processed per second\"\n \" was suspiciously high. 
Please verify the\"\n \" model is behaving correctly and tune\"\n \" tolerances accordingly.\")\n\n if iter_error and speed_error:\n sys.stderr.write(\"\\n\".join([line, iter_error, speed_error]))\n raise AssertionError(\"Timings out of tolerance range\")\n elif iter_error or speed_error:\n sys.stderr.write(line)\n raise AssertionError(iter_error + speed_error)\n\n\ndef _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):\n \"\"\"Asserts a list of accuracies is within a list of expected accuracies\n with a tolerance applied.\n\n Args:\n accuracies: A list of floats representing the accuracies (%) produced\n by the model at each step.\n expected_accuracy: A list of floats representing the expected\n accuracies (%) produced by the model at each step.\n acc_tolerance: A float representing a percentage tolerance applied on\n top of the expected accuracies that the accuracies produced by\n the model should sit within.\n\n Raises:\n Assertion Error: Accuracy produced by the model are not within\n the expected limits.\n \"\"\"\n\n for iter_num in range(len(accuracies)):\n exp_acc = expected_accuracy[iter_num]\n exp_acc_str = (\n \"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]\".format(\n \"Expected accuracy (%)\".ljust(22),\n exp_acc,\n acc_tolerance,\n exp_acc - acc_tolerance,\n exp_acc + acc_tolerance,\n 2\n )\n )\n acc = accuracies[iter_num]\n acc_str = \"{} = {:.{}f}\".format(\n \"Accuracy (%)\".ljust(22),\n acc,\n 2\n )\n full_acc_str = \"{}\\n{}\".format(acc_str, exp_acc_str)\n if acc < exp_acc - acc_tolerance:\n raise AssertionError(\n \"After iteration {}, the model is less accurate\"\n \" than expected.\\n\"\n \"{}\".format(iter_num + 1, full_acc_str)\n )\n elif acc > exp_acc + acc_tolerance:\n raise AssertionError(\n \"After iteration {}, the model is producing an accuracy\"\n \" that is suspiciously high and should be reviewed.\\n\"\n \"{}\".format(iter_num + 1, full_acc_str)\n )\n\n\ndef assert_result_equals_tensor_value(output, tensor):\n \"\"\"Searches for a single tensor result in the first line of the output\n\n\n Searches the first line of the string output for a line with format\n '[array([3., 8.], dtype=float32)]' and asserts its equal to the numpy\n tensor argument\n\n Args:\n output: String containing the string representation of a numpy\n tensor\n tensor: numpy tensor representing the expected result\n\n Returns:\n None\n\n Raises:\n Assertion Error: Output is not in correct format\n Assertion Error: Output does not contain a string representation\n of a numpy array\n Assertion Error: Output numpy array does not equal the expected\n numpy array\n \"\"\"\n # TODO - np representation over multiple lines\n # TODO - large np array output\n # TODO - multiple dimension np output\n list_regex = r\"^\\[.*?\\]$\"\n np_array_str_regex = r\"array\\(.*?, dtype=.*?\\)$\"\n first_line = output.split(\"\\n\")[0]\n if not re.match(list_regex, first_line):\n raise AssertionError(\n \"Result not in expected string format.\"\n \" Expecting stringified list \"\n \" eg. [array([3., 8.], dtype=float32)]\"\n )\n\n contents = first_line[1:-1]\n if not re.match(np_array_str_regex, contents):\n raise AssertionError(\n \"Expecting numpy representation \"\n \"array with dtype \"\n \"eg. 
array([3., 8.], dtype=float32)\"\n )\n\n assert contents == np.array_repr(tensor), (\n \"Output value {} does not \"\n \"equal expected value {}\".format(np.array_repr(contents), tensor)\n )\n\n\ndef parse_results_for_ipus_used(output):\n \"\"\"Finds the number of IPUs used in the model by looking for\n string with format ' On 2 IPUs.' in output\"\"\"\n shards_regex = r\" On ([\\d.]+) IPUs.\"\n for line in output.split(\"\\n\"):\n matches = re.match(shards_regex, line)\n if matches:\n shards = matches.group(1)\n return int(shards)\n raise AssertionError(\"Expecting line detailing IPU usage \"\n \"eg. ' On 2 IPUs.'\")\n\n\ndef assert_shards(output, expected_shards):\n \"\"\"Verify the expected number of shards used were actually\n used\"\"\"\n actual_shards = parse_results_for_ipus_used(output)\n assert actual_shards == expected_shards\n\n\ndef get_final_accuracy(output):\n \"\"\"Find and return the accuracy reported in a test's output.\"\"\"\n result_regex = r\"Accuracy=([\\d.]+)\\%\"\n result_list = parse_results_with_regex(output, result_regex)\n result = result_list[0]\n return result[-1]\n\n\ndef get_final_loss(output):\n \"\"\"Find and return the loss reported in a test's output.\"\"\"\n result_regex = r\"Loss=([\\d.]+)\"\n result_list = parse_results_with_regex(output, result_regex)\n result = result_list[0]\n return result[-1]\n\n\ndef get_average_speeds(output):\n \"\"\"Finds the average seconds/iteration and tokens/second\n\n Args:\n output: String representing the output of a test.\n\n Returns:\n A tuple where the first element is a float representing\n the average iterations per second and the second the\n average tokens processed per second\n \"\"\"\n\n result_regex = r\"([\\d.]+) +sec/itr. +([\\d.]+)\"\n results = parse_results_with_regex(output, result_regex)\n\n itr_sec_list = results[0]\n tokens_sec_list = results[1]\n\n return mean(itr_sec_list), mean(tokens_sec_list)\n\n\ndef parse_results_with_regex(output, regex):\n \"\"\"Find and returns the regex matching results in output\n\n Looks through the output line by line looking for a matching regex.\n The function assembles a list of lists where each parent list is\n the results for that position in the regex string and each item in\n the child lists represents an order of the results found in the output\n\n Args:\n output: String representing the output of a test.\n regex: Regex of result to find.\n\n Returns:\n A list of lists of floats. Parent list represents the result at each\n position in the regex. 
Child list contains results received in the\n order they were output.\n\n Raises:\n AssertionError: a line matching the regex could not be found in the\n output\n \"\"\"\n\n results = []\n\n for line in output.split(\"\\n\"):\n matches = re.search(regex, line)\n if matches:\n number_of_results = matches.lastindex\n if results == []:\n results = [None] * number_of_results\n for match_index in range(0, number_of_results):\n result = float(matches.group(match_index + 1))\n if results[match_index]:\n results[match_index].append(result)\n continue\n results[match_index] = [result]\n\n if results == []:\n raise AssertionError(\"Regex {} not found in result\".format(regex))\n\n return results\n\n\ndef get_total_epochs(output):\n \"\"\"Finds the number of epochs model has run through by looking for\n string with format 'Epoch #3' in the models raw output\"\"\"\n epochs = None\n for line in output.split(\"\\n\"):\n epoch_match = re.search(r\"Epoch #([\\d.]+)\", line)\n if epoch_match:\n epochs = int(epoch_match.group(1))\n if not epochs:\n raise AssertionError(\"Epochs not found in output, eg. \"\n \"Epoch #3\")\n return epochs\n\n\ndef assert_total_run_time(total_time, time_range):\n \"\"\"Checks total run time is within the required range\n\n Args:\n total_time: float representing number of seconds the test took to\n run\n time_range: a tuple of floats where the first element is the minimum\n time the test should run in in seconds and the second the\n maximum\n\n Raises:\n AssertionError: if the total_time is not between the minimum time\n and maximum time\n \"\"\"\n minimum_time = time_range[0]\n maximum_time = time_range[1]\n assert total_time >= minimum_time\n assert total_time <= maximum_time\n\n\ndef assert_final_accuracy(output, minimum, maximum):\n \"\"\"Gets the final accuracy given a raw model output and checks its value\n is between the minimum and maximum\n\n Args:\n output: String representing the raw output of a model\n minimum: a float representing a percentage (between 0.0% and 100%)\n that is the minimum accuracy for the model after running\n maximum: a float representing a percentage (between 0.0% and 100%)\n that is the maximum accuracy for the model after running\n\n Raises:\n AssertionError: if the final accuracy is not between the maximum and\n minimum percentages\n \"\"\"\n accuracy = get_final_accuracy(output)\n assert accuracy >= minimum\n assert accuracy <= maximum\n\n\ndef run_python_script_helper(cwd, script, **kwargs):\n \"\"\"A function that given a path and python script name, runs the script\n with kwargs as the command line arguments\n\n Args:\n cwd: string representing the directory of the python script\n script: string representing the full name of the python script\n kwargs: dictionary of string key and values that form the command\n line arguments when the script is run.\n\n Returns:\n A string representing the raw output of the python script run\n\n Raises:\n AssertionError: if the final accuracy is not between the maximum and\n minimum percentages\n \"\"\"\n py_version = \"python{}\".format(sys.version_info[0])\n cmd = [py_version, script]\n if kwargs:\n args = [\n str(item) for sublist in kwargs.items() for item in sublist if item != \"\"\n ]\n cmd.extend(args)\n out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)\n print(out)\n return out\n\n\ndef run_test_helper(subprocess_function, total_run_time=None,\n total_run_time_tolerance=0.1, **kwargs):\n \"\"\"Helper function for running tests\n\n Takes in testable parameters, runs the test and 
checks the relevant\n parameters against test results\n\n Args:\n subprocess_function: the function that runs a subprocess of\n the model in question\n total_run_time_range: tuple float representing the expected\n upper and lower bounds for the total time taken to run\n the test\n\n Returns:\n A String representing the raw output of the models subprocess\n\n Raises:\n AssertionError: If the accuracy, time taken etc. are not within\n the expected bounds\n \"\"\"\n\n start_time = time.time()\n\n out = subprocess_function(**kwargs)\n\n total_time = time.time() - start_time\n\n if total_run_time:\n total_run_time_range = range_from_tolerances(\n total_run_time, total_run_time_tolerance\n )\n assert_total_run_time(total_time, total_run_time_range)\n\n return out\n\n\ndef range_from_tolerances(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied symmetrically across the value argument\n\n Returns:\n A tuple of floats, the first element representing the tolerance\n applied below the value (minimum) and the second above (maximum)\n \"\"\"\n return (\n get_minimum_with_tolerance(value, tolerance),\n get_maximum_with_tolerance(value, tolerance),\n )\n\n\ndef get_minimum_with_tolerance(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n below the value\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied to the value argument\n\n Returns:\n A float representing the tolerance applied below the value (maximum)\n \"\"\"\n return value * (1 - tolerance)\n\n\ndef get_maximum_with_tolerance(value, tolerance):\n \"\"\"Helper function that takes a value and applies the tolerance\n above the value\n\n Args:\n value: a float representing the mean value to which the tolerance\n will be applied\n tolerance: a float representing a percentage (between 0.0 and 1.0)\n which is applied to the value argument\n\n Returns:\n A float representing the tolerance applied above the value (minimum)\n \"\"\"\n return value * (1 + tolerance)\n\n\ndef check_data_exists(data_path, expected_files_list):\n \"\"\"Helper function that checks the expected data exists in a directory\n\n Args:\n data_path: A string representing the directory of where the\n data is expected to be\n expected_files_list: a list of strings representing the expected\n file names in the data_path directory\n\n Returns:\n A boolean which represents whether the expected files are found in\n the data_path directory\n \"\"\"\n\n if os.path.exists(data_path):\n for filename in expected_files_list:\n if not os.path.isfile(os.path.join(data_path, filename)):\n return False\n return True\n\n return False\n"
] | [
[
"numpy.array_repr"
]
] |
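
`parse_results_for_speed`, `parse_results_for_accuracy` and `get_average_speeds` in `utils/tests/test_util.py` all hinge on the same `sec/itr.` and `Accuracy=` regexes. A short illustration of those patterns on a fabricated log snippet (the log text below is invented for the example):

```python
import re
from statistics import mean

# Fabricated benchmark log in the format the helpers above expect.
fake_output = """\
Epoch #1
0.052 sec/itr. 1234.5 tokens/s
0.049 sec/itr. 1301.2 tokens/s
 Accuracy=91.25%
"""

itr_times, speeds, accuracies = [], [], []
for line in fake_output.split("\n"):
    speed_match = re.match(r"([\d.]+) +sec/itr. +([\d.]+)", line)
    if speed_match:
        itr_times.append(float(speed_match.group(1)))
        speeds.append(float(speed_match.group(2)))
    acc_match = re.match(r" + Accuracy=+([\d.]+)%", line)
    if acc_match:
        accuracies.append(float(acc_match.group(1)))

print(mean(itr_times), mean(speeds))  # roughly 0.0505 and 1267.85
print(accuracies)                     # [91.25]
```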
MasterScott/Formasaurus | [
"d7d916237a6d2ca4c80c4c8ae5d66999c8beebed"
] | [
"tests/test_fieldtype_model.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division\nimport itertools\n\nimport numpy as np\nfrom sklearn_crfsuite.metrics import flat_accuracy_score\n\nfrom formasaurus.fieldtype_model import (\n train,\n _PRECISE_C1_C2,\n _REALISTIC_C1_C2,\n get_Xy,\n)\n\n\ndef test_training(storage, capsys):\n annotations = (a for a in storage.iter_annotations(\n simplify_form_types=True,\n simplify_field_types=True,\n ) if a.fields_annotated)\n annotations = list(itertools.islice(annotations, 0, 300))\n\n crf = train(\n annotations=annotations,\n use_precise_form_types=False,\n optimize_hyperparameters_iters=2,\n optimize_hyperparameters_folds=2,\n optimize_hyperparameters_jobs=-1,\n full_form_type_names=False,\n full_field_type_names=False\n )\n\n out, err = capsys.readouterr()\n\n assert 'Training on 300 forms' in out\n assert 'realistic form types' in out\n assert 'Best hyperparameters' in out\n\n assert 0.0 < crf.c1 < 2.5\n assert 0.0 < crf.c2 < 0.9\n assert crf.c1, crf.c2 != _REALISTIC_C1_C2\n assert crf.c1, crf.c2 != _PRECISE_C1_C2\n\n form_types = np.asarray([a.type for a in annotations])\n X, y = get_Xy(annotations, form_types, full_type_names=False)\n y_pred = crf.predict(X)\n score = flat_accuracy_score(y, y_pred)\n assert 0.9 < score < 1.0 # overfitting FTW!\n\n field_schema = storage.get_field_schema()\n short_names = set(field_schema.types_inv.keys())\n assert set(crf.classes_).issubset(short_names)\n"
] | [
[
"numpy.asarray"
]
] |
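
`tests/test_fieldtype_model.py` trains a `sklearn-crfsuite` CRF on annotated form fields and scores it with `flat_accuracy_score`. A toy, self-contained version of that train/score loop on invented feature dicts (the features and labels below are illustrative only, not Formasaurus's real field-type schema):

```python
import numpy as np
import sklearn_crfsuite
from sklearn_crfsuite.metrics import flat_accuracy_score

# Toy sequence-labelling data in the dict-feature format sklearn-crfsuite
# expects: one feature dict per form field, one label sequence per form.
X = [[{'bias': 1.0, 'name': 'email'}, {'bias': 1.0, 'name': 'pass'}],
     [{'bias': 1.0, 'name': 'q'}]]
y = [['email', 'password'], ['search query']]

form_types = np.asarray(['login', 'search'])  # analogous to the test's form_types

crf = sklearn_crfsuite.CRF(algorithm='lbfgs', c1=0.1, c2=0.1, max_iterations=50)
crf.fit(X, y)

y_pred = crf.predict(X)
print(flat_accuracy_score(y, y_pred))  # likely 1.0 on this tiny training set
```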
Wentaobi/Udacity | [
"00af9c36b42d6bca5f2d42d2744efed2ddb51587"
] | [
"Self_Driving_Car/P1/LaneLines-P1/P1.py"
] | [
"#importing some useful packages\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\n\n\n#reading in an image\nimage = mpimg.imread('test_images/solidWhiteRight.jpg');\n#printing out some stats and plotting\nprint('This image is:', type(image), 'with dimesions:', image.shape)\nplt.imshow(image); #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\nimport math\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef hsv(img):\n return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n\ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n #defining a blank mask to start with\n mask = np.zeros_like(img)\n\n #defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n #filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n #returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=13):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to\n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4).\n\n Think about things like separating line segments by their\n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. Then, you can average the position of each of\n the lines and extrapolate to the top and bottom of the lane.\n\n This function draws `lines` with `color` and `thickness`.\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n x_size = img.shape[1]\n y_size = img.shape[0]\n lines_slope_intercept = np.zeros(shape=(len(lines),2))\n for index,line in enumerate(lines):\n for x1,y1,x2,y2 in line:\n slope = (y2-y1)/(x2-x1)\n intercept = y1 - x1 * slope\n lines_slope_intercept[index]=[slope,intercept]\n max_slope_line = lines_slope_intercept[lines_slope_intercept.argmax(axis=0)[0]]\n min_slope_line = lines_slope_intercept[lines_slope_intercept.argmin(axis=0)[0]]\n left_slopes = []\n left_intercepts = []\n right_slopes = []\n right_intercepts = []\n # this gets slopes and intercepts of lines similar to the lines with the max (immediate left) and min\n # (immediate right) slopes (i.e. 
slope and intercept within x%)\n for line in lines_slope_intercept:\n if abs(line[0] - max_slope_line[0]) < 0.15 and abs(line[1] - max_slope_line[1]) < (0.15 * x_size):\n left_slopes.append(line[0])\n left_intercepts.append(line[1])\n elif abs(line[0] - min_slope_line[0]) < 0.15 and abs(line[1] - min_slope_line[1]) < (0.15 * x_size):\n right_slopes.append(line[0])\n right_intercepts.append(line[1])\n # left and right lines are averages of these slopes and intercepts, extrapolate lines to edges and center*\n # *roughly\n new_lines = np.zeros(shape=(1,2,4), dtype=np.int32)\n if len(left_slopes) > 0:\n left_line = [sum(left_slopes)/len(left_slopes),sum(left_intercepts)/len(left_intercepts)]\n left_bottom_x = (y_size - left_line[1])/left_line[0]\n left_top_x = (y_size*.575 - left_line[1])/left_line[0]\n if (left_bottom_x >= 0):\n new_lines[0][0] =[left_bottom_x,y_size,left_top_x,y_size*.575]\n if len(right_slopes) > 0:\n right_line = [sum(right_slopes)/len(right_slopes),sum(right_intercepts)/len(right_intercepts)]\n right_bottom_x = (y_size - right_line[1])/right_line[0]\n right_top_x = (y_size*.575 - right_line[1])/right_line[0]\n if (right_bottom_x <= x_size):\n new_lines[0][1]=[right_bottom_x,y_size,right_top_x,y_size*.575]\n for line in new_lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n\n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n\n `initial_img` should be the image before any processing.\n\n The result image is computed as follows:\n\n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\nimport os\nos.listdir(\"test_images/\")\n\n#reading in an image\nfor index, img in enumerate(os.listdir(\"test_images/\")):\n image = mpimg.imread('test_images/' + img)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n 
hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n fig = plt.figure(figsize=(6,10))\n plt.imshow(result, cmap=\"gray\") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\n#reading in an image\nfor index, img in enumerate(os.listdir(\"test_images2/\")):\n image = mpimg.imread('test_images2/' + img)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n fig = plt.figure(figsize=(8,10))\n plt.imshow(result, cmap=\"gray\") #call as plt.imshow(gray, cmap='gray') to show a grayscaled image\n\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\n# from IPython.display import HTML\n\ndef process_image(image):\n # NOTE: The output you return should be a color image (3 channel) for processing video below\n # TODO: put your pipeline here,\n # you should return the final output (image with lines are drawn on lanes)\n\n gray_img = grayscale(image)\n\n hsv_img = hsv(image)\n\n # define range of color in HSV\n lower_yel = np.array([20,100,100])\n upper_yel = np.array([30,255,255])\n lower_wht = np.array([0,0,235])\n upper_wht = np.array([255,255,255])\n\n # Threshold the HSV image to get only yellow/white\n yellow_mask = cv2.inRange(hsv_img, lower_yel, upper_yel)\n white_mask = cv2.inRange(hsv_img, lower_wht, upper_wht)\n # Bitwise-AND mask and original image\n full_mask = cv2.bitwise_or(yellow_mask, white_mask)\n\n subdued_gray = (gray_img / 2).astype('uint8')\n\n boosted_lanes = cv2.bitwise_or(subdued_gray, full_mask)\n\n kernel_size = 5\n blurred_img = gaussian_blur(boosted_lanes,kernel_size)\n\n canny_low_threshold = 60\n canny_high_threshold = 150\n edges_img = canny(blurred_img,canny_low_threshold,canny_high_threshold)\n\n x = edges_img.shape[1]\n y = edges_img.shape[0]\n vertices = np.array([[(x*0.,y),(x*.475, y*.575), (x*.525, y*.575), (x,y)]], dtype=np.int32)\n masked_img = region_of_interest(edges_img, vertices)\n\n hough_rho = 3\n hough_theta = np.pi/180\n hough_threshold = 70\n hough_min_line_length = 70\n hough_max_line_gap = 250\n hough_img = 
hough_lines(masked_img,hough_rho,hough_theta,hough_threshold,hough_min_line_length,hough_max_line_gap)\n\n result = weighted_img(hough_img,image)\n\n #return cv2.cvtColor(masked_img, cv2.COLOR_GRAY2RGB)\n return result\n\n\nwhite_output = 'white.mp4'\nclip1 = VideoFileClip(\"solidWhiteRight.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\nwhite_clip.write_videofile(white_output, audio=False)\n\n\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(white_output))\n\n\nyellow_output = 'yellow.mp4'\nclip2 = VideoFileClip('solidYellowLeft.mp4')\nyellow_clip = clip2.fl_image(process_image)\nyellow_clip.write_videofile(yellow_output, audio=False)\n\n\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(yellow_output))\n\n\nchallenge_output = 'extra.mp4'\nclip2 = VideoFileClip('challenge.mp4')\nchallenge_clip = clip2.fl_image(process_image)\nchallenge_clip.write_videofile(challenge_output, audio=False)\n\n#\n# HTML(\"\"\"\n# <video width=\"960\" height=\"540\" controls>\n# <source src=\"{0}\">\n# </video>\n# \"\"\".format(challenge_output))\n"
] | [
[
"numpy.zeros_like",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.imshow",
"numpy.array",
"matplotlib.image.imread"
]
] |
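
Each of the three near-identical pipelines in `P1.py` starts by boosting yellow/white lane pixels with an HSV mask before blurring and edge detection. A compact sketch of that front end, reading the frame with `cv2.imread` (BGR) rather than `matplotlib.image.imread` (RGB), hence the `COLOR_BGR2HSV`/`COLOR_BGR2GRAY` conversions; the threshold values are the ones hard-coded in the notebook:

```python
import cv2
import numpy as np

def lane_front_end(img_bgr):
    """Colour-boosted edge map, mirroring the first half of process_image().

    The HSV and Canny thresholds are taken from the notebook above; they
    assume daytime footage and are not universally valid.
    """
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    yellow = cv2.inRange(hsv, np.array([20, 100, 100]), np.array([30, 255, 255]))
    white = cv2.inRange(hsv, np.array([0, 0, 235]), np.array([255, 255, 255]))
    lane_mask = cv2.bitwise_or(yellow, white)

    gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    boosted = cv2.bitwise_or((gray // 2).astype(np.uint8), lane_mask)
    blurred = cv2.GaussianBlur(boosted, (5, 5), 0)
    return cv2.Canny(blurred, 60, 150)

# edges = lane_front_end(cv2.imread('test_images/solidWhiteRight.jpg'))
```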
adelavega/pliers | [
"dee21102689c77a56b7da48bf9a0ac10c90be0eb"
] | [
"pliers/tests/extractors/api/test_clarifai_extractors.py"
] | [
"from os.path import join\nfrom ...utils import get_test_data_path\nfrom pliers.extractors import ClarifaiAPIExtractor\nfrom pliers.stimuli import ImageStim\nfrom pliers.extractors.base import merge_results\nimport numpy as np\nimport pytest\n\n\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor():\n image_dir = join(get_test_data_path(), 'image')\n stim = ImageStim(join(image_dir, 'apple.jpg'))\n result = ClarifaiAPIExtractor().transform(stim).to_df()\n assert result['apple'][0] > 0.5\n assert result.ix[:, 5][0] > 0.0\n\n result = ClarifaiAPIExtractor(max_concepts=5).transform(stim).to_df()\n assert result.shape == (1, 9)\n\n result = ClarifaiAPIExtractor(\n min_value=0.9).transform(stim).to_df(object_id=False)\n assert all(np.isnan(d) or d > 0.9 for d in result.values[0, 3:])\n\n concepts = ['cat', 'dog']\n result = ClarifaiAPIExtractor(select_concepts=concepts).transform(stim)\n result = result.to_df()\n assert result.shape == (1, 6)\n assert 'cat' in result.columns and 'dog' in result.columns\n\n\[email protected](\"'CLARIFAI_API_KEY' not in os.environ\")\ndef test_clarifai_api_extractor_batch():\n image_dir = join(get_test_data_path(), 'image')\n stim = ImageStim(join(image_dir, 'apple.jpg'))\n stim2 = ImageStim(join(image_dir, 'obama.jpg'))\n ext = ClarifaiAPIExtractor()\n results = ext.transform([stim, stim2])\n results = merge_results(results)\n assert results['ClarifaiAPIExtractor#apple'][0] > 0.5 or \\\n results['ClarifaiAPIExtractor#apple'][1] > 0.5\n\n # This takes too long to execute\n # video = VideoStim(join(get_test_data_path(), 'video', 'small.mp4'))\n # results = ExtractorResult.merge_stims(ext.transform(video))\n # assert 'Lego' in results.columns and 'robot' in results.columns\n"
] | [
[
"numpy.isnan"
]
] |
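
The `min_value=0.9` assertion in `test_clarifai_api_extractor` accepts either a NaN (concept filtered out by the threshold) or a score above 0.9. The same NaN-tolerant check on a toy score row, in plain NumPy (the values are invented, not real API output):

```python
import numpy as np

# Scores for four concepts after a min_value=0.9 filter: kept concepts stay
# above the threshold, filtered ones become NaN.
scores = np.array([0.97, np.nan, 0.93, np.nan])

assert all(np.isnan(s) or s > 0.9 for s in scores)   # the test's per-element form
assert np.all(np.isnan(scores) | (scores > 0.9))     # equivalent vectorised check
```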
makistsantekidis/opendr | [
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890",
"07dee3b59d3487b9c5a93d6946317178a02c9890"
] | [
"src/opendr/perception/object_tracking_2d/fair_mot/object_tracking_2d_fair_mot_learner.py",
"src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/algorithm/datasets/gen_facial_muscles_data.py",
"projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py"
] | [
"# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport json\nimport torch\nimport ntpath\nimport shutil\nimport numpy as np\nimport onnxruntime as ort\nfrom torchvision.transforms import transforms as T\nfrom opendr.engine.learners import Learner\nfrom opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator\nfrom opendr.perception.object_tracking_2d.logger import Logger\nfrom opendr.perception.object_tracking_2d.datasets.mot_dataset import JointDataset, RawMotDatasetIterator\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.models.model import create_model\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.run import train, evaluate\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.load import load_from_checkpoint\nfrom opendr.perception.object_tracking_2d.datasets.mot_dataset import letterbox, process as process_dataset\nfrom opendr.perception.object_tracking_2d.fair_mot.algorithm.lib.tracker.multitracker import JDETracker\nfrom opendr.engine.data import Image\nfrom opendr.engine.target import TrackingAnnotation, TrackingAnnotationList\nfrom opendr.engine.constants import OPENDR_SERVER_URL\nfrom urllib.request import urlretrieve\n\n\nclass ObjectTracking2DFairMotLearner(Learner):\n def __init__(\n self,\n lr=0.0001,\n iters=-1,\n batch_size=4,\n optimizer=\"adam\",\n lr_schedule=\"\",\n backbone=\"dla_34\",\n network_head=\"\",\n checkpoint_after_iter=0,\n checkpoint_load_iter=0,\n temp_path=\"\",\n device=\"cuda\",\n threshold=0.3,\n scale=1.0,\n lr_step=[20],\n head_conv=256,\n ltrb=True,\n num_classes=1,\n reg_offset=True,\n gpus=[0],\n num_workers=4,\n mse_loss=False,\n reg_loss='l1',\n dense_wh=False,\n cat_spec_wh=False,\n reid_dim=128,\n norm_wh=False,\n wh_weight=0.1,\n off_weight=1,\n id_weight=1,\n num_epochs=30,\n hm_weight=1,\n down_ratio=4,\n max_objs=500,\n track_buffer=30,\n image_mean=[0.408, 0.447, 0.47],\n image_std=[0.289, 0.274, 0.278],\n frame_rate=30,\n min_box_area=100,\n ):\n # Pass the shared parameters on super's constructor so they can get initialized as class attributes\n super(ObjectTracking2DFairMotLearner, self).__init__(\n lr=lr,\n iters=iters,\n batch_size=batch_size,\n optimizer=optimizer,\n lr_schedule=lr_schedule,\n backbone=backbone,\n network_head=network_head,\n checkpoint_after_iter=checkpoint_after_iter,\n checkpoint_load_iter=checkpoint_load_iter,\n temp_path=temp_path,\n device=device,\n threshold=threshold,\n scale=scale,\n )\n\n self.ltrb = ltrb\n self.head_conv = head_conv\n self.num_classes = num_classes\n self.reid_dim = reid_dim\n self.reg_offset = reg_offset\n self.gpus = gpus\n self.num_workers = num_workers\n self.mse_loss = mse_loss\n self.reg_loss = reg_loss\n self.dense_wh = dense_wh\n self.cat_spec_wh = cat_spec_wh\n self.reid_dim = reid_dim\n self.norm_wh = norm_wh\n self.wh_weight = wh_weight\n self.off_weight = off_weight\n self.id_weight = id_weight\n self.num_epochs = num_epochs\n self.lr_step = 
lr_step\n self.hm_weight = hm_weight\n self.down_ratio = down_ratio\n self.max_objs = max_objs\n self.track_buffer = track_buffer\n self.image_mean = image_mean\n self.image_mean = image_mean\n self.image_std = image_std\n self.frame_rate = frame_rate\n self.min_box_area = min_box_area\n\n main_batch_size = self.batch_size // len(self.gpus)\n rest_batch_size = (self.batch_size - main_batch_size)\n self.chunk_sizes = [main_batch_size]\n\n for i in range(len(self.gpus) - 1):\n worker_chunk_size = rest_batch_size // (len(self.gpus) - 1)\n if i < rest_batch_size % (len(self.gpus) - 1):\n worker_chunk_size += 1\n self.chunk_sizes.append(worker_chunk_size)\n\n self.__create_model()\n\n def save(self, path, verbose=False):\n \"\"\"\n This method is used to save a trained model.\n Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name\n of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.\n If self.optimize was ran previously, it saves the optimized ONNX model in a similar fashion, by copying it\n from the self.temp_path it was saved previously during conversion.\n :param path: for the model to be saved, including the folder name\n :type path: str\n :param verbose: whether to print success message or not, defaults to 'False'\n :type verbose: bool, optional\n \"\"\"\n\n if self.model is None and self.ort_session is None:\n raise UserWarning(\"No model is loaded, cannot save.\")\n\n folder_name, _, tail = self.__extract_trailing(path) # Extract trailing folder name from path\n # Also extract folder name without any extension if extension is erroneously provided\n folder_name_no_ext = folder_name.split(sep='.')[0]\n\n # Extract path without folder name, by removing folder name from original path\n path_no_folder_name = ''.join(path.rsplit(folder_name, 1))\n # If tail is '', then path was a/b/c/, which leaves a trailing double '/'\n if tail == '':\n path_no_folder_name = path_no_folder_name[0:-1] # Remove one '/'\n\n # Create model directory\n new_path = path_no_folder_name + folder_name_no_ext\n os.makedirs(new_path, exist_ok=True)\n\n model_metadata = {\"model_paths\": [], \"framework\": \"pytorch\", \"format\": \"\", \"has_data\": False,\n \"inference_params\": {}, \"optimized\": None, \"optimizer_info\": {}}\n\n if self.model.ort_session is None:\n model_metadata[\"model_paths\"] = [\n folder_name_no_ext + \".pth\",\n ]\n model_metadata[\"optimized\"] = False\n model_metadata[\"format\"] = \"pth\"\n\n torch.save({\n 'state_dict': self.model.state_dict()\n }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Saved Pytorch model.\")\n else:\n model_metadata[\"model_paths\"] = [\n folder_name_no_ext + \".onnx\"\n ]\n model_metadata[\"optimized\"] = True\n model_metadata[\"format\"] = \"onnx\"\n\n shutil.copy2(\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"),\n os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata[\"model_paths\"][0])\n )\n if verbose:\n print(\"Saved ONNX model.\")\n\n with open(os.path.join(new_path, folder_name_no_ext + \".json\"), 'w') as outfile:\n json.dump(model_metadata, outfile)\n\n def load(\n self,\n path,\n verbose=False,\n ):\n \"\"\"\n Loads the model from inside the path provided, based on the metadata .json file included.\n :param path: path of the directory the model was saved\n :type path: str\n :param verbose: whether to print success message or not, defaults to 'False'\n 
:type verbose: bool, optional\n \"\"\"\n\n model_name, _, _ = self.__extract_trailing(path) # Trailing folder name from the path provided\n\n with open(os.path.join(path, model_name + \".json\")) as metadata_file:\n metadata = json.load(metadata_file)\n\n if not metadata[\"optimized\"]:\n self.__load_from_pth(self.model, os.path.join(path, metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Loaded Pytorch model.\")\n else:\n self.__load_rpn_from_onnx(os.path.join(path, metadata[\"model_paths\"][0]))\n if verbose:\n print(\"Loaded ONNX model.\")\n\n def reset(self):\n self.tracker.reset()\n\n def fit(\n self,\n dataset,\n val_dataset=None,\n val_epochs=-1,\n logging_path=None,\n silent=False,\n verbose=False,\n train_split_paths=None,\n val_split_paths=None,\n resume_optimizer=False,\n nID=None\n ):\n\n if train_split_paths is None:\n train_split_paths = {\n \"mot20\": os.path.join(\n \"perception\", \"object_tracking_2d\", \"datasets\", \"splits\", \"mot20.train\"\n )\n }\n\n if val_split_paths is None:\n val_split_paths = train_split_paths\n\n logger = Logger(silent, verbose, logging_path)\n\n (\n input_dataset_iterator,\n eval_dataset_iterator,\n ) = self._prepare_datasets(\n dataset,\n val_dataset,\n train_split_paths,\n val_split_paths,\n require_val_dataset=val_epochs > 0,\n )\n\n if nID is None:\n nID = input_dataset_iterator.nID if hasattr(input_dataset_iterator, \"nID\") else dataset.nID\n\n checkpoints_path = os.path.join(self.temp_path, \"checkpoints\")\n if self.checkpoint_after_iter != 0 or self.checkpoint_load_iter != 0:\n os.makedirs(checkpoints_path, exist_ok=True)\n\n start_epoch = 0\n\n if self.checkpoint_load_iter != 0:\n _, _, start_epoch = load_from_checkpoint(\n self.model, os.path.join(checkpoints_path, f\"checkpoint_{self.checkpoint_load_iter}.pth\"),\n self.model_optimizer, resume_optimizer, self.lr, self.lr_step, log=logger.log,\n )\n\n last_eval_result = train(\n self.model,\n self.infer,\n self.model_optimizer,\n input_dataset_iterator,\n eval_dataset_iterator,\n self.batch_size,\n self.num_workers,\n self.gpus,\n self.chunk_sizes,\n self.iters,\n \"train\", # exp_id,\n self.device,\n silent, # hide_data_time,\n 1 if verbose else (-1 if silent else 10), # print_iter,\n self.mse_loss,\n self.reg_loss,\n self.dense_wh,\n self.cat_spec_wh,\n self.reid_dim,\n nID,\n self.norm_wh,\n 1, # num_stack,\n self.wh_weight,\n self.off_weight,\n self.id_weight,\n self.num_epochs,\n self.lr_step,\n self.temp_path,\n self.lr,\n self.reg_offset,\n self.hm_weight,\n checkpoints_path,\n self.checkpoint_after_iter,\n start_epoch,\n val_epochs=val_epochs,\n log=logger.log,\n )\n\n logger.close()\n\n return last_eval_result\n\n def eval(\n self,\n dataset,\n val_split_paths=None,\n logging_path=None,\n silent=False,\n verbose=False,\n ):\n\n logger = Logger(silent, verbose, logging_path)\n\n (\n _,\n eval_dataset_iterator,\n ) = self._prepare_datasets(\n None,\n dataset,\n None,\n val_split_paths,\n require_dataset=False,\n )\n\n result = evaluate(self.infer, dataset)\n\n logger.log(Logger.LOG_WHEN_NORMAL, result)\n\n logger.close()\n\n return result\n\n def infer(self, batch, frame_ids=None, img_size=(1088, 608)):\n\n if self.model is None:\n raise ValueError(\"No model loaded or created\")\n\n self.model.eval()\n\n is_single_image = False\n\n if isinstance(batch, Image):\n batch = [batch]\n is_single_image = True\n elif not isinstance(batch, list):\n raise ValueError(\"Input batch should be an engine.Image or a list of engine.Image\")\n\n if frame_ids is None:\n frame_ids = [-1] 
* len(batch)\n elif is_single_image:\n frame_ids = [frame_ids]\n\n results = []\n\n for image, frame_id in zip(batch, frame_ids):\n\n img0 = image.convert(\"channels_last\", \"bgr\") # BGR\n img, _, _, _ = letterbox(img0, height=img_size[1], width=img_size[0])\n\n # Normalize RGB\n img = img[:, :, ::-1].transpose(2, 0, 1)\n img = np.ascontiguousarray(img, dtype=np.float32)\n img /= 255.0\n\n blob = torch.from_numpy(img).to(self.device).unsqueeze(0)\n\n online_targets = self.tracker.update(blob, img0)\n online_tlwhs = []\n online_ids = []\n online_scores = []\n for t in online_targets:\n tlwh = t.tlwh\n tid = t.track_id\n vertical = tlwh[2] / tlwh[3] > 1.6\n if tlwh[2] * tlwh[3] > self.min_box_area and not vertical:\n online_tlwhs.append(tlwh)\n online_ids.append(tid)\n online_scores.append(t.score)\n\n result = TrackingAnnotationList([\n TrackingAnnotation(\n name=0,\n top=tlwh[0],\n left=tlwh[1],\n width=tlwh[2],\n height=tlwh[3],\n id=id,\n score=score,\n frame=frame_id,\n ) for tlwh, id, score in zip(\n online_tlwhs,\n online_ids,\n online_scores\n )\n ])\n\n results.append(result)\n\n if is_single_image:\n results = results[0]\n\n return results\n\n def optimize(self, do_constant_folding=False, img_size=(1088, 608), optimizable_dcn_v2=False):\n \"\"\"\n Optimize method converts the model to ONNX format and saves the\n model in the parent directory defined by self.temp_path. The ONNX model is then loaded.\n :param do_constant_folding: whether to optimize constants, defaults to 'False'\n :type do_constant_folding: bool, optional\n \"\"\"\n\n if not optimizable_dcn_v2:\n raise Exception(\"Can not optimize the model while DCNv2 implementation is not optimizable\")\n\n if self.model is None:\n raise UserWarning(\"No model is loaded, cannot optimize. 
Load or train a model first.\")\n if self.model.ort_session is not None:\n raise UserWarning(\"Model is already optimized in ONNX.\")\n\n input_shape = [\n 1,\n 3,\n img_size[1],\n img_size[0],\n ]\n\n try:\n self.__convert_to_onnx(\n input_shape,\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"), do_constant_folding\n )\n except FileNotFoundError:\n # Create temp directory\n os.makedirs(self.temp_path, exist_ok=True)\n self.__convert_rpn_to_onnx(\n input_shape,\n os.path.join(self.temp_path, \"onnx_model_temp.onnx\"), do_constant_folding\n )\n\n self.__load_rpn_from_onnx(os.path.join(self.temp_path, \"onnx_model_rpn_temp.onnx\"))\n\n @staticmethod\n def download(model_name, path, server_url=None):\n\n if server_url is None and model_name not in [\n \"crowdhuman_dla34\",\n \"fairmot_dla34\",\n ]:\n raise ValueError(\"Unknown model_name: \" + model_name)\n\n os.makedirs(path, exist_ok=True)\n\n if server_url is None:\n server_url = os.path.join(\n OPENDR_SERVER_URL, \"perception\", \"object_tracking_2d\",\n \"fair_mot\"\n )\n\n url = os.path.join(\n server_url, model_name\n )\n\n model_dir = os.path.join(path, model_name)\n os.makedirs(model_dir, exist_ok=True)\n\n urlretrieve(os.path.join(\n url, model_name + \".json\"\n ), os.path.join(\n model_dir, model_name + \".json\"\n ))\n\n try:\n urlretrieve(os.path.join(\n url, model_name + \".pth\"\n ), os.path.join(\n model_dir, model_name + \".pth\"\n ))\n except Exception:\n urlretrieve(os.path.join(\n url, model_name + \".tckpt\"\n ), os.path.join(\n model_dir, model_name + \".pth\"\n ))\n\n print(\"Downloaded model\", model_name, \"to\", model_dir)\n\n return model_dir\n\n def __convert_to_onnx(self, input_shape, output_name, do_constant_folding=False, verbose=False):\n inp = torch.randn(input_shape).to(self.device)\n input_names = [\"data\"]\n output_names = self.heads.keys()\n\n torch.onnx.export(\n self.model, inp, output_name, verbose=verbose, enable_onnx_checker=True,\n do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names\n )\n\n def __load_from_onnx(self, path):\n \"\"\"\n This method loads an ONNX model from the path provided into an onnxruntime inference session.\n\n :param path: path to ONNX model\n :type path: str\n \"\"\"\n self.model.rpn_ort_session = ort.InferenceSession(path)\n\n # The comments below are the alternative way to use the onnx model, it might be useful in the future\n # depending on how ONNX saving/loading will be implemented across the toolkit.\n # # Load the ONNX model\n # self.model = onnx.load(path)\n #\n # # Check that the IR is well formed\n # onnx.checker.check_model(self.model)\n #\n # # Print a human readable representation of the graph\n # onnx.helper.printable_graph(self.model.graph)\n\n def __load_from_pth(self, model, path, use_original_dict=False):\n all_params = torch.load(path, map_location=self.device)\n model.load_state_dict(all_params if use_original_dict else all_params[\"state_dict\"])\n\n def _prepare_datasets(\n self,\n dataset,\n val_dataset,\n train_split_paths,\n val_split_paths,\n require_dataset=True,\n require_val_dataset=True,\n ):\n\n input_dataset_iterator = None\n eval_dataset_iterator = None\n\n if isinstance(dataset, ExternalDataset):\n\n dataset_path = dataset.path\n if dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(dataset) +\n \") is given as a dataset, but it is not a MOT dataset\")\n\n transforms = T.Compose([T.ToTensor()])\n input_dataset_iterator = JointDataset(\n dataset_path,\n 
train_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n augment=False, transforms=transforms,\n )\n elif isinstance(dataset, DatasetIterator):\n input_dataset_iterator = MappedDatasetIterator(\n dataset,\n lambda d: process_dataset(\n d[0], d[1], self.ltrb, self.down_ratio,\n self.max_objs, self.num_classes, self.mse_loss\n )\n )\n else:\n if require_dataset or dataset is not None:\n raise ValueError(\n \"dataset parameter should be an ExternalDataset or a DatasetIterator\"\n )\n\n if isinstance(val_dataset, ExternalDataset):\n\n val_dataset_path = val_dataset.path\n if val_dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(val_dataset) +\n \") is given as a val_dataset, but it is not a MOT dataset\"\n )\n\n eval_dataset_iterator = RawMotDatasetIterator(\n val_dataset_path,\n val_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n )\n\n elif isinstance(val_dataset, DatasetIterator):\n eval_dataset_iterator = val_dataset\n elif val_dataset is None:\n if isinstance(dataset, ExternalDataset):\n val_dataset_path = dataset.path\n if dataset.dataset_type.lower() != \"mot\":\n raise ValueError(\n \"ExternalDataset (\" + str(dataset) +\n \") is given as a dataset, but it is not a MOT dataset\"\n )\n\n eval_dataset_iterator = RawMotDatasetIterator(\n val_dataset_path,\n val_split_paths,\n down_ratio=self.down_ratio,\n max_objects=self.max_objs,\n ltrb=self.ltrb,\n mse_loss=self.mse_loss,\n )\n\n elif require_val_dataset:\n raise ValueError(\n \"val_dataset is None and can't be derived from\" +\n \" the dataset object because the dataset is not an ExternalDataset\"\n )\n else:\n eval_dataset_iterator = input_dataset_iterator\n else:\n raise ValueError(\n \"val_dataset parameter should be an ExternalDataset or a DatasetIterator or None\"\n )\n\n return input_dataset_iterator, eval_dataset_iterator\n\n def __create_model(self):\n\n heads = {\n 'hm': self.num_classes,\n 'wh': 2 if not self.ltrb else 4,\n 'id': self.reid_dim\n }\n if self.reg_offset:\n heads.update({'reg': 2})\n\n self.heads = heads\n\n self.model = create_model(self.backbone, heads, self.head_conv)\n self.model.to(self.device)\n self.model.ort_session = None\n self.model.heads_names = heads.keys()\n\n self.model_optimizer = torch.optim.Adam(self.model.parameters(), self.lr)\n\n self.tracker = JDETracker(\n self.model,\n self.threshold,\n self.track_buffer,\n self.max_objs,\n self.image_mean,\n self.image_std,\n self.down_ratio,\n self.num_classes,\n self.reg_offset,\n self.ltrb,\n self.frame_rate,\n )\n\n @staticmethod\n def __extract_trailing(path):\n \"\"\"\n Extracts the trailing folder name or filename from a path provided in an OS-generic way, also handling\n cases where the last trailing character is a separator. Returns the folder name and the split head and tail.\n :param path: the path to extract the trailing filename or folder name from\n :type path: str\n :return: the folder name, the head and tail of the path\n :rtype: tuple of three strings\n \"\"\"\n head, tail = ntpath.split(path)\n folder_name = tail or ntpath.basename(head) # handle both a/b/c and a/b/c/\n return folder_name, head, tail\n",
"# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.lib.format import open_memmap\nfrom scipy.spatial import Delaunay\nimport argparse\n\n\ndef find_graph_edges(x):\n points = np.transpose(x[0, :, 0, :, 0])\n print(points.shape)\n tri = Delaunay(points)\n neigh = tri.simplices\n print(neigh.shape)\n G = []\n N = neigh.shape[0]\n for i in range(N):\n G.append((neigh[i][0], neigh[i][1]))\n G.append((neigh[i][0], neigh[i][2]))\n G.append((neigh[i][1], neigh[i][2]))\n # connect the master node (nose) to all other nodes\n for i in range(51):\n G.append((i+1, 17))\n edges = G\n return edges\n\n\ndef gen_muscle_data(data, muscle_path):\n \"\"\"Generate facial muscle data from facial landmarks\"\"\"\n N, C, T, V, M = data.shape\n edges = find_graph_edges(data)\n V_muscle = len(edges)\n fp_sp = open_memmap(muscle_path, dtype='float32', mode='w+', shape=(N, C, T, V_muscle, M))\n # Copy the landmark data to muscle placeholder tensor\n fp_sp[:, :, :, :V, :] = data\n for edge_id, (source_node, target_node) in enumerate(edges):\n fp_sp[:, :, :, edge_id, :] = data[:, :, :, source_node-1, :] - data[:, :, :, target_node-1, :]\n return fp_sp\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Facial muscle data generator.')\n parser.add_argument('--landmark_data_folder', default='./data/CASIA_10fold/')\n parser.add_argument('--muscle_data_folder', default='./data/muscle_data/')\n parser.add_argument('--dataset_name', default='CASIA')\n arg = parser.parse_args()\n part = ['Train', 'Val']\n for p in part:\n if arg.dataset_name == 'CASIA' or arg.dataset_name == 'CK+':\n for i in range(10):\n landmark_path = arg.landmark_data_folder + '/{}/{}_{}.npy'.format(arg.dataset_name, p, i)\n landmark_data = np.load(landmark_path)\n muscle_path = arg.muscle_data_folder + '/{}/{}_muscle_{}.npy'.format(arg.dataset_name, p, i)\n muscle_data = gen_muscle_data(landmark_data, muscle_path)\n elif arg.dataset_name == 'AFEW':\n landmark_path = arg.landmark_data_folder + '/{}/{}.npy'.format(arg.dataset_name, p)\n landmark_data = np.load(landmark_path)\n muscle_path = arg.muscle_data_folder + '/{}/{}_muscle.npy'.format(arg.dataset_name, p)\n muscle_data = gen_muscle_data(landmark_data, muscle_path)\n",
"#!/usr/bin/env python\n\n# Copyright 2020-2021 OpenDR European Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport rospy\nimport numpy as np\nfrom std_msgs.msg import Int16\nfrom sensor_msgs.msg import Image as ROS_Image\nfrom std_msgs.msg import Float32MultiArray\nfrom single_demo_inference import SingleDemoInference\nfrom opendr_bridge import ROSBridge\n\n\nclass SingleDemoGraspCameraStream(object):\n\n def __init__(self, path_to_dt_model, thresh):\n \"\"\"SingleDemoGraspCameraStream initialization\"\"\"\n self.object_locator = SingleDemoInference(path_to_dt_model, thresh)\n self.rgb_image = None\n self.command_publisher = rospy.Publisher('/commands', Float32MultiArray, queue_size=1)\n self.detection_request_sub = rospy.Subscriber(\"/request_detection\", Int16, self.request_callback)\n self.image_sub = rospy.Subscriber(\"/camera/color/raw\", ROS_Image, self.image_callback)\n self.bridge = ROSBridge()\n\n def image_callback(self, data):\n self.rgb_image = self.bridge.from_ros_image(data, encoding='rgb8')\n\n def request_callback(self, data):\n print(\"new request:\")\n print(data.data)\n self.image_analyze(data.data)\n\n def image_analyze(self, msg_id):\n analyze_img = self.rgb_image.opencv()\n flag, bbx, pred_angle, pred_kps_center = self.object_locator.predict(analyze_img)\n bbx = np.asarray(bbx)\n bbx = bbx.astype(int)\n msg = Float32MultiArray()\n\n if (flag > 0):\n print(bbx)\n ctr_X = int((bbx[0] + bbx[2]) / 2)\n ctr_Y = int((bbx[1] + bbx[3]) / 2)\n angle = pred_angle\n ref_x = 640 / 2\n ref_y = 480 / 2\n\n # distance to the center of bounding box representing the center of object\n dist = [ctr_X - ref_x, ref_y - ctr_Y]\n # distance center of keypoints representing the grasp location of the object\n dist_kps_ctr = [pred_kps_center[0] - ref_x, ref_y - pred_kps_center[1]]\n msg.data = [msg_id, dist[0], dist[1], angle, dist_kps_ctr[0], dist_kps_ctr[1]]\n self.command_publisher.publish(msg)\n\n else:\n # 1e10 as a big large enough number out of range. reciever use this number\n # to check whether a detection is available or not\n msg.data = [msg_id, 1e10, 1e10, 1e10, 1e10]\n self.command_publisher.publish(msg)\n\n\nif __name__ == '__main__':\n\n dir_temp = os.path.join(\"./\", \"sdg_temp\")\n rospy.init_node('grasp_server', anonymous=True)\n camera_streamer = SingleDemoGraspCameraStream(os.path.join(dir_temp, \"pendulum\", \"output\", \"model_final.pth\"), 0.8)\n rospy.spin()\n input()\n sys.exit()\n"
] | [
[
"torch.onnx.export",
"torch.load",
"torch.randn",
"torch.from_numpy",
"numpy.ascontiguousarray"
],
[
"numpy.lib.format.open_memmap",
"numpy.load",
"scipy.spatial.Delaunay",
"numpy.transpose"
],
[
"numpy.asarray"
]
] |
pedrob37/Phys_Seg | [
"7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee"
] | [
"Phys_Seg/run.py"
] | [
"import torch\nimport numpy as np\nimport SimpleITK as sitk\nfrom Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img\nfrom Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing\nimport importlib\nfrom Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters\nfrom network_architecture import nnUNet\nimport os\nimport Phys_Seg\n\n\ndef apply_phys_seg(img, out_fname):\n img_itk = sitk.ReadImage(img)\n img_npy = sitk.GetArrayFromImage(img_itk)\n out = sitk.GetImageFromArray(img_npy)\n out.CopyInformation(img_itk)\n sitk.WriteImage(out, out_fname)\n\n\ndef run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,\n # config_file=os.path.join(Phys_Seg.__path__[0], \"config.py\"),\n device=None, overwrite=True):\n \"\"\"\n\n :param mri_fnames: str or list/tuple of str\n :param output_fnames: str or list/tuple of str. If list: must have the same length as output_fnames\n :param sequence: MPRAGE or SPGR (for now)\n :param config_file: config.py\n :param device: either int (for device id) or 'cpu'\n :param overwrite: True or False\n :param postprocess: whether to do postprocessing or not. Postprocessing here consists of simply discarding all\n but the largest predicted connected component. Default False\n :return:\n \"\"\"\n\n physics_input_size = {'MPRAGE': 4,\n 'SPGR': 6}\n\n # Load in model weights\n maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)\n params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)\n\n net = nnUNet(1, 4, physics_flag=True if physics_params else False,\n physics_input=physics_input_size[sequence],\n physics_output=40)\n\n if device == \"cpu\":\n net = net.cpu()\n else:\n net.cuda(device)\n\n net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])\n net.to(f'cuda:{net.device_ids[0]}')\n # net = torch.nn.DataParallel(net)\n\n if not isinstance(mri_fnames, (list, tuple)):\n mri_fnames = [mri_fnames]\n\n if not isinstance(output_fnames, (list, tuple)):\n output_fnames = [output_fnames]\n\n params = torch.load(params_file, map_location=lambda storage, loc: storage)\n\n for in_fname, out_fname in zip(mri_fnames, output_fnames):\n if overwrite or not (os.path.isfile(out_fname)):\n print(\"File:\", in_fname)\n print(\"preprocessing...\")\n try:\n data, aff = read_file(in_fname)\n except RuntimeError:\n print(\"\\nERROR\\nCould not read file\", in_fname, \"\\n\")\n continue\n except AssertionError as e:\n print(e)\n continue\n\n # Process data\n if physics_params is not None:\n physics_params = eval(physics_params)\n # Convert TR to pTD\n physics_params[1] = physics_params[1] - physics_params[0]\n print(physics_params)\n processed_physics = physics_preprocessing(np.array(physics_params), sequence)\n else:\n processed_physics = None\n data = image_preprocessing(patient_data=data)\n\n print(\"prediction (CNN id)...\")\n net.load_state_dict(params['model_state_dict'])\n net.eval()\n seg = predict_phys_seg(net=net,\n patient_data=data,\n processed_physics=processed_physics,\n main_device=device)\n\n print(\"exporting segmentation...\")\n save_segmentation_nifti(seg, aff, out_fname)\n\n # apply_phys_seg(in_fname, out_fname)\n"
] | [
[
"numpy.array",
"torch.load"
]
] |
teristam/openephys-fileIO | [
"8089e7c4aff829c13a79656b8812a3d3e68eb1eb"
] | [
"test/test_binary.py"
] | [
"import numpy as np \nfrom openephys_fileIO.fileIO import *\nfrom openephys_fileIO.Binary import *\n\ndef test_write_binary_data():\n # Test writing of binary data\n \n dataFolder = 'test/data'\n\n # Read the data in original int16 format\n data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,\n num_data_channel=1,num_aux_channel=1, num_adc_channel=1)\n print(headers)\n\n # Write to binary file\n writeBinaryData(dataFolder+'/experiment1/recording1/',data)\n writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',headers)\n\n #load the data in float format (take care of the bit per volt)\n data,headers = load_OpenEphysRecording4BinaryFile(dataFolder,\n num_data_channel=1,num_aux_channel=1, num_adc_channel=1,dtype=float)\n\n # Load binary file using the offical function\n data2, rate2 = Load('test/data')\n\n np.allclose(data.T,data2['100']['0']['0'])\n\ndef test_numpy2binary():\n # test write of numpy data\n Fs = 30000\n x = np.random.randn(3*Fs,4)\n bitVolts = 0.195\n dataFolder = 'test/data2'\n channel_names = [f'CH{i}' for i in range(x.shape[1])]\n writeBinaryData(dataFolder+'/experiment1/recording1/', x, bitVolts)\n writeStructFile(dataFolder+'/experiment1/recording1/structure.oebin',samplerate=30000,\n num_channels= x.shape[1], bit_volts=bitVolts,channel_names=channel_names)\n\n # load the binary file\n data, rate = Load(dataFolder)\n\n np.allclose(x, data['100']['0']['0'])\n\n\n\n\n\n\n \n"
] | [
[
"numpy.allclose",
"numpy.random.randn"
]
] |
FlowerForAlgernon/ai_tetris | [
"7ac0d3875ad9b31fb260f7567a218e0de340c4e4"
] | [
"QLearning.py"
] | [
"\"\"\"\n这份代码使用 Q learning 算法训练并运行俄罗斯方块游戏 ai。其中简化状态空间的方法可参考论文 Adapting Reinforcement Learning to Tetris\n\"\"\"\n\nimport numpy as np\nfrom game import *\n\n\n\nsub_well = 4\nbase = 7\n\n\ndef getStateIndex(field_width, field_height, field_map):\n \"\"\"\n 因为每一列有 7 种不同的情况,所以采用七进制数来作为状态索引\n \"\"\"\n temp = [0 for _ in range(field_width)]\n convert = {}\n for i in range(-(base - 1)//2, (base - 1)//2 + 1):\n convert[i] = i + (base - 1)//2\n for x in range(field_width):\n while temp[x] < field_height and field_map[temp[x]][x] == 0:\n temp[x] += 1\n index = 0\n for i in range(field_width-1):\n if temp[i+1] - temp[i] > (base - 1)//2:\n index += base**i * convert[(base - 1)//2]\n elif temp[i+1] - temp[i] < -(base - 1)//2:\n index += base**i * convert[-(base - 1)//2]\n else:\n index += base**i * convert[temp[i+1] - temp[i]]\n return index\n\n\ndef getAllPossibleLocation(field_width, field_map, block, layout):\n all_possible_position = []\n for x in range(field_width):\n if block.isLegal(layout, (x, -4), field_map) is not State.Middle:\n all_possible_position.append(x)\n return all_possible_position\n\n\ndef findBottomPosition(field_map, block, x, layout):\n y = -4\n while block.isLegal(layout, (x, y), field_map) is not State.Bottom:\n y += 1\n return y - 1\n\n\ndef dropBlock(field_height, field_map, x0, y0, layout):\n for (x, y) in layout:\n if 0 <= y0 + y < field_height:\n field_map[y0 + y][x0 + x] = 1\n if y0 + y < 0:\n return False\n return True\n\n\ndef resetMap(field_width, field_height, field_map):\n count = 0\n for y in range(field_height):\n for x in range(field_width):\n if field_map[y][x] == 1:\n field_map[y][x] = 0\n count += 1\n if count == 4:\n return\n\n\ndef getNewMap(block, position, direction, field_map):\n while block.direction is not direction:\n block.rotate(field_map)\n while block.position[0] > position[0]:\n block.left(field_map)\n while block.position[0] < position[0]:\n block.right(field_map)\n while not block.is_stop:\n block.down(field_map)\n\n\nclass QLearning(Game):\n def __init__(self):\n super(QLearning, self).__init__(sub_well, 1000)\n self.repeat_num = 200\n self.alpha = 0.2\n self.gamma = 0.8\n self.lambda_ = 0.3\n self.epsilon = 0.01\n self.key = [((s, b), (p, d)) for s in range(base**(self.field_width-1)) for b in range(7) for p in range(self.field_width) for d in range(4)]\n self.V = [0 for _ in range(len(self.key))]\n self.Q = dict(zip(self.key, self.V))\n #self.Q = np.load('QL.npy').item()\n\n def checkEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n\n def getBlock(self, block):\n for x in range(len(Blocks_color)):\n if block.color == Blocks_color[x]:\n return x\n\n def getReward(self):\n temp = [0 for _ in range(self.field_width)]\n for x in range(self.field_width):\n while temp[x] < self.field_height and self.field_map[temp[x]][x] == 0:\n temp[x] += 1\n buried_holes = 0\n block = self.block_factory.cur_block\n for (x, y) in block.layout:\n i = 1\n while block.position[1]+y+i < self.field_height and self.field_map[block.position[1]+y+i][x] == 0:\n buried_holes += 1\n i += 1\n return np.var(temp)*(-2) + buried_holes*(-1)\n\n def getAllActions(self, block):\n actions = []\n for direction in range(len(block.layouts)):\n for x in getAllPossibleLocation(self.field_width, self.field_map, block, block.layouts[direction]):\n y = findBottomPosition(self.field_map, block, x, block.layouts[direction])\n if dropBlock(self.field_height, self.field_map, x, y, block.layouts[direction]):\n actions.append((x, 
direction))\n resetMap(self.field_width, self.field_height, self.field_map)\n return actions\n\n def getBestActionWithGreedy(self, block):\n block_type = self.getBlock(block)\n state = getStateIndex(self.field_width, self.field_height, self.field_map)\n actions = self.getAllActions(block)\n actions_value = {}\n for action in actions:\n actions_value[action] = self.Q[((state, block_type), action)]\n if actions_value == {}:\n return None\n elif random.random() > self.epsilon:\n return max(actions_value, key=actions_value.get)\n else:\n return list(actions_value.keys())[random.randint(0, len(actions_value)-1)]\n\n def getBestAction(self, block):\n block_type = self.getBlock(block)\n state = getStateIndex(self.field_width, self.field_height, self.field_map)\n actions = self.getAllActions(block)\n actions_value = {}\n for action in actions:\n actions_value[action] = self.Q[((state, block_type), action)]\n if actions_value == {}:\n return None\n return max(actions_value, key=actions_value.get)\n\n def train(self):\n record = []\n for i in range(1, self.repeat_num+1):\n self.initialize()\n while not self.block_factory.is_failed:\n cur_state = getStateIndex(self.field_width, self.field_height, self.field_map)\n cur_block = self.getBlock(self.block_factory.cur_block)\n cur_action = self.getBestActionWithGreedy(self.block_factory.cur_block)\n cur_index = ((cur_state, cur_block), cur_action)\n if cur_action == None: break\n getNewMap(self.block_factory.cur_block, cur_action, cur_action[1], self.field_map)\n next_state = getStateIndex(self.field_width, self.field_height, self.field_map)\n next_block = self.getBlock(self.block_factory.next_block)\n next_action = self.getBestAction(self.block_factory.next_block)\n next_index = ((next_state, next_block), next_action)\n if next_action == None: break\n self.Q[cur_index] += self.alpha*(self.getReward()+self.gamma*self.Q[next_index] - self.Q[cur_index])\n self.update()\n print(\"Epoch:\"+str(i)+\"/\"+str(self.repeat_num)+\" Lines:\"+ str(self.lines_num)+\" Alpha:\"+str(self.alpha))\n record.append(self.lines_num)\n if i % 100 == 0:\n self.alpha *= 0.5\n np.save('QL.npy', {\"V\": self.V})\n np.save('record_QL.npy', {\"record\": record})\n np.save('QL.npy', self.Q)\n np.save('record_QL.npy', {\"record\": record})\n\n\nclass QLGame(Game):\n def __init__(self):\n super(QLGame, self).__init__(10, 20)\n self.Q = np.load('QL.npy', allow_pickle=True).item()\n self.col = 0\n\n def checkEvents(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit(0)\n\n def getBlock(self, block):\n for x in range(len(Blocks_color)):\n if block.color == Blocks_color[x]:\n return x\n\n def cutFieldMap(self, position):\n new_field_map = [[0]*sub_well for _ in range(self.field_height)]\n for y in range(self.field_height):\n for x in range(sub_well):\n new_field_map[y][x] = self.field_map[y][position+x]\n return new_field_map\n\n def getAllActions(self, field_width, field_height, block, field_map, init_pos):\n actions = {}\n for direction in range(len(block.layouts)):\n for x in getAllPossibleLocation(field_width, field_map, block, block.layouts[direction]):\n y = findBottomPosition(field_map, block, x, block.layouts[direction])\n if dropBlock(field_height, field_map, x, y, block.layouts[direction]):\n block_type = self.getBlock(block)\n state = getStateIndex(field_width, field_height, field_map)\n actions[(x + init_pos, direction)] = self.Q[((state, block_type), (x, direction))]\n resetMap(field_width, field_height, field_map)\n return 
actions\n\n def getBestAction(self):\n actions = {}\n cur_block = Block(self.block_factory.cur_block.screen, sub_well, self.field_height, self.block_factory.cur_block.layouts, self.block_factory.cur_block.direction, self.block_factory.cur_block.color, (0, -4))\n for x in range(self.field_width - sub_well + 1):\n loc_actions = self.getAllActions(sub_well, self.field_height, cur_block, self.cutFieldMap(x), x)\n for k, v in loc_actions.items():\n if k in actions:\n actions[k].append(v)\n else:\n actions[k] = [v]\n for k, v in actions.items():\n actions[k] = max(v)\n return max(actions, key=actions.get) if actions != {} else None\n\n def start(self):\n self.initialize()\n self.initializePygame()\n while not self.block_factory.is_failed:\n self.checkEvents()\n action = self.getBestAction()\n if action == None:\n break\n getNewMap(self.block_factory.cur_block, action, action[1], self.field_map)\n self.update()\n self.draw()\n return self.lines_num\n\n\n\nif __name__ == '__main__':\n train = QLearning()\n train.train()\n \n game = QLGame()\n game.start()\n"
] | [
[
"numpy.var",
"numpy.save",
"numpy.load"
]
] |
starkworld/Python-Course-work | [
"28715f079939129b442aedcd7edb2e0838886ba0"
] | [
"source code/Data Visualization.py"
] | [
"\"\"\"\nAuthor : nkalyan🤠\nimplementing Python Scripts on reading and returning the name no of mails that sent each day in week\n and plot/display them in bar graph\n\n I wrote code In counting to count the number of emails sent by each distinct user. That code may be helpful for this assignment.\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nfrom os import getcwd\n\n\ndef file_path():\n \"\"\"Method that ask the users file name and returns it\"\"\"\n file_name = input(\"Enter the file name:\")\n return file_name\n\n\ndef pop_values(filename):\n \"\"\"Method the reads file and returning value\"\"\"\n file_name = filename\n try: # look for exception\n fp = open(file_name, \"r\")\n except FileNotFoundError: # if found exception display error\n print(\"File Does not exist, please check your file name\")\n exit()\n else: # if no exceptions thrown then performs this block\n with fp:\n for line in fp:\n line = line.strip(\"\\n\")\n offset = line.find(\"From\")\n offset1 = line.find(\"@\")\n line = line[-24:]\n offset3 = line.find(\"@\")\n if offset == 0 and offset1 > 0 and offset3 == -1:\n line = line[:-21]\n yield line\n\n\ndef main():\n \"\"\"Calls the all functions that necessary to get the output\"\"\"\n name = file_path() # calls the file path method\n dictionary = {'Sun': 0, 'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0} # store the day val in dict\n value = pop_values(name)\n count = 0\n for i in value:\n if i in dictionary:\n dictionary[i] += 1\n count += len(i)\n val = dictionary.values()\n keys = dictionary.keys()\n zp = zip(dictionary.keys(), dictionary.values())\n for item in val:\n i = val\n j = keys\n plt.bar(j, i, align='center', alpha=0.5)\n\n plt.ylabel('Number of messages') \n plt.title('Emails per day')\n plt.show() # method that shows the bar graph of our code result\n\n\nif __name__ == '__main__':\n \"\"\"calls the main method\"\"\"\n main()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.bar"
]
] |
jasonrute/puzzle_cube | [
"7e05a21acd26cb30e729ba6a95e14e16c76c1780"
] | [
"analysis/stats.py"
] | [
"\"\"\"\nTraining Statics Tools\n\nA class for loading statistics related to a particular rutraiining session.\n\"\"\"\n\nimport numpy as np\n#from scipy import stats\nimport pandas as pd\nimport os\n\ndef str_between(s, start, end):\n return (s.split(start))[1].split(end)[0]\n\ndef is_stat_file_version(file_name, version):\n return file_name.startswith(\"stats_{}_gen\".format(version)) and file_name.endswith(\".h5\")\n\nclass TrainingStates:\n def __init__(self, versions, directory, verbose=True):\n self.stats_files = self.get_stat_files(versions, directory)\n \n if verbose:\n print(\"Loading files:\")\n for f in self.stats_files:\n print(directory + f)\n\n self.generation_stats = self.load_stats('generation_stats')\n self.game_stats = self.load_stats('game_stats')\n self.move_stats = self.load_stats('self_play_stats')\n\n def get_stat_files(self, versions, directory):\n stat_files = []\n for version in reversed(versions):\n files = [directory + f for f in os.listdir(directory) if is_stat_file_version(f, version)]\n stat_files += list(sorted(files))\n\n return stat_files\n\n def load_stats(self, key_name):\n df_list = []\n for f in self.stats_files:\n path = f\n generation = str_between(f, \"_gen\", \".h5\")\n df = pd.read_hdf(path, key=key_name)\n df['_generation'] = int(generation)\n df_list.append(df)\n\n if df_list:\n stats = pd.concat(df_list, ignore_index=True)\n else:\n return pd.DataFrame()\n \n return stats\n\n def first_move_stats(self):\n \"\"\"\n Note: There is an indexing issue (the index of first_play_stats is the orginal index\n while the index of game_stats is the game number). The easiest fix is to just use\n the values (an array) of the series and not the series itself.\n \"\"\"\n return self.move_stats[self.move_stats['_step_id'] == 0]\n\n def found_target_on_first_move(self):\n return (self.first_move_stats()['shortest_path'] >= 0).values\n\n def lost_but_found_target_on_first_move(self):\n return self.found_target_on_first_move() & ~self.game_stats['win']\n\n def win_but_did_not_find_target_on_first_move(self):\n return ~self.found_target_on_first_move() & self.game_stats['win']\n\nif __name__ == '__main__':\n from pprint import pprint\n versions = ['v0.9.3']\n save_dir = '../save/stats_v0.9.3/'\n #VERSIONS = ['v0.9.2.1', 'v0.9.2']\n #SAVE_DIR = '../save/stats_archive/'\n\n cube_stats = TrainingStates(versions, save_dir)\n\n pprint(cube_stats.generation_stats)\n\n pprint(np.mean(cube_stats.lost_but_found_target_on_first_move()))\n pprint(np.mean(cube_stats.win_but_did_not_find_target_on_first_move()))\n\n\n\n"
] | [
[
"pandas.DataFrame",
"pandas.read_hdf",
"pandas.concat"
]
] |
kufusha/cabot | [
"52a40a39a29f0bd79b6fdd8f961708e09fda9a51"
] | [
"cabot_ui/src/cabot_ui/geojson.py"
] | [
"# Copyright (c) 2020 Carnegie Mellon University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nMapService GeoJson mapper\n\nMapService: https://github.com/hulop/MapService\n\nAuthor: Daisuke Sato<[email protected]>\n\"\"\"\n# -*- coding: utf-8 -*-\nimport sys\nimport traceback\nimport copy\nimport math\nimport json\nimport scipy\nimport scipy.spatial\nimport numpy\nimport numpy.linalg\nimport rospy\nimport tf\nimport angles\nimport geometry_msgs.msg\nfrom cabot_ui import geoutil, i18n\n\nclass Geometry(object):\n \"\"\"Geometry class\"\"\"\n \n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Geometry subclasses object\"\"\"\n if 'type' in dic:\n if dic['type'] == \"Point\":\n cls = Point\n elif dic['type'] == \"LineString\":\n cls = LineString\n if cls == Geometry:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n s = super(Geometry, self)\n if self.__class__.mro()[-2] == s.__thisclass__:\n s.__init__()\n else:\n s.__init__(**dic)\n\n if 'coordinates' in dic:\n self.coordinates = dic['coordinates']\n if 'type' in dic:\n self.geometry_type = dic['type']\n\nclass Point(Geometry, geoutil.Latlng):\n \"\"\"Point class representing global point\"\"\"\n \n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Point object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n c = dic['coordinates']\n super(Point, self).__init__(lat=c[1], lng=c[0], **dic)\n\nclass LineString(Geometry):\n \"\"\"Point class representing global line (start to end)\"\"\"\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal LineString object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(LineString, self).__init__(**dic)\n self.start = geoutil.Latlng(lat=self.coordinates[0][1], lng=self.coordinates[0][0])\n self.end = geoutil.Latlng(lat=self.coordinates[1][1], lng=self.coordinates[1][0])\n\n def distance_to(self, point):\n if isinstance(point, Point):\n return self.nearest_point_on_line(point).distance_to(point)\n raise RuntimeError(\"Need to pass a Point object (%s)\"%(type(point))) \n\n def nearest_point_on_line(self, point):\n A = geoutil.latlng2mercator(self.start)\n B = geoutil.latlng2mercator(self.end)\n C = geoutil.latlng2mercator(point)\n \n # Distance between A and B\n distAB = math.sqrt(math.pow(A.x - B.x, 2) + math.pow(A.y - B.y, 2));\n \n # Direction vector from A to B\n vecABx = (B.x - A.x) / distAB;\n vecABy = (B.y - A.y) / distAB;\n \n # Time from A to C\n timeAC = max(0, min(distAB, vecABx * (C.x - A.x) + vecABy * (C.y - 
A.y)));\n \n # LatLng of the point\n x = timeAC * vecABx + A.x;\n y = timeAC * vecABy + A.y;\n \n return geoutil.mercator2latlng(geoutil.Point(x=x, y=y))\n\n\nclass Properties(object):\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Properties object\"\"\"\n return cls(**dic)\n\n DEFAULT_VALUES = {\n \"hulop_building\": None,\n \"hulop_major_category\": None,\n \"hulop_sub_category\": None,\n \"hulop_minor_category\": None,\n \"hulop_heading\": 0,\n \"hulop_angle\": 180,\n \"hulop_height\": 0,\n \"hulop_long_description\": None,\n \"hulop_short_description\": None,\n \"hulop_description\": None,\n \"hulop_location_description\": None,\n \"hulop_content\": None,\n \"hulop_tags\": None,\n \"hulop_poi_external_category\": None,\n \"hulop_show_labels_zoomlevel\": None\n }\n \n def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n if name in Properties.DEFAULT_VALUES:\n return Properties.DEFAULT_VALUES[name]\n\n raise AttributeError(\"%s.%s is invalid\"%(self.__class__.__name__, name))\n return value\n\n def __init__(self, **dic):\n for key in dic:\n try:\n setattr(self, key, dic[key])\n except:\n print(\"Cannot use unicode string for a property name: \\\"{}\\\"\".format(key.encode('utf8')))\n\n def __str__(self):\n return json.dumps(self.__dict__, sort_keys=True, indent=2)\n\n\nclass Object(object):\n \"\"\"Object class\"\"\"\n\n @classmethod\n def marshal_list(cls, objects):\n \"\"\"marshal list of Object subclasses objects\"\"\"\n temp = []\n for obj in objects:\n temp.append(cls.marshal(obj))\n return temp\n\n @classmethod\n def marshal_dict(cls, objects):\n \"\"\"marshal dict of Object subclasses objects\"\"\"\n temp = {}\n for key in objects.keys():\n temp[key] = cls.marshal(objects[key])\n return temp\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Object subclasses object\"\"\"\n if 'node' in dic:\n cls = Landmark\n else:\n prop = dic['properties'] if 'properties' in dic else None\n if prop is not None:\n if 'node_id' in prop:\n cls = Node\n if 'link_id' in prop:\n cls = Link\n if 'facil_id' in prop:\n cls = Facility\n\n if cls == Object:\n return cls(**dic)\n return cls.marshal(dic)\n\n _id_map = {}\n _all_objects = []\n @staticmethod\n def get_object_by_id(_id, func=None):\n \"\"\"get object having id by callback function, it can be defered\"\"\"\n if _id in Object._id_map:\n if isinstance(Object._id_map[_id], list):\n Object._id_map[_id].append(func)\n else:\n if func is not None and callable(func):\n func(Object._id_map[_id])\n return None\n return Object._id_map[_id]\n else:\n Object._id_map[_id] = [func]\n return None\n\n @staticmethod\n def get_objects_by_type(_type):\n \"\"\"get objects of specified type\"\"\"\n temp = []\n for obj in Object._all_objects:\n if isinstance(obj, _type):\n temp.append(obj)\n return temp\n\n @staticmethod\n def get_all_objects():\n return Object._all_objects\n\n @staticmethod\n def _register(obj):\n \"\"\"store object with id and type\"\"\"\n # register with id\n _id = obj._id\n if _id in Object._id_map:\n if isinstance(Object._id_map[_id], list):\n for func in Object._id_map[_id]:\n if callable(func):\n func(obj)\n Object._id_map[_id] = obj\n Object._all_objects.append(obj)\n else:\n #raise RuntimeError(\"duplicate id\")\n pass\n else:\n Object._id_map[_id] = obj\n Object._all_objects.append(obj)\n\n @staticmethod\n def reset_all_objects():\n \"\"\"reset all state in the objects\"\"\"\n for obj in Object._all_objects:\n obj.reset()\n\n @staticmethod\n def _reset_link_index():\n Object._link_index = []\n 
Object._link_points = []\n Object._link_kdtree = None\n \n _link_index = []\n _link_points = []\n _link_kdtree = None\n @staticmethod\n def _build_link_index():\n for obj in Object.get_objects_by_type(Link):\n if obj.start_node and obj.end_node:\n sp = numpy.array([obj.start_node.local_geometry.x, obj.start_node.local_geometry.y])\n ep = numpy.array([obj.end_node.local_geometry.x, obj.end_node.local_geometry.y])\n Object._add_link_index(sp, ep, obj)\n if Object._link_points:\n Object._link_kdtree = scipy.spatial.KDTree(Object._link_points)\n\n @staticmethod\n def _add_link_index(sp, ep, obj):\n mp = (sp+ep)/2.0\n Object._link_points.append(mp)\n Object._link_index.append(obj)\n if numpy.linalg.norm(sp-ep) > 1:\n Object._add_link_index(sp, mp, obj)\n Object._add_link_index(mp, ep, obj)\n\n @staticmethod\n def get_nearest_link(node, exclude=None):\n point = node.local_geometry\n latlng = node.geometry\n _, index = Object._link_kdtree.query([point.x, point.y], 50)\n\n min_index = None\n min_dist = 1000\n for i in index:\n link = Object._link_index[i]\n if exclude is not None and exclude(link):\n continue\n \n dist = link.geometry.distance_to(latlng)\n if node.floor is not None:\n if link.start_node.floor != node.floor and \\\n link.end_node.floor != node.floor:\n dist += 1000\n if dist < min_dist:\n min_dist = dist\n min_index = i\n \n if min_index is None:\n return None\n return Object._link_index[min_index]\n\n @staticmethod\n def update_anchor_all(anchor):\n \"\"\"update anchor of all object\"\"\"\n Object._reset_link_index()\n for obj in Object._all_objects:\n obj.update_anchor(anchor)\n Object._build_link_index()\n\n\n def __init__(self, **dic):\n s = super(Object, self)\n if self.__class__.mro()[-2] == s.__thisclass__:\n s.__init__()\n else:\n s.__init__(**dic)\n \n if 'geometry' in dic:\n self.geometry = Geometry.marshal(dic['geometry'])\n if 'properties' in dic:\n self.properties = Properties.marshal(dic['properties'])\n if '_id' in dic:\n self._id = dic['_id']\n if 'no_registration' not in dic or not dic['no_registration']:\n Object._register(self)\n self.anchor = None\n self.local_geometry = None \n\n def __str__(self):\n ret = \"%s, (%s)\\n\" % (type(self), hex(id(self)))\n for key in self.__dict__:\n value = getattr(self, key)\n if isinstance(value, Object):\n ret += \"%s: %s<%s>\\n\"%(key, type(value), value._id)\n else:\n ret += \"%s: %s\\n\"%(key, str(value))\n \n import inspect\n for method in inspect.getmembers(type(self), predicate=lambda o: isinstance(o, property)):\n ret += \"%s: %s\\n\"%(method[0], method[1].__get__(self, type(self)))\n\n return ret\n\n def __repr__(self):\n return \"%s<%s>\"%(type(self), self._id)\n\n def update_anchor(self, anchor):\n self.anchor = anchor\n if anchor is not None:\n try:\n self.local_geometry = geoutil.global2local(self.geometry, anchor)\n except:\n print(\"Could not convert geometry: {}\".format(self.local_geometry))\n\n def distance_to(self, point):\n if isinstance(point, geoutil.Point):\n return self.local_geometry.distance_to(point)\n if isinstance(point, geoutil.Latlng):\n return self.geometry.distance_to(point)\n\n def reset(self):\n pass\n\nclass Link(Object):\n \"\"\"Link class\"\"\"\n ROUTE_TYPE_WALKWAY = 1\n ROUTE_TYPE_MOVING_WALKWAY = 2\n ROUTE_TYPE_RAILROAD_CROSSING = 3\n ROUTE_TYPE_ELEVATOR = 4\n ROUTE_TYPE_ESCALATOR = 5\n ROUTE_TYPE_STAIRS = 6\n ROUTE_TYPE_SLOPE = 7\n ROUTE_TYPE_UNKNOWN = 99\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Link subclasses object\"\"\"\n if 'properties' in dic:\n prop = 
dic['properties']\n if 'sourceNode' in prop:\n cls = RouteLink\n if cls == Link:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n super(Link, self).__init__(**dic)\n self.start_node = None\n self.end_node = None\n self.pois = []\n self.floor = 0\n Object.get_object_by_id(self.properties.start_id, self._set_start_node)\n Object.get_object_by_id(self.properties.end_id, self._set_end_node)\n\n def _set_start_node(self, node):\n self.start_node = node\n self._update()\n\n def _set_end_node(self, node):\n self.end_node = node\n self._update()\n\n def _update(self):\n if self.start_node is not None and \\\n self.end_node is not None:\n self.floor = (self.start_node.floor + self.end_node.floor)/2.0\n\n @property\n def is_elevator(self):\n \"\"\"wheather this links is an elevator or not\"\"\"\n return self.properties.route_type == Link.ROUTE_TYPE_ELEVATOR\n\n @property\n def is_escalator(self):\n \"\"\"wheather this links is an escalator or not\"\"\"\n return self.properties.route_type == Link.ROUTE_TYPE_ESCALATOR\n\n @property\n def is_leaf(self):\n \"\"\"wheather this links is a leaf or not\"\"\"\n if self.start_node is None or self.end_node is None:\n return False\n return self.start_node.is_leaf or self.end_node.is_leaf\n\n @property\n def length(self):\n \"\"\"distance from start to end\"\"\"\n if self.start_node is None or self.end_node is None:\n return float('nan')\n return self.start_node.geometry.distance_to(self.end_node.geometry)\n\n def register_poi(self, poi):\n self.pois.append(poi)\n\n def update_anchor(self, anchor):\n self.anchor = anchor\n #TODO\n\nclass RouteLink(Link):\n \"\"\"Route Link class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Directed Link object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(RouteLink, self).__init__(no_registration=True, **dic)\n self.source_node = None\n self.target_node = None\n Object.get_object_by_id(self.properties.sourceNode, self._set_source_node)\n Object.get_object_by_id(self.properties.targetNode, self._set_target_node)\n Object.get_object_by_id(self._id, self._found_link)\n\n def _set_source_node(self, node):\n self.source_node = node\n\n def _set_target_node(self, node):\n self.target_node = node\n\n def _found_link(self, link):\n self.pois = link.pois\n\n @property\n def is_temp(self):\n return self._id.startswith(\"_TEMP_LINK\")\n\n\nclass Node(Object):\n \"\"\"Node class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Node object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(Node, self).__init__(**dic)\n self.links = []\n for i in range(1, 100):\n attr = \"link%d_id\"%(i)\n if hasattr(self.properties, attr):\n Object.get_object_by_id(getattr(self.properties, attr), self._add_link)\n\n if hasattr(self.properties, 'floor'):\n self.floor = self.properties.floor\n else:\n self.floor = 0\n\n self.facility = None\n Facility.get_facility_by_id(self._id, self._set_facility)\n\n def _add_link(self, link):\n self.links.append(link)\n\n def _set_facility(self, facility):\n self.facility = facility\n\n @property\n def is_leaf(self):\n \"\"\"wheather this node is the end of leaf link\"\"\"\n return len(self.links) == 1\n\n @property\n def is_elevator(self):\n \"\"\"wheather this node is connected to elevator link\"\"\"\n res = False\n for link in self.links:\n res = res or link.is_elevator\n return res\n\n\nclass Facility(Object):\n \"\"\"Facility class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Facility subclasses 
object\"\"\"\n if 'properties' in dic:\n prop = dic['properties']\n if 'hulop_major_category' in prop:\n category = prop['hulop_major_category']\n if category == '_nav_poi_':\n cls = POI\n if cls == Facility:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n super(Facility, self).__init__(**dic)\n self.entrances = []\n for i in range(1, 100):\n attr = \"ent%d_node\"%(i)\n if hasattr(self.properties, attr):\n Facility._id_map[getattr(self.properties, attr)] = self\n Object.get_object_by_id(getattr(self.properties, attr), self._add_facility)\n\n self.name = i18n.localized_attr(self.properties, \"name\")\n self.name_pron = i18n.localized_attr(self.properties, \"name_hira\", only_if=\"ja\") ## special case\n self.long_description = i18n.localized_attr(self.properties, \"hulop_long_description\")\n\n def _add_facility(self, node):\n self.entrances.append(node)\n\n _id_map = {}\n @staticmethod\n def get_facility_by_id(_id, func=None):\n \"\"\"get facility having id by callback function, it can be defered\"\"\"\n if _id in Facility._id_map:\n if isinstance(Facility._id_map[_id], list):\n Facility._id_map[_id].append(func)\n else:\n if func is not None and callable(func):\n func(Facility._id_map[_id])\n return None\n return Facility._id_map[_id]\n else:\n Facility._id_map[_id] = [func]\n return None\n\nclass POI(Facility, geoutil.TargetPlace):\n \"\"\"POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal POI object\"\"\"\n if 'properties' in dic:\n prop = dic['properties']\n if 'hulop_sub_category' in prop:\n category = prop['hulop_sub_category']\n if category == '_nav_door_':\n cls = DoorPOI\n if category == '_nav_info_':\n cls = InfoPOI\n if category == '_cabot_speed_':\n cls = SpeedPOI\n if category == '_nav_elevator_cab_':\n cls = ElevatorCabPOI\n if category == '_nav_queue_wait_':\n cls = QueueWaitPOI\n if category == '_nav_queue_target_':\n cls = QueueTargetPOI\n\n if cls == POI:\n return cls(**dic)\n return cls.marshal(dic)\n\n def __init__(self, **dic):\n if 'properties' in dic:\n prop = dic['properties']\n get_prop = lambda prop, key: prop[key] if key in prop else Properties.DEFAULT_VALUES[key]\n r = (-get_prop(prop, 'hulop_heading') + 90) / 180.0 * math.pi\n angle = get_prop(prop, 'hulop_angle')\n self.floor = get_prop(prop, 'hulop_height')\n\n super(POI, self).__init__(r=r, x=0, y=0, angle=angle, floor=self.floor, **dic)\n\n self.sub_category = self.properties.hulop_sub_category \\\n if hasattr(self.properties, 'hulop_sub_category') else \"\"\n self.minor_category = self.properties.hulop_minor_category \\\n if hasattr(self.properties, 'hulop_minor_category') else \"\"\n\n #backward compatibility\n self.local_pose = self\n\n def approaching_statement(self):\n return None\n\n def approached_statement(self):\n return None\n\n def passed_statement(self):\n return None\n\n def update_anchor(self, anchor):\n super(POI, self).update_anchor(anchor) \n if anchor is not None:\n rad = (-self.properties.hulop_heading + 90 + anchor.rotate) / 180.0 * math.pi\n self.update_pose(self.local_geometry, rad)\n\n def reset(self):\n self.reset_target()\n\nclass DoorPOI(POI):\n \"\"\"POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Door POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(DoorPOI, self).__init__(**dic)\n\n @property\n def title(self):\n if self.is_auto:\n return i18n.localized_string(\"AUTO_DOOR\")\n else:\n return i18n.localized_string(\"DOOR\")\n\n @property\n def is_auto(self):\n \"\"\"wheather 
this is auto door or not\"\"\"\n return self.minor_category is not None and \\\n '_flag_auto_' in self.minor_category\n\n def approaching_statement(self):\n return i18n.localized_string(\"DOOR_POI_APPROACHING\", self.title) \n\nclass InfoPOI(POI):\n \"\"\"Nav Info POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Info POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(InfoPOI, self).__init__(**dic)\n\n def approached_statement(self):\n return self.name\n\nclass SpeedPOI(POI):\n \"\"\"Cabot Speed POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Speed POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(SpeedPOI, self).__init__(**dic)\n self.limit = float(self.properties.hulop_content)\n\nclass ElevatorCabPOI(POI):\n \"\"\"Elevator Cab POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Elevator Cab POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(ElevatorCabPOI, self).__init__(**dic)\n self.set_back = (3.0, 0.0)\n self.set_forward = (3.0, 0.0)\n self.door = (1.0, 0.0)\n if self.properties.hulop_content:\n try:\n hulop_content_json = json.loads(self.properties.hulop_content)\n if \"set_back\" in hulop_content_json:\n self.set_back = hulop_content_json[\"set_back\"]\n if \"set_forward\" in hulop_content_json:\n self.set_forward = hulop_content_json[\"set_forward\"]\n if \"door\" in hulop_content_json:\n self.door = hulop_content_json[\"door\"]\n if \"buttons\" in hulop_content_json:\n self.buttons = hulop_content_json[\"buttons\"]\n except:\n traceback.print_exc(file=sys.std_out)\n\n @property\n def door_geometry(self):\n x = self.x + math.cos(self.r) * self.door[0] - math.sin(self.r) * self.door[1]\n y = self.y + math.sin(self.r) * self.door[0] + math.cos(self.r) * self.door[1]\n return geoutil.Point(x=x, y=y)\n\n def where_is_buttons(self, pose):\n x = self.x + math.cos(self.r) * self.buttons[0] - math.sin(self.r) * self.buttons[1]\n y = self.y + math.sin(self.r) * self.buttons[0] + math.cos(self.r) * self.buttons[1]\n\n b_pos = geoutil.Point(x=x,y=y)\n b_pose = geoutil.Pose.pose_from_points(b_pos, pose)\n dir = angles.shortest_angular_distance(pose.r, b_pose.r)\n\n print(pose, b_pos, b_pose, dir)\n\n if abs(dir) > math.pi / 3 * 2:\n return \"BACK\"\n elif abs(dir) > math.pi / 3:\n if dir > 0:\n return \"LEFT\"\n elif dir < 0:\n return \"RIGHT\"\n elif abs(dir) < math.pi / 10:\n return \"FRONT\"\n elif dir > 0:\n return \"FRONT_LEFT\"\n elif dir < 0:\n return \"FRONT_RIGHT\"\n\n rospy.logerror(\"should not happen\")\n return None\n\nclass QueueWaitPOI(POI):\n \"\"\"Queue Wait POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Queue TaWaitrget POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(QueueWaitPOI, self).__init__(**dic)\n self.interval = 1.0\n hulop_content_json = json.loads(self.properties.hulop_content)\n if \"interval\" in hulop_content_json:\n self.interval = float(hulop_content_json[\"interval\"])\n self.is_copied = False\n self.link_orientation = None\n\n# def approached_statement(self):\n# return \"queue wait point\"\n\n def register_link(self, link):\n end_pose = geoutil.Pose.pose_from_points(link.end_node.local_geometry, link.start_node.local_geometry)\n quat = tf.transformations.quaternion_from_euler(0, 0, end_pose.r)\n\n self.link_orientation = geometry_msgs.msg.Quaternion()\n self.link_orientation.x = quat[0]\n self.link_orientation.y = quat[1]\n self.link_orientation.z = 
quat[2]\n self.link_orientation.w = quat[3]\n\n def copy_to_link(self, link, local_geometry_x, local_geometry_y):\n copied_poi = copy.deepcopy(self)\n copied_poi.x = local_geometry_x\n copied_poi.y = local_geometry_y\n copied_poi.local_geometry.x = local_geometry_x\n copied_poi.local_geometry.y = local_geometry_y\n copied_poi.geometry = geoutil.local2global(copied_poi.local_geometry, copied_poi.anchor)\n \n link.register_poi(copied_poi)\n copied_poi.register_link(link)\n self.is_copied = True\n return copied_poi\n\nclass QueueTargetPOI(POI):\n \"\"\"Queue Target POI class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Queue Target POI object\"\"\"\n return cls(**dic)\n\n def __init__(self, **dic):\n super(QueueTargetPOI, self).__init__(**dic)\n self.enter_node = None\n self.exit_node = None\n hulop_content_json = json.loads(self.properties.hulop_content)\n Object.get_object_by_id(hulop_content_json[\"enter\"], self._set_enter_node)\n Object.get_object_by_id(hulop_content_json[\"exit\"], self._set_exit_node)\n\n def _set_enter_node(self, node):\n self.enter_node = node\n\n def _set_exit_node(self, node):\n self.exit_node = node\n\nclass Landmark(Facility):\n \"\"\"Landmark class\"\"\"\n\n @classmethod\n def marshal(cls, dic):\n \"\"\"marshal Landmark object\"\"\"\n return cls(**dic)\n\n\n def __init__(self, **dic):\n self._id = dic['node']+\"_landmark\"\n super(Landmark, self).__init__(**dic)\n"
] | [
[
"numpy.array",
"numpy.linalg.norm",
"scipy.spatial.KDTree"
]
] |
Corentin-LF/pyGPs | [
"b9d36777584cd53756bd4311c3c20ea52e945451"
] | [
"pyGPs/Core/gp.py"
] | [
"from __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\n#================================================================================\n# Marion Neumann [marion dot neumann at uni-bonn dot de]\n# Daniel Marthaler [dan dot marthaler at gmail dot com]\n# Shan Huang [shan dot huang at iais dot fraunhofer dot de]\n# Kristian Kersting [kristian dot kersting at cs dot tu-dortmund dot de]\n#\n# This file is part of pyGPs.\n# The software package is released under the BSD 2-Clause (FreeBSD) License.\n#\n# Copyright (c) by\n# Marion Neumann, Daniel Marthaler, Shan Huang & Kristian Kersting, 18/02/2014\n#================================================================================\n\n# MEANING OF NOTATION:\n#\n# inffunc function specifying the inference method\n# covfunc prior covariance function (see below)\n# meanfunc prior mean function\n# likfunc likelihood function\n# x n by D matrix of training inputs\n# y column vector of length n of training targets\n# xs n by D matrix of test inputs\n# ys column vector of length nn of true test targets (optional)\n# nlZ returned value of the negative log marginal likelihood\n# dnlZ column vector of partial derivatives of the negative\n# log marginal likelihood w.r.t. each hyperparameter\n# ym column vector (of length ns) of predictive output means\n# ys2 column vector (of length ns) of predictive output variances\n# fm column vector (of length ns) of predictive latent means\n# fs2 column vector (of length ns) of predictive latent variances\n# lp column vector (of length ns) of log predictive probabilities\n# post struct representation of the (approximate) posterior\n# post consists of post.alpha, post.L, post.sW\n#\n# This is a object-oriented python implementation of gpml functionality\n# (Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2011-02-18).\n# based on the functional-version of python implementation\n# (Copyright (c) by Marion Neumann and Daniel Marthaler, 20/05/2013)\n#\n# Copyright (c) by Marion Neumann and Shan Huang, 30/09/2013\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom . import inf, mean, lik, cov, opt\nfrom .tools import unique, jitchol, solve_chol\nfrom copy import deepcopy\nimport pyGPs\nfrom pyGPs.Core.cov import FITCOfKernel\nimport logging\n\nSHADEDCOLOR = [0.7539, 0.89453125, 0.62890625, 1.0]\nMEANCOLOR = [ 0.2109375, 0.63385, 0.1796875, 1.0]\nDATACOLOR = [0.12109375, 0.46875, 1., 1.0]\n\nclass GP(object):\n '''\n Base class for GP model.\n '''\n def __init__(self):\n super(GP, self).__init__()\n self.usingDefaultMean = True # was using default mean function now?\n self.meanfunc = None # mean function\n self.covfunc = None # covariance function\n self.likfunc = None # likelihood function\n self.inffunc = None # inference function\n self.optimizer = None # optimizer object\n self.nlZ = None # negative log marginal likelihood\n self.dnlZ = None # column vector of partial derivatives of the negative\n # log marginal likelihood w.r.t. 
each hyperparameter\n self.posterior = None # struct representation of the (approximate) posterior\n self.x = None # n by D matrix of training inputs\n self.y = None # column vector of length n of training targets\n self.xs = None # n by D matrix of test inputs\n self.ys = None # column vector of length nn of true test targets (optional)\n self.ym = None # column vector (of length ns) of predictive output means\n self.ys2 = None # column vector (of length ns) of predictive output variances\n self.fm = None # column vector (of length ns) of predictive latent means\n self.fs2 = None # column vector (of length ns) of predictive latent variances\n self.lp = None # column vector (of length ns) of log predictive probabilities\n\n self.logger = logging.getLogger(__name__)\n\n\n\n def __str__(self):\n strvalue = 'To get the properties of the model use:\\n'+\\\n 'model.nlZ # negative log marginal likelihood\\n'+\\\n 'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\\n'+\\\n 'model.posterior # posterior structure\\n'+\\\n 'model.covfunc.hyp # hyperparameters of cov func\\n'+\\\n 'model.meanfunc.hyp # hyperparameters of mean func\\n'+\\\n 'model.likfunc.hyp # hyperparameters of lik func\\n'+\\\n 'model.fm # latent mean\\n'+\\\n 'model.fs2 # latent variance\\n'+\\\n 'model.ym # predictive mean\\n'+\\\n 'model.ys2 # predictive variance\\n'+\\\n 'model.lp # log predictive probability'\n return strvalue\n\n\n\n def __repr__(self):\n strvalue = str(type(self))+': '+\\\n 'to get the properties of the model use:\\n'+\\\n 'model.nlZ # negative log marginal likelihood\\n'+\\\n 'model.dnlZ.cov # derivatives of cov func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.lik # derivatives of lik func of negative log marginal likelihood\\n'+\\\n 'model.dnlZ.mean # derivatives of mean func of negative log marginal likelihood\\n'+\\\n 'model.posterior # posterior structure\\n'+\\\n 'model.covfunc.hyp # hyperparameters of cov func\\n'+\\\n 'model.meanfunc.hyp # hyperparameters of mean func\\n'+\\\n 'model.likfunc.hyp # hyperparameters of lik func\\n'+\\\n 'model.fm # latent mean\\n'+\\\n 'model.fs2 # latent variance\\n'+\\\n 'model.ym # predictive mean\\n'+\\\n 'model.ys2 # predictive variance\\n'+\\\n 'model.lp # log predictive probability'\n return strvalue\n\n\n\n\n def setData(self, x, y):\n '''\n Set training inputs and traning labels to model.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n\n self.x = x\n self.y = y\n if self.usingDefaultMean:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. 
training labels\n\n\n\n def plotData_1d(self, axisvals=None):\n '''\n Toy Method for ploting 1d data of the model.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n plt.figure()\n plt.plot(self.x, self.y, ls='None', marker='+', color=DATACOLOR, ms=12, mew=2)\n if axisvals:\n plt.axis(axisvals)\n plt.grid()\n plt.xlabel('input x')\n plt.ylabel('target y')\n plt.show()\n\n\n\n def plotData_2d(self,x1,x2,t1,t2,p1,p2,axisvals=None):\n '''\n Toy Method for ploting 2d data of the model. \\n\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param p1,p2: contour lines contains p2/(p1+p2)\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n That is to say, the contour is ploted by plt.contour(t1, t2, p2/(p1+p2) )\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n pc = plt.contour(t1, t2, np.reshape(old_div(p2,(p1+p2)), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if axisvals:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def setPrior(self, mean=None, kernel=None):\n '''\n Set prior mean and covariance other than the default setting of current model.\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n '''\n # check the type of inputs\n # ensure they are the right class before setting prior\n if not mean is None:\n assert isinstance(mean, pyGPs.mean.Mean), \"mean function is not an instance of pyGPs.mean.Mean\"\n self.meanfunc = mean\n self.usingDefaultMean = False\n if not kernel is None:\n assert isinstance(kernel, pyGPs.cov.Kernel), \"cov function is not an instance of pyGPs.cov.Kernel\"\n self.covfunc = kernel\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n This method is used to sepecify optimization configuration. By default, gp uses a single run \"minimize\".\n\n :param method: Optimization methods. Possible values are:\\n\n \"Minimize\" -> minimize by Carl Rasmussen (python implementation of \"minimize\" in GPML)\\n\n \"CG\" -> conjugent gradient\\n\n \"BFGS\" -> quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)\\n\n \"SCG\" -> scaled conjugent gradient (faster than CG)\\n\n :param num_restarts: Set if you want to run mulitiple times of optimization with different initial guess.\n It specifys the maximum number of runs/restarts/trials.\n :param min_threshold: Set if you want to run mulitiple times of optimization with different initial guess.\n It specifys the threshold of objective function value. Stop optimization when this value is reached.\n :param meanRange: The range of initial guess for mean hyperparameters.\n e.g. 
meanRange = [(-2,2), (-5,5), (0,1)].\n Each tuple specifys the range (low, high) of this hyperparameter,\n This is only the range of initial guess, during optimization process, optimal hyperparameters may go out of this range.\n (-5,5) for each hyperparameter by default.\n :param covRange: The range of initial guess for kernel hyperparameters. Usage see meanRange\n :param likRange: The range of initial guess for likelihood hyperparameters. Usage see meanRange\n '''\n pass\n\n\n\n def optimize40(self, x=None, y=None, numIterations=40):\n '''\n Train optimal hyperparameters based on training data,\n adjust new hyperparameters to all mean/cov/lik functions.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n '''\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # optimize\n optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)\n self.nlZ = optimalNlZ\n\n # apply optimal hyp to all mean/cov/lik functions here\n self.optimizer._apply_in_objects(optimalHyp)\n self.getPosterior()\n\n \n \n def optimize(self, x=None, y=None, numIterations=1000):\n '''\n Train optimal hyperparameters based on training data,\n adjust new hyperparameters to all mean/cov/lik functions.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n '''\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # optimize\n optimalHyp, optimalNlZ = self.optimizer.findMin(self.x, self.y, numIters = numIterations)\n self.nlZ = optimalNlZ\n\n # apply optimal hyp to all mean/cov/lik functions here\n self.optimizer._apply_in_objects(optimalHyp)\n self.getPosterior()\n\n\n def getPosterior(self, x=None, y=None, der=True):\n '''\n Fit the training data. Update negative log marginal likelihood(nlZ),\n partial derivatives of nlZ w.r.t. 
each hyperparameter(dnlZ),\n and struct representation of the (approximate) posterior(post),\n which consists of post.alpha, post.L, post.sW.\n\n nlZ, dnlZ, post = getPosterior(x, y, der=True)\\n\n nlZ, post = getPosterior(x, y, der=False )\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n :param boolean der: flag for whether to compute derivatives\n\n :return: negative log marginal likelihood (nlZ), derivatives of nlZ (dnlZ), posterior structure(post)\n\n You can print post to see descriptions of posterior.\n or see pyGPs.Core.inf for details.\n '''\n\n # check wether the number of inputs and labels match\n if x is not None and y is not None:\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if not x is None:\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n self.x = x\n\n if not y is None:\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.y = y\n\n if self.usingDefaultMean and self.meanfunc is None:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. training labels\n\n # call inference method\n if isinstance(self.likfunc, lik.Erf): #or is instance(self.likfunc, lik.Logistic):\n uy = unique(self.y)\n ind = ( uy != 1 )\n if any( uy[ind] != -1):\n raise Exception('You attempt classification using labels different from {+1,-1}')\n if not der:\n post, nlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 2)\n self.nlZ = nlZ\n self.posterior = deepcopy(post)\n return nlZ, post\n else:\n post, nlZ, dnlZ = self.inffunc.evaluate(self.meanfunc, self.covfunc, self.likfunc, self.x, self.y, 3)\n self.nlZ = nlZ\n self.dnlZ = deepcopy(dnlZ)\n self.posterior = deepcopy(post)\n return nlZ, dnlZ, post\n\n\n\n def predict(self, xs, ys=None):\n '''\n Prediction of test points (given by xs) based on training data of the current model.\n This method will output the following value:\\n\n predictive output means(ym),\\n\n predictive output variances(ys2),\\n\n predictive latent means(fm),\\n\n predictive latent variances(fs2),\\n\n log predictive probabilities(lp).\\n\n Theses values can also be achieved from model's property. (e.g. 
model.ym)\n\n :param xs: test input in shape of nn by D\n :param ys: test target(optional) in shape of nn by 1 if given\n\n :return: ym, ys2, fm, fs2, lp\n '''\n # check the shape of inputs\n # transform to correct shape if neccessary\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n self.xs = xs\n if not ys is None:\n if ys.ndim == 1:\n ys = np.reshape(ys, (ys.shape[0],1))\n self.ys = ys\n\n meanfunc = self.meanfunc\n covfunc = self.covfunc\n likfunc = self.likfunc\n inffunc = self.inffunc\n x = self.x\n y = self.y\n\n if self.posterior is None:\n self.getPosterior()\n alpha = self.posterior.alpha\n L = self.posterior.L\n sW = self.posterior.sW\n\n nz = list(range(len(alpha[:,0]))) # non-sparse representation\n if len(L) == 0: # in case L is not provided, we compute it\n K = covfunc.getCovMatrix(x=x[nz,:], mode='train')\n #L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )\n L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )\n Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?\n ns = xs.shape[0] # number of data points\n nperbatch = 1000 # number of data points per mini batch\n nact = 0 # number of already processed test data points\n ymu = np.zeros((ns,1))\n ys2 = np.zeros((ns,1))\n fmu = np.zeros((ns,1))\n fs2 = np.zeros((ns,1))\n lp = np.zeros((ns,1))\n while nact<=ns-1: # process minibatches of test cases to save memory\n ids = list(range(nact,min(nact+nperbatch,ns))) # data points to process\n kss = covfunc.getCovMatrix(z=xs[ids,:], mode='self_test') # self-variances\n if isinstance(covfunc, FITCOfKernel):\n Ks = covfunc.getCovMatrix(x=x, z=xs[ids,:], mode='cross') # cross-covariances\n Ks = Ks[nz,:]\n else:\n Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[ids,:], mode='cross') # cross-covariances\n ms = meanfunc.getMean(xs[ids,:])\n N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)\n Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f\n fmu[ids] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(ids),1)) # predictive means\n if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)\n V = np.linalg.solve(L.T,np.tile(sW,(1,len(ids)))*Ks)\n fs2[ids] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances\n else: # L is not triangular => use alternative parametrization\n fs2[ids] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances\n fs2[ids] = np.maximum(fs2[ids],0) # remove numerical noise i.e. negative variances\n Fs2 = np.tile(fs2[ids],(1,N)) # we have multiple values in case of sampling\n if ys is None:\n Lp, Ymu, Ys2 = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)\n else:\n Lp, Ymu, Ys2 = likfunc.evaluate(np.tile(ys[ids],(1,N)), Fmu[:], Fs2[:],None,None,3)\n lp[ids] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(ids),1) ) # log probability; sample averaging\n ymu[ids] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(ids),1) ) # predictive mean ys|y and ... \n ys2[ids] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(ids),1) ) # .. 
variance\n nact = ids[-1]+1 # set counter to index of next data point\n self.ym = ymu\n self.ys2 = ys2\n self.lp = lp\n self.fm = fmu\n self.fs2 = fs2\n if ys is None:\n return ymu, ys2, fmu, fs2, None\n else:\n return ymu, ys2, fmu, fs2, lp\n\n\n\n def predict_with_posterior(self, post, xs, ys=None):\n '''\n Prediction of test points (given by xs) based on training data\n of the current model with posterior already provided.\n (i.e. you already have the posterior and thus don't need the fitting phase.)\n This method will output the following value:\\n\n predictive output means(ym),\\n\n predictive output variances(ys2),\\n\n predictive latent means(fm),\\n\n predictive latent variances(fs2),\\n\n log predictive probabilities(lp).\\n\n Theses values can also be achieved from model's property. (e.g. model.ym)\n\n :param post: struct representation of posterior\n :param xs: test input\n :param ys: test target(optional)\n\n :return: ym, ys2, fm, fs2, lp\n '''\n # check the shape of inputs\n # transform to correct shape if neccessary\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n self.xs = xs\n if not ys is None:\n if ys.ndim == 1:\n ys = np.reshape(ys, (ys.shape[0],1))\n self.ys = ys\n\n meanfunc = self.meanfunc\n covfunc = self.covfunc\n likfunc = self.likfunc\n inffunc = self.inffunc\n x = self.x\n y = self.y\n\n self.posterior = deepcopy(post)\n alpha = post.alpha\n L = post.L\n sW = post.sW\n\n nz = list(range(len(alpha[:,0]))) # non-sparse representation\n if len(L) == 0: # in case L is not provided, we compute it\n K = covfunc.getCovMatrix(x=x[nz,:], mode='train')\n #L = np.linalg.cholesky( (np.eye(nz) + np.dot(sW,sW.T)*K).T )\n L = jitchol( (np.eye(len(nz)) + np.dot(sW,sW.T)*K).T )\n Ltril = np.all( np.tril(L,-1) == 0 ) # is L an upper triangular matrix?\n ns = xs.shape[0] # number of data points\n nperbatch = 1000 # number of data points per mini batch\n nact = 0 # number of already processed test data points\n ymu = np.zeros((ns,1))\n ys2 = np.zeros((ns,1))\n fmu = np.zeros((ns,1))\n fs2 = np.zeros((ns,1))\n lp = np.zeros((ns,1))\n while nact<=ns-1: # process minibatches of test cases to save memory\n id = list(range(nact,min(nact+nperbatch,ns))) # data points to process\n kss = covfunc.getCovMatrix(z=xs[id,:], mode='self_test') # self-variances\n Ks = covfunc.getCovMatrix(x=x[nz,:], z=xs[id,:], mode='cross') # cross-covariances\n ms = meanfunc.getMean(xs[id,:])\n N = (alpha.shape)[1] # number of alphas (usually 1; more in case of sampling)\n Fmu = np.tile(ms,(1,N)) + np.dot(Ks.T,alpha[nz]) # conditional mean fs|f\n fmu[id] = np.reshape(old_div(Fmu.sum(axis=1),N),(len(id),1)) # predictive means\n if Ltril: # L is triangular => use Cholesky parameters (alpha,sW,L)\n V = np.linalg.solve(L.T,np.tile(sW,(1,len(id)))*Ks)\n fs2[id] = kss - np.array([(V*V).sum(axis=0)]).T # predictive variances\n else: # L is not triangular => use alternative parametrization\n fs2[id] = kss + np.array([(Ks*np.dot(L,Ks)).sum(axis=0)]).T # predictive variances\n fs2[id] = np.maximum(fs2[id],0) # remove numerical noise i.e. 
negative variances\n Fs2 = np.tile(fs2[id],(1,N)) # we have multiple values in case of sampling\n if ys is None:\n [Lp, Ymu, Ys2] = likfunc.evaluate(None,Fmu[:],Fs2[:],None,None,3)\n else:\n [Lp, Ymu, Ys2] = likfunc.evaluate(np.tile(ys[id],(1,N)), Fmu[:], Fs2[:],None,None,3)\n lp[id] = np.reshape( old_div(np.reshape(Lp,(np.prod(Lp.shape),N)).sum(axis=1),N) , (len(id),1) ) # log probability; sample averaging\n ymu[id] = np.reshape( old_div(np.reshape(Ymu,(np.prod(Ymu.shape),N)).sum(axis=1),N) ,(len(id),1) ) # predictive mean ys|y and ...\n ys2[id] = np.reshape( old_div(np.reshape(Ys2,(np.prod(Ys2.shape),N)).sum(axis=1),N) , (len(id),1) ) # .. variance\n nact = id[-1]+1 # set counter to index of next data point\n self.ym = ymu\n self.ys2 = ys2\n self.lp = lp\n self.fm = fmu\n self.fs2 = fs2\n if ys is None:\n return ymu, ys2, fmu, fs2, None\n else:\n return ymu, ys2, fmu, fs2, lp\n\n\n\n\n\nclass GPR(GP):\n '''\n Model for Gaussian Process Regression\n '''\n def __init__(self):\n super(GPR, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Gauss() # likihood with default noise variance 0.1\n self.inffunc = inf.Exact() # inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n\n\n\n def setNoise(self,log_sigma):\n '''\n Set noise other than default noise value\n\n :param log_sigma: logarithm of the noise sigma\n '''\n self.likfunc = lik.Gauss(log_sigma)\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n elif method == \"Nelder-Mead\":\n self.optimizer = opt.Simplex(self, conf)\n else:\n raise Exception('Optimization method is not set correctly in setOptimizer')\n\n\n def plot(self,axisvals=None):\n '''\n Plot 1d GP regression result.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n xs = self.xs # test point\n x = self.x\n y = self.y\n ym = self.ym # predictive test mean\n ys2 = self.ys2 # predictive test variance\n plt.figure()\n xss = np.reshape(xs,(xs.shape[0],))\n ymm = np.reshape(ym,(ym.shape[0],))\n ys22 = np.reshape(ys2,(ys2.shape[0],))\n plt.plot(x, y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)\n plt.plot(xs, ym, color=MEANCOLOR, ls='-', lw=3.)\n plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.xlabel('input x')\n plt.ylabel('target y')\n plt.show()\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n elif newInf == \"EP\":\n self.inffunc = inf.EP()\n 
else:\n raise Exception('Possible inf values are \"Laplace\", \"EP\".')\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default Gaussian likelihood.\n\n :param str newLik: 'Laplace'\n '''\n if newLik == \"Laplace\":\n self.likfunc = lik.Laplace()\n self.inffunc = inf.EP()\n else:\n raise Exception('Possible lik values are \"Laplace\".')\n\n\n\n\n\nclass GPC(GP):\n '''\n Model for Gaussian Process Classification.\n '''\n def __init__(self):\n super(GPC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Erf() # erf likihood\n self.inffunc = inf.EP() # default inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,x1,x2,t1,t2,axisvals=None):\n '''\n Plot 2d GP Classification result.\n\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default EP inference.\n\n :param str newInf: 'Laplace'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default error function.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n #self.likfunc = lik.Logistic()\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n\n\n\n\n\nclass GPMC(object):\n '''\n This is a one vs. 
one classification wrapper for GP Classification\n '''\n def __init__(self, n_class):\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.n_class = n_class # number of different classes\n self.x_all = None\n self.y_all = None\n self.newInf = None # new inference? -> call useInference\n self.newLik = None # new likelihood? -> call useLikelihood\n self.newPrior = False\n\n\n\n def setPrior(self, mean=None, kernel=None):\n '''\n Set prior mean and covariance other than the default setting of current model.\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n '''\n # check the type of inputs\n # ensure they are the right class before setting prior\n if not mean is None:\n assert isinstance(mean, pyGPs.mean.Mean), \"mean function is not an instance of pyGPs.mean.Mean\"\n self.meanfunc = mean\n self.usingDefaultMean = False\n if not kernel is None:\n assert isinstance(kernel, pyGPs.cov.Kernel), \"cov function is not an instance of pyGPs.cov.Kernel\"\n self.covfunc = kernel\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n self.newPrior = True\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default EP inference.\n\n :param str newInf: 'Laplace'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another likelihood function other than default error function.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n #self.likfunc = lik.Logistic()\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n\n\n\n def setData(self,x,y):\n '''\n Set training inputs and traning labels to model.\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check the shape of inputs\n # transform to the correct shape\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n\n self.x_all = x\n self.y_all = y\n\n\n\n def fitAndPredict(self, xs):\n '''\n Fit the model with given training data and predict for test points (given by xs).\n predictive_vote is a matrix where row i is each test point i,\n and column j is the probability for being class j\n\n :param xs: test inputs in shape of nn by D\n :return: predictive_vote\n '''\n # check the shape of inputs\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n\n predictive_vote = np.zeros((xs.shape[0],self.n_class))\n for i in range(self.n_class): # classifier for class i...\n for j in range(i+1,self.n_class): # ...and class j\n x,y = self.createBinaryClass(i,j)\n model = GPC()\n if self.newPrior:\n model.setPrior(mean=self.meanfunc, kernel=self.covfunc)\n if self.newInf:\n model.useInference(self.newInf)\n if self.newLik:\n model.useLikelihood(self.newLik)\n model.getPosterior(x,y) # fitting\n ym = model.predict(xs)[0]\n ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i\n vote_i = np.zeros((xs.shape[0],self.n_class))\n vote_j = np.zeros((xs.shape[0],self.n_class))\n vote_i[:,i:i+1] = 
ym\n vote_j[:,j:j+1] = 2-ym\n predictive_vote += vote_i\n predictive_vote += vote_j\n predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]\n return predictive_vote\n\n\n\n def optimizeAndPredict(self, xs):\n '''\n Optimize the model with given training data and predict for test points (given by xs).\n predictive_vote is a matrix where row i is each test point i,\n and column j is the probability for being class j\n\n :param xs: test inputs in shape of nn by D\n :return: predictive_vote\n '''\n # check the shape of inputs\n if xs.ndim == 1:\n xs = np.reshape(xs, (xs.shape[0],1))\n\n predictive_vote = np.zeros((xs.shape[0],self.n_class))\n for i in range(self.n_class): # classifier for class i...\n for j in range(i+1,self.n_class): # ...and class j\n x,y = self.createBinaryClass(i,j)\n model = GPC()\n if self.newPrior:\n model.setPrior(mean=self.meanfunc, kernel=self.covfunc)\n if self.newInf:\n model.useInference(self.newInf)\n if self.newLik:\n model.useLikelihood(self.newLik)\n model.optimize(x,y) # training\n ym = model.predict(xs)[0]\n ym += 1 # now scale into 0 to 2, ym=0 is class j, ym=2 is class i\n vote_i = np.zeros((xs.shape[0],self.n_class))\n vote_j = np.zeros((xs.shape[0],self.n_class))\n vote_i[:,i:i+1] = ym\n vote_j[:,j:j+1] = 2-ym\n predictive_vote += vote_i\n predictive_vote += vote_j\n predictive_vote /= predictive_vote.sum(axis=1)[:,np.newaxis]\n return predictive_vote\n\n\n\n def createBinaryClass(self, i,j):\n '''\n Create dataset x(data) and y(label) which only contains class i and j.\n Relabel class i to +1 and class j to -1\n\n :param int i: the i_th class\n :param int j: the j_th class\n :return: x(data) and y(label) which only contains class i and j\n\n '''\n class_i = []\n class_j = []\n for index in range(len(self.y_all)): # check all classes\n target = self.y_all[index]\n if target == i:\n class_i.append(index)\n elif target == j:\n class_j.append(index)\n n1 = len(class_i)\n n2 = len(class_j)\n class_i.extend(class_j)\n x = self.x_all[class_i,:]\n y = np.concatenate((np.ones((1,n1)),-np.ones((1,n2))),axis=1).T\n return x,y\n\n\n\n\n\nclass GP_FITC(GP):\n '''\n Model for FITC GP base class\n '''\n def __init__(self):\n super(GP_FITC, self).__init__()\n self.u = None # inducing points\n\n\n\n def setData(self, x, y, value_per_axis=5):\n '''\n Set training inputs and traning labels to model and derive deault inducing_points..\n\n :param x: training inputs in shape (n,D)\n :param y: training labels in shape (n,1)\n :param int value_per_axis: number of value in each dimension\n when using a uni-distant default inducing points\n\n Note this method will transform x, y to correct shape\n if x, y is given in 1d array.\n '''\n # check wether the number of inputs and labels match\n assert x.shape[0] == y.shape[0], \"number of inputs and labels does not match\"\n\n # check dimension of inputs\n # transform to correct shape if neccessary\n if x.ndim == 1:\n x = np.reshape(x, (x.shape[0],1))\n if y.ndim == 1:\n y = np.reshape(y, (y.shape[0],1))\n self.x = x\n self.y = y\n if self.usingDefaultMean:\n c = np.mean(y)\n self.meanfunc = mean.Const(c) # adapt default prior mean wrt. 
training labels\n\n # get range of x in each dimension\n # 5 uniformally selected value for each dimension\n gridAxis=[]\n for d in range(x.shape[1]):\n column = x[:,d]\n mini = np.min(column)\n maxi = np.max(column)\n axis = np.linspace(mini,maxi,value_per_axis)\n gridAxis.append(axis)\n # default inducing points-> a grid\n if self.u is None:\n self.u = np.array(list(itertools.product(*gridAxis)))\n self.covfunc = self.covfunc.fitc(self.u)\n\n\n\n def setPrior(self, mean=None, kernel=None, inducing_points=None):\n '''\n Set prior mean and covariance other than the default setting of current model,\n as well as the inducing points\n\n :param mean: instance of mean class. (e.g. mean.Linear())\n :param kernel: instance of covariance class. (e.g. cov.RBF())\n :inducing_points: matrix of inducing points in shape of (nu,D)\n '''\n if not kernel is None:\n if not inducing_points is None:\n self.covfunc = kernel.fitc(inducing_points)\n self.u = inducing_points\n else:\n if not self.u is None:\n self.covfunc = kernel.fitc(self.u)\n else:\n raise Exception(\"To use default inducing points, please call setData() first!\")\n if type(kernel) is cov.Pre:\n self.usingDefaultMean = False\n if not mean is None:\n self.meanfunc = mean\n self.usingDefaultMean = False\n\n\n\n\n\nclass GPR_FITC(GP_FITC):\n '''\n Model for Gaussian Process Regression FITC\n '''\n def __init__(self):\n super(GPR_FITC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Gauss() # likihood with default noise variance 0.1\n self.inffunc = inf.FITC_Exact() # inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n self.u = None # no default inducing points\n\n\n\n def setNoise(self,log_sigma):\n '''\n Set noise other than default noise value\n\n :param log_sigma: logarithm of the noise sigma\n '''\n self.likfunc = lik.Gauss(log_sigma)\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. 
Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,axisvals=None):\n '''\n Plot 1d GP FITC Regression result.\n\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n '''\n plt.figure()\n xss = np.reshape(self.xs,(self.xs.shape[0],))\n ymm = np.reshape(self.ym,(self.ym.shape[0],))\n ys22 = np.reshape(self.ys2,(self.ys2.shape[0],))\n plt.plot(self.x, self.y, color=DATACOLOR, ls='None', marker='+',ms=12, mew=2)\n plt.plot(self.xs, self.ym, color=MEANCOLOR, ls='-', lw=3.)\n plt.fill_between(xss,ymm + 2.*np.sqrt(ys22), ymm - 2.*np.sqrt(ys22), facecolor=SHADEDCOLOR,linewidths=0.0)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.xlabel('input x')\n plt.ylabel('output y')\n plt.plot(self.u,np.ones_like(self.u), ls='None', color='k',marker='x',markersize=12,mew=2)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.FITC_Laplace()\n elif newInf == \"EP\":\n self.inffunc = inf.FITC_EP()\n else:\n raise Exception('Possible inf values are \"Laplace\", \"EP\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another inference techinique other than default Gaussian likelihood.\n\n :param str newLik: 'Laplace'\n '''\n if newLik == \"Laplace\":\n self.likfunc = lik.Laplace()\n self.inffunc = inf.FITC_EP()\n else:\n raise Exception('Possible lik values are \"Laplace\".')\n\n\n\n\n\nclass GPC_FITC(GP_FITC):\n '''\n Model for Gaussian Process Classification FITC\n '''\n def __init__(self):\n super(GPC_FITC, self).__init__()\n self.meanfunc = mean.Zero() # default prior mean\n self.covfunc = cov.RBF() # default prior covariance\n self.likfunc = lik.Erf() # erf liklihood\n self.inffunc = inf.FITC_EP() # default inference method\n self.optimizer = opt.Minimize(self) # default optimizer\n self.u = None # no default inducing points\n\n\n\n def setOptimizer(self, method, num_restarts=None, min_threshold=None, meanRange=None, covRange=None, likRange=None):\n '''\n Overriding. 
Usage see base class pyGPs.gp.GP.setOptimizer\n '''\n conf = None\n if (num_restarts!=None) or (min_threshold!=None):\n conf = pyGPs.Optimization.conf.random_init_conf(self.meanfunc,self.covfunc,self.likfunc)\n conf.num_restarts = num_restarts\n conf.min_threshold = min_threshold\n if not meanRange is None:\n conf.meanRange = meanRange\n if not covRange is None:\n conf.covRange = covRange\n if not likRange is None:\n conf.likRange = likRange\n if method == \"Minimize\":\n self.optimizer = opt.Minimize(self,conf)\n elif method == \"SCG\":\n self.optimizer = opt.SCG(self,conf)\n elif method == \"CG\":\n self.optimizer = opt.CG(self,conf)\n elif method == \"BFGS\":\n self.optimizer = opt.BFGS(self,conf)\n\n\n\n def plot(self,x1,x2,t1,t2,axisvals=None):\n '''Plot 2d GP FITC classification.\n For plotting, we superimpose the data points with the posterior equi-probability contour\n lines for the probability of class two given complete information about the generating mechanism.\n\n :param x1: inputs for class +1\n :param x2: inputs for class -1\n :param t1: meshgrid array for the first axis\n :param t2: meshgrid array for the second axis\n :param list axisvals: [min_x, max_x, min_y, max_y] setting the plot range\n\n Note these parameters are (only) used for our hard-coded data for classification demo.\n '''\n fig = plt.figure()\n plt.plot(x1[:,0], x1[:,1], 'b+', markersize = 12)\n plt.plot(x2[:,0], x2[:,1], 'r+', markersize = 12)\n plt.plot(self.u[:,0],self.u[:,1],'ko', markersize=12)\n pc = plt.contour(t1, t2, np.reshape(np.exp(self.lp), (t1.shape[0],t1.shape[1]) ))\n fig.colorbar(pc)\n plt.grid()\n if not axisvals is None:\n plt.axis(axisvals)\n plt.show()\n\n\n\n def useInference(self, newInf):\n '''\n Use another inference techinique other than default exact inference.\n\n :param str newInf: 'Laplace' or 'EP'\n '''\n if newInf == \"Laplace\":\n self.inffunc = inf.FITC_Laplace()\n else:\n raise Exception('Possible inf values are \"Laplace\".')\n\n\n\n def useLikelihood(self,newLik):\n '''\n Use another inference techinique other than default Erf likelihood.\n (Not used in this version)\n\n :param str newLik: 'Logistic'\n '''\n if newLik == \"Logistic\":\n raise Exception(\"Logistic likelihood is currently not implemented.\")\n else:\n raise Exception('Possible lik values are \"Logistic\".')\n"
] | [
[
"numpy.ones",
"numpy.ones_like",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.tril",
"matplotlib.pyplot.figure",
"numpy.reshape",
"numpy.linspace",
"numpy.mean",
"numpy.tile",
"numpy.zeros",
"matplotlib.pyplot.axis",
"numpy.max",
"numpy.min",
"numpy.prod",
"numpy.maximum",
"matplotlib.pyplot.grid",
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.sqrt",
"numpy.dot",
"matplotlib.pyplot.xlabel"
]
] |
Algomorph/NeuralTracking | [
"6312be8e18828344c65e25a423c239efcd3428dd",
"6312be8e18828344c65e25a423c239efcd3428dd"
] | [
"tests/data_generation/animate_berlin_y_stretch.py",
"tests/data_generation/animate_berlin_x_offset.py"
] | [
"import sys\nimport os\nimport shutil\n\nimport cv2\nimport open3d as o3d\nimport open3d.core as o3c\nimport numpy as np\n\nfrom rendering.pytorch3d_renderer import PyTorch3DRenderer\nfrom data import StandaloneFrameDataset\nimport data.presets as presets\nimport tsdf.default_voxel_grid\nimport data.camera\nfrom settings import process_arguments, PathParameters, DeformNetParameters\n\nPROGRAM_EXIT_SUCCESS = 0\n\n\ndef main():\n process_arguments()\n frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value\n\n device = o3c.Device(\"cuda:0\")\n volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)\n\n depth_image = frame_dataset.load_depth_image_open3d(device)\n color_image = frame_dataset.load_color_image_open3d(device)\n intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),\n frame_dataset.get_depth_image_path())\n intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)\n extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)\n\n volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)\n original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()\n renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)\n\n frame_count = 6\n scale_factor_increment = 0.1\n\n scale_center = np.array([0.0855289, -0.03289237, 2.79831315], dtype=np.float32)\n\n def scale_mesh_y(mesh: o3d.geometry.TriangleMesh, factor: float) -> o3d.geometry.TriangleMesh:\n vertices = np.array(mesh.vertices)\n stretched_vertices = vertices - scale_center\n stretched_vertices[:, 1] *= factor\n stretched_vertices += scale_center\n\n _scaled_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(stretched_vertices), mesh.triangles)\n _scaled_mesh.vertex_colors = mesh.vertex_colors\n return _scaled_mesh\n\n # prepare folders\n root_output_directory = os.path.join(PathParameters.output_directory.value, \"berlin_y_stretch_sequence\")\n depth_output_directory = os.path.join(root_output_directory, \"depth\")\n if not os.path.exists(depth_output_directory):\n os.makedirs(depth_output_directory)\n color_output_directory = os.path.join(root_output_directory, \"color\")\n if not os.path.exists(color_output_directory):\n os.makedirs(color_output_directory)\n\n # record animation rendering output\n for i_frame in range(0, frame_count):\n scaled_mesh = scale_mesh_y(original_mesh, 1.0 + scale_factor_increment * i_frame)\n depth, color = renderer.render_mesh_legacy(scaled_mesh, depth_scale=1000.0)\n color_path = os.path.join(color_output_directory, f\"{i_frame:06d}.jpg\")\n depth_path = os.path.join(depth_output_directory, f\"{i_frame:06d}.png\")\n cv2.imwrite(color_path, color)\n cv2.imwrite(depth_path, depth.astype(np.uint16))\n\n shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, \"intrinsics.txt\"))\n\n return PROGRAM_EXIT_SUCCESS\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"import sys\nimport os\nimport shutil\n\nimport cv2\nimport open3d as o3d\nimport open3d.core as o3c\nimport numpy as np\n\nfrom rendering.pytorch3d_renderer import PyTorch3DRenderer\nfrom data import StandaloneFrameDataset\nimport data.presets as presets\nimport tsdf.default_voxel_grid\nimport data.camera\nfrom settings import process_arguments, PathParameters, DeformNetParameters\n\nPROGRAM_EXIT_SUCCESS = 0\n\n\ndef main():\n process_arguments()\n frame_dataset: StandaloneFrameDataset = presets.StandaloneFramePreset.BERLIN_0.value\n\n device = o3c.Device(\"cuda:0\")\n volume: o3d.t = tsdf.default_voxel_grid.make_default_tsdf_voxel_grid(device)\n\n depth_image = frame_dataset.load_depth_image_open3d(device)\n color_image = frame_dataset.load_color_image_open3d(device)\n intrinsics_open3d_cpu, _ = data.camera.load_open3d_intrinsics_from_text_4x4_matrix_and_image(frame_dataset.get_intrinsics_path(),\n frame_dataset.get_depth_image_path())\n intrinsics_open3d_cuda = o3d.core.Tensor(intrinsics_open3d_cpu.intrinsic_matrix, o3d.core.Dtype.Float32, device)\n extrinsics_open3d_cuda = o3d.core.Tensor.eye(4, o3d.core.Dtype.Float32, device)\n\n volume.integrate(depth_image, color_image, intrinsics_open3d_cuda, extrinsics_open3d_cuda, DeformNetParameters.depth_scale.value, 3.0)\n original_mesh: o3d.geometry.TriangleMesh = volume.extract_surface_mesh(-1, 0).to_legacy_triangle_mesh()\n renderer = PyTorch3DRenderer((depth_image.rows, depth_image.columns), device, intrinsics_open3d_cuda)\n\n frame_count = 6\n offset_increment = 0.01\n\n def offset_mesh_plus_x(mesh: o3d.geometry.TriangleMesh, offset: float) -> o3d.geometry.TriangleMesh:\n vertices = np.array(mesh.vertices)\n vertices[:, 0] += offset\n _offset_mesh = o3d.geometry.TriangleMesh(o3d.cuda.pybind.utility.Vector3dVector(vertices), mesh.triangles)\n _offset_mesh.vertex_colors = mesh.vertex_colors\n return _offset_mesh\n\n # prepare folders\n root_output_directory = os.path.join(PathParameters.output_directory.value, \"berlin_x_offset_sequence\")\n depth_output_directory = os.path.join(root_output_directory, \"depth\")\n if not os.path.exists(depth_output_directory):\n os.makedirs(depth_output_directory)\n color_output_directory = os.path.join(root_output_directory, \"color\")\n if not os.path.exists(color_output_directory):\n os.makedirs(color_output_directory)\n\n # record animation rendering output\n for i_frame in range(0, frame_count):\n offset_mesh = offset_mesh_plus_x(original_mesh, offset_increment * i_frame)\n depth, color = renderer.render_mesh_legacy(offset_mesh, depth_scale=1000.0)\n color_path = os.path.join(color_output_directory, f\"{i_frame:06d}.jpg\")\n depth_path = os.path.join(depth_output_directory, f\"{i_frame:06d}.png\")\n cv2.imwrite(color_path, color)\n cv2.imwrite(depth_path, depth.astype(np.uint16))\n\n shutil.copy(frame_dataset.get_intrinsics_path(), os.path.join(root_output_directory, \"intrinsics.txt\"))\n\n return PROGRAM_EXIT_SUCCESS\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n"
] | [
[
"numpy.array"
],
[
"numpy.array"
]
] |
aliabid2243/deepgaze | [
"8c602db89a1d1d8a644b44a381ddb8a693375e08"
] | [
"new_model/test_big.py"
] | [
"import os\nfrom load_data import load_batch, load_data_names, load_batch_from_names, load_batch_from_names_random\nfrom my_model import get_eye_tracker_model\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.optimizers import SGD, adam\n\ndef generator(data, batch_size, img_cols, img_rows, img_ch):\n\n while True:\n for it in list(range(0, data[0].shape[0], batch_size)):\n x, y = load_batch([l[it:it + batch_size] for l in data], img_cols, img_rows, img_ch)\n yield x, y\n\n\ndef test_big(args):\n\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.dev\n names_path = r\"C:\\Users\\Aliab\\PycharmProjects\\data\\test\"\n print(\"Names to test: {}\".format(names_path))\n\n dataset_path = r\"D:\\GazeCapture\"\n print(\"Dataset: {}\".format(names_path))\n\n weights_path = \"weight_vgg.hdf5\"\n print(\"Weights: {}\".format(weights_path))\n\n # image parameter\n img_cols = 128\n img_rows = 128\n img_ch = 3\n\n # test parameter\n batch_size = 64\n chunk_size = 500\n\n # model\n model = get_eye_tracker_model(img_cols, img_rows, img_ch)\n\n # model summary\n model.summary()\n\n # weights\n print(\"Loading weights...\")\n model = load_model(weights_path)\n\n model.load_weights(weights_path)\n # data\n test_names = load_data_names(names_path)\n\n # limit amount of testing data\n # test_names = test_names[:1000]\n\n # results\n err_x = []\n err_y = []\n\n print(\"Loading testing data...\")\n for it in list(range(0, len(test_names), chunk_size)):\n\n x, y = load_batch_from_names_random(test_names[it:it + chunk_size], dataset_path, batch_size, img_cols, img_rows, img_ch)\n # x, y = load_batch_from_names(test_names[it:it + chunk_size], dataset_path, img_ch, img_cols, img_rows)\n predictions = model.predict(x=x, batch_size=batch_size, verbose=1)\n\n # print and analyze predictions\n for i, prediction in enumerate(predictions):\n print(\"PR: {} {}\".format(prediction[0], prediction[1]))\n print(\"GT: {} {} \\n\".format(y[i][0], y[i][1]))\n\n err_x.append(abs(prediction[0] - y[i][0]))\n err_y.append(abs(prediction[1] - y[i][1]))\n\n # mean absolute error\n mae_x = np.mean(err_x)\n mae_y = np.mean(err_y)\n\n # standard deviation\n std_x = np.std(err_x)\n std_y = np.std(err_y)\n\n # final results\n print(\"MAE: {} {} ( samples)\".format(mae_x, mae_y))\n print(\"STD: {} {} ( samples)\".format(std_x, std_y))\n\n\nif __name__ == '__main__':\n test_big()\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] |
edpolanco/air_cargo | [
"20ddf6c72dafed85b87486ca46a9c09656f31d90"
] | [
"analysis.py"
] | [
"\"\"\"Module for summarizing cargo planning testing results.\n\n Ed Polanco\n [email protected]\n\"\"\"\nimport pandas as pd\nfrom collections import OrderedDict\nimport datetime\nimport time \nfrom aimacode.search import Problem, Node\nfrom timeit import default_timer as timer\nfrom run_search import PrintableProblem, PROBLEMS\nfrom aimacode.search import (breadth_first_search, astar_search,\n breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,\n greedy_best_first_graph_search, depth_limited_search,\n recursive_best_first_search)\n\n#Names of the various search algorithms\nSEARCHES_SHORT_NAME = [[\"Breadth First\", breadth_first_search, \"\"], #1\n ['Breadth First Tree', breadth_first_tree_search, \"\"], #2\n ['Depth First Graph', depth_first_graph_search, \"\"], #3\n ['Depth Limited', depth_limited_search, \"\"], #4\n ['Uniform Cost', uniform_cost_search, \"\"], #5\n ['Recursive Best First w/ h1', recursive_best_first_search, 'h_1'], #6\n ['Greedy Best First Graph w/ h1', greedy_best_first_graph_search, 'h_1'], #7\n ['Astar w/ h1', astar_search, 'h_1'], #8\n ['Astar w/ ignore pre-cond.', astar_search, 'h_ignore_preconditions'], #9\n ['Astar w/ level-sum', astar_search, 'h_pg_levelsum'], #10\n ]\n\ndef show_path(node:Node):\n \"\"\"\n Print solution set to screen\n\n Paremeter\n ----------\n node: Node\n Search tree object that has 'solution()' method \n \"\"\"\n if node is None:\n print(\"The selected planner did not find a solution for this problem. \" +\n \"Make sure you have completed the AirCargoProblem implementation \" +\n \"and pass all unit tests first.\")\n else:\n msg = \"Search function {} plan length: {} \".format(node[0],len(node[1].solution()) )\n print(msg)\n for action in node[1].solution():\n print(\"{}{}\".format(action.name, action.args))\n\ndef run_search_table(problem: Problem, search_function, parameter=None):\n \"\"\"Perform a test to find a solution to one of cargo problems.\n\n Paremeters:\n ----------\n problem: Problem\n Cargo planning problem\n \n search_function: str\n Search algorithm function name\n \n parameter: parameter value if any [None]\n Parameter value for the search algorithms that require it.\n\n Returns:\n ----------\n Returns tuple of 5 values:\n 1 = Node expansions count\n 2 = number of times we tested for goal state\n 3 = Number of new nodes\n 4 = Number of steps\n 5 = Search tree Node object\n \"\"\" \n start = timer()\n ip = PrintableProblem(problem)\n if parameter is not None:\n node = search_function(ip, parameter)\n else:\n node = search_function(ip)\n end = timer()\n\n return (ip.succs, ip.goal_tests, ip.states, end - start, node )\n\ndef search_data(problem_id: int, s_choices: list):\n \"\"\" Perform test to solve cargo planning problem with\n the given search algorithms.\n\n Paremeters:\n ----------\n problem_id: int\n Cargo planning problem id\n \n s_choices: list\n List of the search algorithm to try.\n\n Returns:\n ----------\n Returns tuple of two items:\n 1 = DataFrame that summarizes test result\n 2 = A list of tuples, where the first item in the \n tuple is the search algorithm name and the second\n is its corresponding search Node object.\n \"\"\"\n #lets get a list of problems and search algorithms\n problem_name,problem = PROBLEMS[problem_id - 1][0],PROBLEMS[problem_id- 1][1]\n searches = [SEARCHES_SHORT_NAME[i-1] for i in map(int, s_choices)]\n\n # helper variables to create DataFrame\n steps = []\n fun_name = []\n expansions = []\n goal_test =[]\n new_nodes = []\n elapsed_time = []\n nodes = []\n\n 
for sname, s, h in searches:\n start_time = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %I:%M:%S%p')\n print(\"\\nSolving {} using {} start time {}...\".format(problem_name, sname, start_time))\n\n _p = problem()\n _h = None if not h else getattr(_p, h)\n \n #perform test get result\n result = run_search_table(_p, s, _h)\n\n #update helper list variables\n fun_name.append(sname)\n expansions.append(result[0])\n goal_test.append(result[1])\n new_nodes.append(result[2])\n elapsed_time.append(result[3])\n steps.append(len(result[4].solution()) )\n nodes.append([sname,result[4]])\n \n #create dictionary for DataFrame input\n table_dict = OrderedDict()\n table_dict[\"Function Name\"] = fun_name\n table_dict[\"Solution Steps\"] = steps\n table_dict[\"Expansions\"] = expansions\n table_dict[\"Goal Tests\"] = goal_test\n table_dict[\"New_Nodes\"] = new_nodes\n table_dict[\"Elapsed Seconds\"] = elapsed_time\n \n dataframe = pd.DataFrame(table_dict)\n dataframe.index +=1\n return dataframe, nodes"
] | [
[
"pandas.DataFrame"
]
] |